1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007-2012 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
42 #include <xf86atomic.h>
50 #include <sys/ioctl.h>
52 #include <sys/types.h>
57 #define ETIME ETIMEDOUT
59 #include "libdrm_macros.h"
60 #include "libdrm_lists.h"
61 #include "intel_bufmgr.h"
62 #include "intel_bufmgr_priv.h"
63 #include "intel_chipset.h"
77 #define memclear(s) memset(&s, 0, sizeof(s))
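/* Zeroes an ioctl argument struct before it is filled in, e.g.:
 *
 *     struct drm_i915_gem_busy busy;
 *     memclear(busy);
 *     busy.handle = bo_gem->gem_handle;
 */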
79 #define DBG(...) do { \
80 if (bufmgr_gem->bufmgr.debug) \
81 fprintf(stderr, __VA_ARGS__); \
84 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
85 #define MAX2(A, B) ((A) > (B) ? (A) : (B))
88 * upper_32_bits - return bits 32-63 of a number
89 * @n: the number we're accessing
91 * A basic shift-right of a 64- or 32-bit quantity. Use this to suppress
92 * the "right shift count >= width of type" warning when that quantity is
93 * 32 bits wide.
94 */
95 #define upper_32_bits(n) ((__u32)(((n) >> 16) >> 16))
98 * lower_32_bits - return bits 0-31 of a number
99 * @n: the number we're accessing
101 #define lower_32_bits(n) ((__u32)(n))
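/* For example, the 64-bit offset 0x1ffffffff splits into
 * upper_32_bits() == 0x1 and lower_32_bits() == 0xffffffff; the DBG
 * output in this file prints 64-bit GTT addresses as two such halves.
 */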
103 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
105 struct drm_intel_gem_bo_bucket {
110 typedef struct _drm_intel_bufmgr_gem {
111 drm_intel_bufmgr bufmgr;
119 pthread_mutex_t lock;
121 struct drm_i915_gem_exec_object *exec_objects;
122 struct drm_i915_gem_exec_object2 *exec2_objects;
123 drm_intel_bo **exec_bos;
127 /** Array of lists of cached gem objects of power-of-two sizes */
128 struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
132 drmMMListHead managers;
134 drm_intel_bo_gem *name_table;
135 drm_intel_bo_gem *handle_table;
137 drmMMListHead vma_cache;
138 int vma_count, vma_open, vma_max;
141 int available_fences;
144 unsigned int has_bsd : 1;
145 unsigned int has_blt : 1;
146 unsigned int has_relaxed_fencing : 1;
147 unsigned int has_llc : 1;
148 unsigned int has_wait_timeout : 1;
149 unsigned int bo_reuse : 1;
150 unsigned int no_exec : 1;
151 unsigned int has_vebox : 1;
152 unsigned int has_exec_async : 1;
160 } drm_intel_bufmgr_gem;
162 #define DRM_INTEL_RELOC_FENCE (1<<0)
164 typedef struct _drm_intel_reloc_target_info {
167 } drm_intel_reloc_target;
169 struct _drm_intel_bo_gem {
177 * Kernel-assigned global name for this object
179 * List contains both flink named and prime fd'd objects
181 unsigned int global_name;
183 UT_hash_handle handle_hh;
184 UT_hash_handle name_hh;
187 * Index of the buffer within the validation list while preparing a
188 * batchbuffer execution.
193 * Current tiling mode
195 uint32_t tiling_mode;
196 uint32_t swizzle_mode;
197 unsigned long stride;
199 unsigned long kflags;
203 /** Array passed to the DRM containing relocation information. */
204 struct drm_i915_gem_relocation_entry *relocs;
206 * Array of info structs corresponding to relocs[i].target_handle etc
208 drm_intel_reloc_target *reloc_target_info;
209 /** Number of entries in relocs */
211 /** Array of BOs that are referenced by this buffer and will be softpinned */
212 drm_intel_bo **softpin_target;
213 /** Number of softpinned BOs that are referenced by this buffer */
214 int softpin_target_count;
215 /** Maximum number of softpinned BOs that can be referenced by this buffer */
216 int softpin_target_size;
218 /** Mapped address for the buffer, saved across map/unmap cycles */
220 /** GTT virtual address for the buffer, saved across map/unmap cycles */
222 /** WC CPU address for the buffer, saved across map/unmap cycles */
225 * Virtual address of the buffer allocated by the user, used for userptr
230 drmMMListHead vma_list;
236 * Boolean of whether this BO and its children have been included in
237 * the current drm_intel_bufmgr_check_aperture_space() total.
239 bool included_in_check_aperture;
242 * Boolean of whether this buffer has been used as a relocation
243 * target and had its size accounted for, and thus can't have any
244 * further relocations added to it.
246 bool used_as_reloc_target;
249 * Boolean of whether we have encountered an error whilst building the relocation tree.
254 * Boolean of whether this buffer can be re-used
259 * Boolean of whether the GPU is definitely not accessing the buffer.
261 * This is only valid when reusable, since non-reusable
262 * buffers are those that have been shared with other
263 * processes, so we don't know their state.
268 * Boolean of whether this buffer was allocated with userptr
273 * Boolean of whether this buffer can be placed in the full 48-bit
274 * address range on gen8+.
276 * By default, buffers will be kept in a 32-bit range, unless this
277 * flag is explicitly set.
279 bool use_48b_address_range;
282 * Whether this buffer is softpinned at the offset specified by the user
287 * Size in bytes of this buffer and its relocation descendants.
289 * Used to avoid costly tree walking in
290 * drm_intel_bufmgr_check_aperture in the common case.
295 * Number of potential fence registers required by this buffer and its
296 * relocation descendants.
297 */
298 int reloc_tree_fences;
300 /** Flags that we may need to do the SW_FINISH ioctl on unmap. */
301 bool mapped_cpu_write;
305 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
308 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
311 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
312 uint32_t * swizzle_mode);
315 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
316 uint32_t tiling_mode,
319 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
322 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
324 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
326 static inline drm_intel_bo_gem *to_bo_gem(drm_intel_bo *bo)
328 return (drm_intel_bo_gem *)bo;
332 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
333 uint32_t *tiling_mode)
335 unsigned long min_size, max_size;
338 if (*tiling_mode == I915_TILING_NONE)
341 /* 965+ just need multiples of page size for tiling */
342 if (bufmgr_gem->gen >= 4)
343 return ROUND_UP_TO(size, 4096);
345 /* Older chips need powers of two, of at least 512k or 1M */
346 if (bufmgr_gem->gen == 3) {
347 min_size = 1024*1024;
348 max_size = 128*1024*1024;
351 max_size = 64*1024*1024;
354 if (size > max_size) {
355 *tiling_mode = I915_TILING_NONE;
359 /* Do we need to allocate every page for the fence? */
360 if (bufmgr_gem->has_relaxed_fencing)
361 return ROUND_UP_TO(size, 4096);
363 for (i = min_size; i < size; i <<= 1)
370 * Round a given pitch up to the minimum required for X tiling on a
371 * given chip. We use 512 as the minimum to allow for a later tiling
372 * change.
373 */
375 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
376 unsigned long pitch, uint32_t *tiling_mode)
378 unsigned long tile_width;
381 /* If untiled, then just align it so that we can do rendering
382 * to it with the 3D engine.
384 if (*tiling_mode == I915_TILING_NONE)
385 return ALIGN(pitch, 64);
387 if (*tiling_mode == I915_TILING_X
388 || (IS_915(bufmgr_gem->pci_device)
389 && *tiling_mode == I915_TILING_Y))
394 /* 965 is flexible */
395 if (bufmgr_gem->gen >= 4)
396 return ROUND_UP_TO(pitch, tile_width);
398 /* The older hardware has a maximum pitch of 8192 with tiled
399 * surfaces, so fall back to untiled if it's too large.
402 *tiling_mode = I915_TILING_NONE;
403 return ALIGN(pitch, 64);
406 /* Pre-965 needs power of two tile width */
407 for (i = tile_width; i < pitch; i <<= 1)
413 static struct drm_intel_gem_bo_bucket *
414 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
419 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
420 struct drm_intel_gem_bo_bucket *bucket =
421 &bufmgr_gem->cache_bucket[i];
422 if (bucket->size >= size) {
431 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
435 for (i = 0; i < bufmgr_gem->exec_count; i++) {
436 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
437 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
439 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL) {
440 DBG("%2d: %d %s(%s)\n", i, bo_gem->gem_handle,
441 bo_gem->is_softpin ? "*" : "",
446 for (j = 0; j < bo_gem->reloc_count; j++) {
447 drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
448 drm_intel_bo_gem *target_gem =
449 (drm_intel_bo_gem *) target_bo;
451 DBG("%2d: %d %s(%s)@0x%08x %08x -> "
452 "%d (%s)@0x%08x %08x + 0x%08x\n",
455 bo_gem->is_softpin ? "*" : "",
457 upper_32_bits(bo_gem->relocs[j].offset),
458 lower_32_bits(bo_gem->relocs[j].offset),
459 target_gem->gem_handle,
461 upper_32_bits(target_bo->offset64),
462 lower_32_bits(target_bo->offset64),
463 bo_gem->relocs[j].delta);
466 for (j = 0; j < bo_gem->softpin_target_count; j++) {
467 drm_intel_bo *target_bo = bo_gem->softpin_target[j];
468 drm_intel_bo_gem *target_gem =
469 (drm_intel_bo_gem *) target_bo;
470 DBG("%2d: %d %s(%s) -> "
471 "%d *(%s)@0x%08x %08x\n",
474 bo_gem->is_softpin ? "*" : "",
476 target_gem->gem_handle,
478 upper_32_bits(target_bo->offset64),
479 lower_32_bits(target_bo->offset64));
485 drm_intel_gem_bo_reference(drm_intel_bo *bo)
487 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
489 atomic_inc(&bo_gem->refcount);
493 * Adds the given buffer to the list of buffers to be validated (moved into the
494 * appropriate memory type) with the next batch submission.
496 * If a buffer is validated multiple times in a batch submission, it ends up
497 * with the intersection of the memory type flags and the union of the
498 * remaining flags.
499 */
501 drm_intel_add_validate_buffer(drm_intel_bo *bo)
503 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
504 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
507 if (bo_gem->validate_index != -1)
510 /* Extend the array of validation entries as necessary. */
511 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
512 int new_size = bufmgr_gem->exec_size * 2;
517 bufmgr_gem->exec_objects =
518 realloc(bufmgr_gem->exec_objects,
519 sizeof(*bufmgr_gem->exec_objects) * new_size);
520 bufmgr_gem->exec_bos =
521 realloc(bufmgr_gem->exec_bos,
522 sizeof(*bufmgr_gem->exec_bos) * new_size);
523 bufmgr_gem->exec_size = new_size;
526 index = bufmgr_gem->exec_count;
527 bo_gem->validate_index = index;
528 /* Fill in array entry */
529 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
530 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
531 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
532 bufmgr_gem->exec_objects[index].alignment = bo->align;
533 bufmgr_gem->exec_objects[index].offset = 0;
534 bufmgr_gem->exec_bos[index] = bo;
535 bufmgr_gem->exec_count++;
539 drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
541 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
542 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
547 flags |= EXEC_OBJECT_NEEDS_FENCE;
548 if (bo_gem->use_48b_address_range)
549 flags |= EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
550 if (bo_gem->is_softpin)
551 flags |= EXEC_OBJECT_PINNED;
553 if (bo_gem->validate_index != -1) {
554 bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |= flags;
558 /* Extend the array of validation entries as necessary. */
559 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
560 int new_size = bufmgr_gem->exec_size * 2;
565 bufmgr_gem->exec2_objects =
566 realloc(bufmgr_gem->exec2_objects,
567 sizeof(*bufmgr_gem->exec2_objects) * new_size);
568 bufmgr_gem->exec_bos =
569 realloc(bufmgr_gem->exec_bos,
570 sizeof(*bufmgr_gem->exec_bos) * new_size);
571 bufmgr_gem->exec_size = new_size;
574 index = bufmgr_gem->exec_count;
575 bo_gem->validate_index = index;
576 /* Fill in array entry */
577 bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
578 bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
579 bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
580 bufmgr_gem->exec2_objects[index].alignment = bo->align;
581 bufmgr_gem->exec2_objects[index].offset = bo->offset64;
582 bufmgr_gem->exec2_objects[index].flags = flags | bo_gem->kflags;
583 bufmgr_gem->exec2_objects[index].rsvd1 = 0;
584 bufmgr_gem->exec2_objects[index].rsvd2 = 0;
585 bufmgr_gem->exec_bos[index] = bo;
586 bufmgr_gem->exec_count++;
589 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
593 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
594 drm_intel_bo_gem *bo_gem,
595 unsigned int alignment)
599 assert(!bo_gem->used_as_reloc_target);
601 /* The older chipsets are far less flexible in terms of tiling,
602 * and require tiled buffers to be size-aligned in the aperture.
603 * This means that in the worst possible case we will need a hole
604 * twice as large as the object in order for it to fit into the
605 * aperture. Optimal packing is for wimps.
607 size = bo_gem->bo.size;
608 if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE) {
609 unsigned int min_size;
611 if (bufmgr_gem->has_relaxed_fencing) {
612 if (bufmgr_gem->gen == 3)
613 min_size = 1024*1024;
617 while (min_size < size)
622 /* Account for worst-case alignment. */
623 alignment = MAX2(alignment, min_size);
626 bo_gem->reloc_tree_size = size + alignment;
630 drm_intel_setup_reloc_list(drm_intel_bo *bo)
632 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
633 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
634 unsigned int max_relocs = bufmgr_gem->max_relocs;
636 if (bo->size / 4 < max_relocs)
637 max_relocs = bo->size / 4;
639 bo_gem->relocs = malloc(max_relocs *
640 sizeof(struct drm_i915_gem_relocation_entry));
641 bo_gem->reloc_target_info = malloc(max_relocs *
642 sizeof(drm_intel_reloc_target));
643 if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
644 bo_gem->has_error = true;
646 free (bo_gem->relocs);
647 bo_gem->relocs = NULL;
649 free (bo_gem->reloc_target_info);
650 bo_gem->reloc_target_info = NULL;
659 drm_intel_gem_bo_busy(drm_intel_bo *bo)
661 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
662 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
663 struct drm_i915_gem_busy busy;
666 if (bo_gem->reusable && bo_gem->idle)
670 busy.handle = bo_gem->gem_handle;
672 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
674 bo_gem->idle = !busy.busy;
679 return (ret == 0 && busy.busy);
683 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
684 drm_intel_bo_gem *bo_gem, int state)
686 struct drm_i915_gem_madvise madv;
689 madv.handle = bo_gem->gem_handle;
692 drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
694 return madv.retained;
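/* Usage sketch, mirroring the BO cache logic later in this file: mark an
 * idle buffer purgeable, and on reuse check whether the kernel retained
 * its backing pages:
 *
 *     drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
 *                                       I915_MADV_DONTNEED);
 *     ...
 *     if (!drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
 *                                            I915_MADV_WILLNEED))
 *             drm_intel_gem_bo_free(&bo_gem->bo); // pages were purged
 */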
698 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
700 return drm_intel_gem_bo_madvise_internal
701 ((drm_intel_bufmgr_gem *) bo->bufmgr,
702 (drm_intel_bo_gem *) bo,
706 /* drop the oldest entries that have been purged by the kernel */
708 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
709 struct drm_intel_gem_bo_bucket *bucket)
711 while (!DRMLISTEMPTY(&bucket->head)) {
712 drm_intel_bo_gem *bo_gem;
714 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
715 bucket->head.next, head);
716 if (drm_intel_gem_bo_madvise_internal
717 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
720 DRMLISTDEL(&bo_gem->head);
721 drm_intel_gem_bo_free(&bo_gem->bo);
725 static drm_intel_bo *
726 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
730 uint32_t tiling_mode,
731 unsigned long stride,
732 unsigned int alignment)
734 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
735 drm_intel_bo_gem *bo_gem;
736 unsigned int page_size = getpagesize();
738 struct drm_intel_gem_bo_bucket *bucket;
739 bool alloc_from_cache;
740 unsigned long bo_size;
741 bool for_render = false;
743 if (flags & BO_ALLOC_FOR_RENDER)
746 /* Round the allocated size up to a power of two number of pages. */
747 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
749 /* If we don't have caching at this size, don't actually round the
750 * allocation up.
751 */
752 if (bucket == NULL) {
754 if (bo_size < page_size)
757 bo_size = bucket->size;
760 pthread_mutex_lock(&bufmgr_gem->lock);
761 /* Get a buffer out of the cache if available */
763 alloc_from_cache = false;
764 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
766 /* Allocate new render-target BOs from the tail (MRU)
767 * of the list, as it will likely be hot in the GPU
768 * cache and in the aperture for us.
770 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
771 bucket->head.prev, head);
772 DRMLISTDEL(&bo_gem->head);
773 alloc_from_cache = true;
774 bo_gem->bo.align = alignment;
776 assert(alignment == 0);
777 /* For non-render-target BOs (where we're probably
778 * going to map it first thing in order to fill it
779 * with data), check if the last BO in the cache is
780 * unbusy, and only reuse in that case. Otherwise,
781 * allocating a new buffer is probably faster than
782 * waiting for the GPU to finish.
784 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
785 bucket->head.next, head);
786 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
787 alloc_from_cache = true;
788 DRMLISTDEL(&bo_gem->head);
792 if (alloc_from_cache) {
793 if (!drm_intel_gem_bo_madvise_internal
794 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
795 drm_intel_gem_bo_free(&bo_gem->bo);
796 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
801 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
804 drm_intel_gem_bo_free(&bo_gem->bo);
810 if (!alloc_from_cache) {
811 struct drm_i915_gem_create create;
813 bo_gem = calloc(1, sizeof(*bo_gem));
817 /* drm_intel_gem_bo_free() would call DRMLISTDEL() on an uninitialized
818 list (vma_list), so initialize the list head here first */
819 DRMINITLISTHEAD(&bo_gem->vma_list);
821 bo_gem->bo.size = bo_size;
824 create.size = bo_size;
826 ret = drmIoctl(bufmgr_gem->fd,
827 DRM_IOCTL_I915_GEM_CREATE,
834 bo_gem->gem_handle = create.handle;
835 bo_gem->bo.handle = bo_gem->gem_handle;
836 bo_gem->bo.bufmgr = bufmgr;
837 bo_gem->bo.align = alignment;
839 bo_gem->tiling_mode = I915_TILING_NONE;
840 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
843 if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
848 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
849 gem_handle, sizeof(bo_gem->gem_handle),
854 atomic_set(&bo_gem->refcount, 1);
855 bo_gem->validate_index = -1;
856 bo_gem->reloc_tree_fences = 0;
857 bo_gem->used_as_reloc_target = false;
858 bo_gem->has_error = false;
859 bo_gem->reusable = true;
860 bo_gem->use_48b_address_range = false;
862 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, alignment);
863 pthread_mutex_unlock(&bufmgr_gem->lock);
865 DBG("bo_create: buf %d (%s) %ldb\n",
866 bo_gem->gem_handle, bo_gem->name, size);
871 drm_intel_gem_bo_free(&bo_gem->bo);
873 pthread_mutex_unlock(&bufmgr_gem->lock);
877 static drm_intel_bo *
878 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
881 unsigned int alignment)
883 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
889 static drm_intel_bo *
890 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
893 unsigned int alignment)
895 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
896 I915_TILING_NONE, 0, 0);
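/* Example (sketch) of the public entry point this implements: a plain
 * linear allocation with 4096-byte alignment:
 *
 *     drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 */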
899 static drm_intel_bo *
900 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
901 int x, int y, int cpp, uint32_t *tiling_mode,
902 unsigned long *pitch, unsigned long flags)
904 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
905 unsigned long size, stride;
909 unsigned long aligned_y, height_alignment;
911 tiling = *tiling_mode;
913 /* If we're tiled, our allocations are in 8- or 32-row blocks,
914 * so failure to align our height means that we won't allocate
915 * enough pages.
916 *
917 * If we're untiled, we still have to align to 2 rows high
918 * because the data port accesses 2x2 blocks even if the
919 * bottom row isn't to be rendered, so failure to align means
920 * we could walk off the end of the GTT and fault. This is
921 * documented on 965, and may be the case on older chipsets
922 * too so we try to be careful.
925 height_alignment = 2;
927 if ((bufmgr_gem->gen == 2) && tiling != I915_TILING_NONE)
928 height_alignment = 16;
929 else if (tiling == I915_TILING_X
930 || (IS_915(bufmgr_gem->pci_device)
931 && tiling == I915_TILING_Y))
932 height_alignment = 8;
933 else if (tiling == I915_TILING_Y)
934 height_alignment = 32;
935 aligned_y = ALIGN(y, height_alignment);
938 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling_mode);
939 size = stride * aligned_y;
940 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
941 } while (*tiling_mode != tiling);
944 if (tiling == I915_TILING_NONE)
947 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
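/* Example (sketch): allocate an X-tiled 1024x768 32bpp surface; the chosen
 * tiling and pitch are written back through the pointer arguments:
 *
 *     uint32_t tiling = I915_TILING_X;
 *     unsigned long pitch;
 *     drm_intel_bo *bo = drm_intel_bo_alloc_tiled(bufmgr, "fb", 1024, 768,
 *                                                 4, &tiling, &pitch, 0);
 */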
951 static drm_intel_bo *
952 drm_intel_gem_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
955 uint32_t tiling_mode,
960 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
961 drm_intel_bo_gem *bo_gem;
963 struct drm_i915_gem_userptr userptr;
965 /* Tiling with userptr surfaces is not supported
966 * on all hardware, so refuse it for the time being.
968 if (tiling_mode != I915_TILING_NONE)
971 bo_gem = calloc(1, sizeof(*bo_gem));
975 atomic_set(&bo_gem->refcount, 1);
976 DRMINITLISTHEAD(&bo_gem->vma_list);
978 bo_gem->bo.size = size;
981 userptr.user_ptr = (__u64)((unsigned long)addr);
982 userptr.user_size = size;
983 userptr.flags = flags;
985 ret = drmIoctl(bufmgr_gem->fd,
986 DRM_IOCTL_I915_GEM_USERPTR,
989 DBG("bo_create_userptr: "
990 "ioctl failed with user ptr %p size 0x%lx, "
991 "user flags 0x%lx\n", addr, size, flags);
996 pthread_mutex_lock(&bufmgr_gem->lock);
998 bo_gem->gem_handle = userptr.handle;
999 bo_gem->bo.handle = bo_gem->gem_handle;
1000 bo_gem->bo.bufmgr = bufmgr;
1001 bo_gem->is_userptr = true;
1002 bo_gem->bo.virtual = addr;
1003 /* Save the address provided by user */
1004 bo_gem->user_virtual = addr;
1005 bo_gem->tiling_mode = I915_TILING_NONE;
1006 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
1009 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1010 gem_handle, sizeof(bo_gem->gem_handle),
1013 bo_gem->name = name;
1014 bo_gem->validate_index = -1;
1015 bo_gem->reloc_tree_fences = 0;
1016 bo_gem->used_as_reloc_target = false;
1017 bo_gem->has_error = false;
1018 bo_gem->reusable = false;
1019 bo_gem->use_48b_address_range = false;
1021 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1022 pthread_mutex_unlock(&bufmgr_gem->lock);
1024 DBG("bo_create_userptr: "
1025 "ptr %p buf %d (%s) size %ldb, stride 0x%x, tile mode %d\n",
1026 addr, bo_gem->gem_handle, bo_gem->name,
1027 size, stride, tiling_mode);
1033 has_userptr(drm_intel_bufmgr_gem *bufmgr_gem)
1038 struct drm_i915_gem_userptr userptr;
1040 pgsz = sysconf(_SC_PAGESIZE);
1043 ret = posix_memalign(&ptr, pgsz, pgsz);
1045 DBG("Failed to get a page (%ld) for userptr detection!\n",
1051 userptr.user_ptr = (__u64)(unsigned long)ptr;
1052 userptr.user_size = pgsz;
1055 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_USERPTR, &userptr);
1057 if (errno == ENODEV && userptr.flags == 0) {
1058 userptr.flags = I915_USERPTR_UNSYNCHRONIZED;
1065 /* We don't release the userptr bo here as we want to keep the
1066 * kernel mm tracking alive for our lifetime. The first time we
1067 * create a userptr object the kernel has to install an mmu_notifier
1068 * which is a heavyweight operation (e.g. it requires taking all
1069 * mm_locks and stop_machine()).
1072 bufmgr_gem->userptr_active.ptr = ptr;
1073 bufmgr_gem->userptr_active.handle = userptr.handle;
1078 static drm_intel_bo *
1079 check_bo_alloc_userptr(drm_intel_bufmgr *bufmgr,
1082 uint32_t tiling_mode,
1085 unsigned long flags)
1087 if (has_userptr((drm_intel_bufmgr_gem *)bufmgr))
1088 bufmgr->bo_alloc_userptr = drm_intel_gem_bo_alloc_userptr;
1090 bufmgr->bo_alloc_userptr = NULL;
1092 return drm_intel_bo_alloc_userptr(bufmgr, name, addr,
1093 tiling_mode, stride, size, flags);
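/* Note on the hook above: check_bo_alloc_userptr() is installed as the
 * initial bo_alloc_userptr implementation; its first invocation probes the
 * kernel via has_userptr() and rebinds (or clears) the hook, so the probe
 * runs at most once per bufmgr.
 */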
1097 * Returns a drm_intel_bo wrapping the given buffer object handle.
1099 * This can be used when one application needs to pass a buffer object
1103 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
1105 unsigned int handle)
1107 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1108 drm_intel_bo_gem *bo_gem;
1110 struct drm_gem_open open_arg;
1111 struct drm_i915_gem_get_tiling get_tiling;
1113 /* At the moment most applications only have a few named bos.
1114 * For instance, in a DRI client only the render buffers passed
1115 * between X and the client are named. And since X returns the
1116 * alternating names for the front/back buffer, a linear search
1117 * provides a sufficiently fast match.
1119 pthread_mutex_lock(&bufmgr_gem->lock);
1120 HASH_FIND(name_hh, bufmgr_gem->name_table,
1121 &handle, sizeof(handle), bo_gem);
1123 drm_intel_gem_bo_reference(&bo_gem->bo);
1128 open_arg.name = handle;
1129 ret = drmIoctl(bufmgr_gem->fd,
1133 DBG("Couldn't reference %s handle 0x%08x: %s\n",
1134 name, handle, strerror(errno));
1138 /* Now see if someone has used a prime handle to get this
1139 * object from the kernel before by looking through the list
1140 * again for a matching gem_handle
1142 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
1143 &open_arg.handle, sizeof(open_arg.handle), bo_gem);
1145 drm_intel_gem_bo_reference(&bo_gem->bo);
1149 bo_gem = calloc(1, sizeof(*bo_gem));
1153 atomic_set(&bo_gem->refcount, 1);
1154 DRMINITLISTHEAD(&bo_gem->vma_list);
1156 bo_gem->bo.size = open_arg.size;
1157 bo_gem->bo.offset = 0;
1158 bo_gem->bo.offset64 = 0;
1159 bo_gem->bo.virtual = NULL;
1160 bo_gem->bo.bufmgr = bufmgr;
1161 bo_gem->name = name;
1162 bo_gem->validate_index = -1;
1163 bo_gem->gem_handle = open_arg.handle;
1164 bo_gem->bo.handle = open_arg.handle;
1165 bo_gem->global_name = handle;
1166 bo_gem->reusable = false;
1167 bo_gem->use_48b_address_range = false;
1169 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
1170 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
1171 HASH_ADD(name_hh, bufmgr_gem->name_table,
1172 global_name, sizeof(bo_gem->global_name), bo_gem);
1174 memclear(get_tiling);
1175 get_tiling.handle = bo_gem->gem_handle;
1176 ret = drmIoctl(bufmgr_gem->fd,
1177 DRM_IOCTL_I915_GEM_GET_TILING,
1182 bo_gem->tiling_mode = get_tiling.tiling_mode;
1183 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
1184 /* XXX stride is unknown */
1185 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
1186 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
1189 pthread_mutex_unlock(&bufmgr_gem->lock);
1193 drm_intel_gem_bo_free(&bo_gem->bo);
1194 pthread_mutex_unlock(&bufmgr_gem->lock);
1199 drm_intel_gem_bo_free(drm_intel_bo *bo)
1201 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1202 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1203 struct drm_gem_close close;
1206 DRMLISTDEL(&bo_gem->vma_list);
1207 if (bo_gem->mem_virtual) {
1208 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->mem_virtual, 0));
1209 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1210 bufmgr_gem->vma_count--;
1212 if (bo_gem->wc_virtual) {
1213 VG(VALGRIND_FREELIKE_BLOCK(bo_gem->wc_virtual, 0));
1214 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1215 bufmgr_gem->vma_count--;
1217 if (bo_gem->gtt_virtual) {
1218 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1219 bufmgr_gem->vma_count--;
1222 if (bo_gem->global_name)
1223 HASH_DELETE(name_hh, bufmgr_gem->name_table, bo_gem);
1224 HASH_DELETE(handle_hh, bufmgr_gem->handle_table, bo_gem);
1226 /* Close this object */
1228 close.handle = bo_gem->gem_handle;
1229 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
1231 DBG("DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
1232 bo_gem->gem_handle, bo_gem->name, strerror(errno));
1238 drm_intel_gem_bo_mark_mmaps_incoherent(drm_intel_bo *bo)
1241 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1243 if (bo_gem->mem_virtual)
1244 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->mem_virtual, bo->size);
1246 if (bo_gem->wc_virtual)
1247 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->wc_virtual, bo->size);
1249 if (bo_gem->gtt_virtual)
1250 VALGRIND_MAKE_MEM_NOACCESS(bo_gem->gtt_virtual, bo->size);
1254 /** Frees all cached buffers significantly older than @time. */
1256 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
1260 if (bufmgr_gem->time == time)
1263 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1264 struct drm_intel_gem_bo_bucket *bucket =
1265 &bufmgr_gem->cache_bucket[i];
1267 while (!DRMLISTEMPTY(&bucket->head)) {
1268 drm_intel_bo_gem *bo_gem;
1270 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1271 bucket->head.next, head);
1272 if (time - bo_gem->free_time <= 1)
1275 DRMLISTDEL(&bo_gem->head);
1277 drm_intel_gem_bo_free(&bo_gem->bo);
1281 bufmgr_gem->time = time;
1284 static void drm_intel_gem_bo_purge_vma_cache(drm_intel_bufmgr_gem *bufmgr_gem)
1288 DBG("%s: cached=%d, open=%d, limit=%d\n", __FUNCTION__,
1289 bufmgr_gem->vma_count, bufmgr_gem->vma_open, bufmgr_gem->vma_max);
1291 if (bufmgr_gem->vma_max < 0)
1294 /* We may need to evict a few entries in order to create new mmaps */
1295 limit = bufmgr_gem->vma_max - 2*bufmgr_gem->vma_open;
1299 while (bufmgr_gem->vma_count > limit) {
1300 drm_intel_bo_gem *bo_gem;
1302 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1303 bufmgr_gem->vma_cache.next,
1305 assert(bo_gem->map_count == 0);
1306 DRMLISTDELINIT(&bo_gem->vma_list);
1308 if (bo_gem->mem_virtual) {
1309 drm_munmap(bo_gem->mem_virtual, bo_gem->bo.size);
1310 bo_gem->mem_virtual = NULL;
1311 bufmgr_gem->vma_count--;
1313 if (bo_gem->wc_virtual) {
1314 drm_munmap(bo_gem->wc_virtual, bo_gem->bo.size);
1315 bo_gem->wc_virtual = NULL;
1316 bufmgr_gem->vma_count--;
1318 if (bo_gem->gtt_virtual) {
1319 drm_munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
1320 bo_gem->gtt_virtual = NULL;
1321 bufmgr_gem->vma_count--;
1326 static void drm_intel_gem_bo_close_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1327 drm_intel_bo_gem *bo_gem)
1329 bufmgr_gem->vma_open--;
1330 DRMLISTADDTAIL(&bo_gem->vma_list, &bufmgr_gem->vma_cache);
1331 if (bo_gem->mem_virtual)
1332 bufmgr_gem->vma_count++;
1333 if (bo_gem->wc_virtual)
1334 bufmgr_gem->vma_count++;
1335 if (bo_gem->gtt_virtual)
1336 bufmgr_gem->vma_count++;
1337 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
1340 static void drm_intel_gem_bo_open_vma(drm_intel_bufmgr_gem *bufmgr_gem,
1341 drm_intel_bo_gem *bo_gem)
1343 bufmgr_gem->vma_open++;
1344 DRMLISTDEL(&bo_gem->vma_list);
1345 if (bo_gem->mem_virtual)
1346 bufmgr_gem->vma_count--;
1347 if (bo_gem->wc_virtual)
1348 bufmgr_gem->vma_count--;
1349 if (bo_gem->gtt_virtual)
1350 bufmgr_gem->vma_count--;
1351 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
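/* Between them, close_vma()/open_vma() keep two counters in balance:
 * vma_open counts buffers currently mapped by callers, while vma_count
 * counts mmaps parked in the vma_cache; the purge above uses both to keep
 * the total number of live mappings under vma_max.
 */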
1355 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
1357 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1358 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1359 struct drm_intel_gem_bo_bucket *bucket;
1362 /* Unreference all the target buffers */
1363 for (i = 0; i < bo_gem->reloc_count; i++) {
1364 if (bo_gem->reloc_target_info[i].bo != bo) {
1365 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
1366 reloc_target_info[i].bo,
1370 for (i = 0; i < bo_gem->softpin_target_count; i++)
1371 drm_intel_gem_bo_unreference_locked_timed(bo_gem->softpin_target[i],
1374 bo_gem->reloc_count = 0;
1375 bo_gem->used_as_reloc_target = false;
1376 bo_gem->softpin_target_count = 0;
1378 DBG("bo_unreference final: %d (%s)\n",
1379 bo_gem->gem_handle, bo_gem->name);
1381 /* release memory associated with this object */
1382 if (bo_gem->reloc_target_info) {
1383 free(bo_gem->reloc_target_info);
1384 bo_gem->reloc_target_info = NULL;
1386 if (bo_gem->relocs) {
1387 free(bo_gem->relocs);
1388 bo_gem->relocs = NULL;
1390 if (bo_gem->softpin_target) {
1391 free(bo_gem->softpin_target);
1392 bo_gem->softpin_target = NULL;
1393 bo_gem->softpin_target_size = 0;
1396 /* Clear any left-over mappings */
1397 if (bo_gem->map_count) {
1398 DBG("bo freed with non-zero map-count %d\n", bo_gem->map_count);
1399 bo_gem->map_count = 0;
1400 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1401 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1404 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
1405 /* Put the buffer into our internal cache for reuse if we can. */
1406 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
1407 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
1408 I915_MADV_DONTNEED)) {
1409 bo_gem->free_time = time;
1411 bo_gem->name = NULL;
1412 bo_gem->validate_index = -1;
1416 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
1418 drm_intel_gem_bo_free(bo);
1422 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
1425 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1427 assert(atomic_read(&bo_gem->refcount) > 0);
1428 if (atomic_dec_and_test(&bo_gem->refcount))
1429 drm_intel_gem_bo_unreference_final(bo, time);
1432 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
1434 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1436 assert(atomic_read(&bo_gem->refcount) > 0);
1438 if (atomic_add_unless(&bo_gem->refcount, -1, 1)) {
1439 drm_intel_bufmgr_gem *bufmgr_gem =
1440 (drm_intel_bufmgr_gem *) bo->bufmgr;
1441 struct timespec time;
1443 clock_gettime(CLOCK_MONOTONIC, &time);
1445 pthread_mutex_lock(&bufmgr_gem->lock);
1447 if (atomic_dec_and_test(&bo_gem->refcount)) {
1448 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
1449 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
1452 pthread_mutex_unlock(&bufmgr_gem->lock);
1456 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
1458 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1459 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1460 struct drm_i915_gem_set_domain set_domain;
1463 if (bo_gem->is_userptr) {
1464 /* Return the same user ptr */
1465 bo->virtual = bo_gem->user_virtual;
1469 pthread_mutex_lock(&bufmgr_gem->lock);
1471 if (bo_gem->map_count++ == 0)
1472 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1474 if (!bo_gem->mem_virtual) {
1475 struct drm_i915_gem_mmap mmap_arg;
1477 DBG("bo_map: %d (%s), map_count=%d\n",
1478 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1481 mmap_arg.handle = bo_gem->gem_handle;
1482 mmap_arg.size = bo->size;
1483 ret = drmIoctl(bufmgr_gem->fd,
1484 DRM_IOCTL_I915_GEM_MMAP,
1488 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1489 __FILE__, __LINE__, bo_gem->gem_handle,
1490 bo_gem->name, strerror(errno));
1491 if (--bo_gem->map_count == 0)
1492 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1493 pthread_mutex_unlock(&bufmgr_gem->lock);
1496 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
1497 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
1499 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1500 bo_gem->mem_virtual);
1501 bo->virtual = bo_gem->mem_virtual;
1503 memclear(set_domain);
1504 set_domain.handle = bo_gem->gem_handle;
1505 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
1507 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
1509 set_domain.write_domain = 0;
1510 ret = drmIoctl(bufmgr_gem->fd,
1511 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1514 DBG("%s:%d: Error setting to CPU domain %d: %s\n",
1515 __FILE__, __LINE__, bo_gem->gem_handle,
1520 bo_gem->mapped_cpu_write = true;
1522 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1523 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->mem_virtual, bo->size));
1524 pthread_mutex_unlock(&bufmgr_gem->lock);
1530 map_gtt(drm_intel_bo *bo)
1532 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1533 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1536 if (bo_gem->is_userptr)
1539 if (bo_gem->map_count++ == 0)
1540 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
1542 /* Get a mapping of the buffer if we haven't before. */
1543 if (bo_gem->gtt_virtual == NULL) {
1544 struct drm_i915_gem_mmap_gtt mmap_arg;
1546 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
1547 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
1550 mmap_arg.handle = bo_gem->gem_handle;
1552 /* Get the fake offset back... */
1553 ret = drmIoctl(bufmgr_gem->fd,
1554 DRM_IOCTL_I915_GEM_MMAP_GTT,
1558 DBG("%s:%d: Error preparing buffer map %d (%s): %s .\n",
1560 bo_gem->gem_handle, bo_gem->name,
1562 if (--bo_gem->map_count == 0)
1563 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1568 bo_gem->gtt_virtual = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
1569 MAP_SHARED, bufmgr_gem->fd,
1571 if (bo_gem->gtt_virtual == MAP_FAILED) {
1572 bo_gem->gtt_virtual = NULL;
1574 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
1576 bo_gem->gem_handle, bo_gem->name,
1578 if (--bo_gem->map_count == 0)
1579 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1584 bo->virtual = bo_gem->gtt_virtual;
1586 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
1587 bo_gem->gtt_virtual);
1593 drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
1595 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1596 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1597 struct drm_i915_gem_set_domain set_domain;
1600 pthread_mutex_lock(&bufmgr_gem->lock);
1604 pthread_mutex_unlock(&bufmgr_gem->lock);
1608 /* Now move it to the GTT domain so that the GPU and CPU
1609 * caches are flushed and the GPU isn't actively using the
1612 * The pagefault handler does this domain change for us when
1613 * it has unbound the BO from the GTT, but it's up to us to
1614 * tell it when we're about to use things if we had done
1615 * rendering and it still happens to be bound to the GTT.
1617 memclear(set_domain);
1618 set_domain.handle = bo_gem->gem_handle;
1619 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1620 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
1621 ret = drmIoctl(bufmgr_gem->fd,
1622 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1625 DBG("%s:%d: Error setting domain %d: %s\n",
1626 __FILE__, __LINE__, bo_gem->gem_handle,
1630 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1631 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1632 pthread_mutex_unlock(&bufmgr_gem->lock);
1638 * Performs a mapping of the buffer object like the normal GTT
1639 * mapping, but avoids waiting for the GPU to be done reading from or
1640 * rendering to the buffer.
1642 * This is used in the implementation of GL_ARB_map_buffer_range: The
1643 * user asks to create a buffer, then does a mapping, fills some
1644 * space, runs a drawing command, then asks to map it again without
1645 * synchronizing because it guarantees that it won't write over the
1646 * data that the GPU is busy using (or, more specifically, that if it
1647 * does write over the data, it acknowledges that rendering is
1652 drm_intel_gem_bo_map_unsynchronized(drm_intel_bo *bo)
1654 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1655 #ifdef HAVE_VALGRIND
1656 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1660 /* If the CPU cache isn't coherent with the GTT, then use a
1661 * regular synchronized mapping. The problem is that we don't
1662 * track where the buffer was last used on the CPU side in
1663 * terms of drm_intel_bo_map vs drm_intel_gem_bo_map_gtt, so
1664 * we would potentially corrupt the buffer even when the user
1665 * does reasonable things.
1667 if (!bufmgr_gem->has_llc)
1668 return drm_intel_gem_bo_map_gtt(bo);
1670 pthread_mutex_lock(&bufmgr_gem->lock);
1674 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1675 VG(VALGRIND_MAKE_MEM_DEFINED(bo_gem->gtt_virtual, bo->size));
1678 pthread_mutex_unlock(&bufmgr_gem->lock);
1683 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1685 drm_intel_bufmgr_gem *bufmgr_gem;
1686 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1692 if (bo_gem->is_userptr)
1695 bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1697 pthread_mutex_lock(&bufmgr_gem->lock);
1699 if (bo_gem->map_count <= 0) {
1700 DBG("attempted to unmap an unmapped bo\n");
1701 pthread_mutex_unlock(&bufmgr_gem->lock);
1702 /* Preserve the old behaviour of just treating this as a
1703 * no-op rather than reporting the error.
1708 if (bo_gem->mapped_cpu_write) {
1709 struct drm_i915_gem_sw_finish sw_finish;
1711 /* Cause a flush to happen if the buffer's pinned for
1712 * scanout, so the results show up in a timely manner.
1713 * Unlike GTT set domains, this only does work if the
1714 * buffer should be scanout-related.
1716 memclear(sw_finish);
1717 sw_finish.handle = bo_gem->gem_handle;
1718 ret = drmIoctl(bufmgr_gem->fd,
1719 DRM_IOCTL_I915_GEM_SW_FINISH,
1721 ret = ret == -1 ? -errno : 0;
1723 bo_gem->mapped_cpu_write = false;
1726 /* We need to unmap after every invocation as we cannot track
1727 * an open vma for every bo as that will exhaust the system
1728 * limits and cause later failures.
1730 if (--bo_gem->map_count == 0) {
1731 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
1732 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
1735 pthread_mutex_unlock(&bufmgr_gem->lock);
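/* Typical CPU access pattern (sketch): map, write, unmap. The unmap drops
 * the map_count reference and may hand the mmap back to the vma cache
 * rather than tearing it down:
 *
 *     if (drm_intel_bo_map(bo, 1) == 0) {
 *             memcpy(bo->virtual, data, len); // "data"/"len" hypothetical
 *             drm_intel_bo_unmap(bo);
 *     }
 */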
1741 drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
1743 return drm_intel_gem_bo_unmap(bo);
1747 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1748 unsigned long size, const void *data)
1750 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1751 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1752 struct drm_i915_gem_pwrite pwrite;
1755 if (bo_gem->is_userptr)
1759 pwrite.handle = bo_gem->gem_handle;
1760 pwrite.offset = offset;
1762 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1763 ret = drmIoctl(bufmgr_gem->fd,
1764 DRM_IOCTL_I915_GEM_PWRITE,
1768 DBG("%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1769 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1770 (int)size, strerror(errno));
1777 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1779 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1780 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1783 memclear(get_pipe_from_crtc_id);
1784 get_pipe_from_crtc_id.crtc_id = crtc_id;
1785 ret = drmIoctl(bufmgr_gem->fd,
1786 DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1787 &get_pipe_from_crtc_id);
1789 /* We return -1 here to signal that we don't
1790 * know which pipe is associated with this crtc.
1791 * This lets the caller know that this information
1792 * isn't available; using the wrong pipe for
1793 * vblank waiting can cause the chipset to lock up
1798 return get_pipe_from_crtc_id.pipe;
1802 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1803 unsigned long size, void *data)
1805 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1806 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1807 struct drm_i915_gem_pread pread;
1810 if (bo_gem->is_userptr)
1814 pread.handle = bo_gem->gem_handle;
1815 pread.offset = offset;
1817 pread.data_ptr = (uint64_t) (uintptr_t) data;
1818 ret = drmIoctl(bufmgr_gem->fd,
1819 DRM_IOCTL_I915_GEM_PREAD,
1823 DBG("%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1824 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1825 (int)size, strerror(errno));
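/* Sketch of a pwrite/pread round trip through these helpers:
 *
 *     uint32_t magic = 0xdeadbeef;
 *     drm_intel_bo_subdata(bo, 0, sizeof(magic), &magic);
 *     drm_intel_bo_get_subdata(bo, 0, sizeof(magic), &magic);
 */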
1831 /** Waits for all GPU rendering with the object to have completed. */
1833 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1835 drm_intel_gem_bo_start_gtt_access(bo, 1);
1839 * Waits on a BO for the given amount of time.
1841 * @bo: buffer object to wait for
1842 * @timeout_ns: amount of time to wait in nanoseconds.
1843 * If the value is less than 0, an infinite wait will occur.
1844 *
1845 * Returns 0 if the wait was successful, i.e. the last batch referencing the
1846 * object has completed within the allotted time. Otherwise some negative return
1847 * value describes the error. Of particular interest is -ETIME when the wait has
1848 * failed to yield the desired result.
1850 * Similar to drm_intel_gem_bo_wait_rendering except a timeout parameter allows
1851 * the operation to give up after a certain amount of time. Another subtle
1852 * difference is in the internal locking semantics: this variant does
1853 * not hold the lock for the duration of the wait, which makes the wait
1854 * subject to a larger userspace race window.
1855 *
1856 * The implementation shall wait until the object is no longer actively
1857 * referenced within a batch buffer at the time of the call. The wait does
1858 * not guard against the buffer being re-issued via another thread or a
1859 * flinked handle; userspace must make sure this race does not occur if such
1860 * precision is important.
1861 *
1862 * Note that some kernels have broken the infinite-wait-for-negative-values
1863 * promise; upgrade to the latest stable kernel if this is the case.
1866 drm_intel_gem_bo_wait(drm_intel_bo *bo, int64_t timeout_ns)
1868 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1869 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1870 struct drm_i915_gem_wait wait;
1873 if (!bufmgr_gem->has_wait_timeout) {
1874 DBG("%s:%d: Timed wait is not supported. Falling back to "
1875 "infinite wait\n", __FILE__, __LINE__);
1877 drm_intel_gem_bo_wait_rendering(bo);
1880 return drm_intel_gem_bo_busy(bo) ? -ETIME : 0;
1885 wait.bo_handle = bo_gem->gem_handle;
1886 wait.timeout_ns = timeout_ns;
1887 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
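/* Example (sketch): bound the wait at 500ms and fall back on timeout:
 *
 *     if (drm_intel_gem_bo_wait(bo, 500000000ll) == -ETIME)
 *             fprintf(stderr, "bo %d still busy\n", bo->handle);
 */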
1895 * Sets the object to the GTT read and possibly write domain, used by the X
1896 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1898 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1899 * can do tiled pixmaps this way.
1902 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1904 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1905 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1906 struct drm_i915_gem_set_domain set_domain;
1909 memclear(set_domain);
1910 set_domain.handle = bo_gem->gem_handle;
1911 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1912 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1913 ret = drmIoctl(bufmgr_gem->fd,
1914 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1917 DBG("%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1918 __FILE__, __LINE__, bo_gem->gem_handle,
1919 set_domain.read_domains, set_domain.write_domain,
1925 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1927 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1928 struct drm_gem_close close_bo;
1931 free(bufmgr_gem->exec2_objects);
1932 free(bufmgr_gem->exec_objects);
1933 free(bufmgr_gem->exec_bos);
1935 pthread_mutex_destroy(&bufmgr_gem->lock);
1937 /* Free any cached buffer objects we were going to reuse */
1938 for (i = 0; i < bufmgr_gem->num_buckets; i++) {
1939 struct drm_intel_gem_bo_bucket *bucket =
1940 &bufmgr_gem->cache_bucket[i];
1941 drm_intel_bo_gem *bo_gem;
1943 while (!DRMLISTEMPTY(&bucket->head)) {
1944 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1945 bucket->head.next, head);
1946 DRMLISTDEL(&bo_gem->head);
1948 drm_intel_gem_bo_free(&bo_gem->bo);
1952 /* Release userptr bo kept hanging around for optimisation. */
1953 if (bufmgr_gem->userptr_active.ptr) {
1955 close_bo.handle = bufmgr_gem->userptr_active.handle;
1956 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
1957 free(bufmgr_gem->userptr_active.ptr);
1960 "Failed to release test userptr object! (%d) "
1961 "i915 kernel driver may not be sane!\n", errno);
1968 * Adds the target buffer to the validation list and adds the relocation
1969 * to the reloc_buffer's relocation list.
1971 * The relocation entry at the given offset must already contain the
1972 * precomputed relocation value, because the kernel will optimize out
1973 * the relocation entry write when the buffer hasn't moved from the
1974 * last known offset in target_bo.
1977 do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1978 drm_intel_bo *target_bo, uint32_t target_offset,
1979 uint32_t read_domains, uint32_t write_domain,
1982 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1983 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1984 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1985 bool fenced_command;
1987 if (bo_gem->has_error)
1990 if (target_bo_gem->has_error) {
1991 bo_gem->has_error = true;
1995 /* We never use HW fences for rendering on 965+ */
1996 if (bufmgr_gem->gen >= 4)
1999 fenced_command = need_fence;
2000 if (target_bo_gem->tiling_mode == I915_TILING_NONE)
2003 /* Create a new relocation list if needed */
2004 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
2007 /* Check overflow */
2008 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
2011 assert(offset <= bo->size - 4);
2012 assert((write_domain & (write_domain - 1)) == 0);
2014 /* An object needing a fence is a tiled buffer, so it won't have
2015 * relocs to other buffers.
2018 assert(target_bo_gem->reloc_count == 0);
2019 target_bo_gem->reloc_tree_fences = 1;
2022 /* Make sure that we're not adding a reloc to something whose size has
2023 * already been accounted for.
2025 assert(!bo_gem->used_as_reloc_target);
2026 if (target_bo_gem != bo_gem) {
2027 target_bo_gem->used_as_reloc_target = true;
2028 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
2029 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
2032 bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
2033 if (target_bo != bo)
2034 drm_intel_gem_bo_reference(target_bo);
2036 bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
2037 DRM_INTEL_RELOC_FENCE;
2039 bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;
2041 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
2042 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
2043 bo_gem->relocs[bo_gem->reloc_count].target_handle =
2044 target_bo_gem->gem_handle;
2045 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
2046 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
2047 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset64;
2048 bo_gem->reloc_count++;
2054 drm_intel_gem_bo_use_48b_address_range(drm_intel_bo *bo, uint32_t enable)
2056 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2057 bo_gem->use_48b_address_range = enable;
2061 drm_intel_gem_bo_add_softpin_target(drm_intel_bo *bo, drm_intel_bo *target_bo)
2063 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2064 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2065 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
2066 if (bo_gem->has_error)
2069 if (target_bo_gem->has_error) {
2070 bo_gem->has_error = true;
2074 if (!target_bo_gem->is_softpin)
2076 if (target_bo_gem == bo_gem)
2079 if (bo_gem->softpin_target_count == bo_gem->softpin_target_size) {
2080 int new_size = bo_gem->softpin_target_size * 2;
2082 new_size = bufmgr_gem->max_relocs;
2084 bo_gem->softpin_target = realloc(bo_gem->softpin_target, new_size *
2085 sizeof(drm_intel_bo *));
2086 if (!bo_gem->softpin_target)
2089 bo_gem->softpin_target_size = new_size;
2091 bo_gem->softpin_target[bo_gem->softpin_target_count] = target_bo;
2092 drm_intel_gem_bo_reference(target_bo);
2093 bo_gem->softpin_target_count++;
2099 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
2100 drm_intel_bo *target_bo, uint32_t target_offset,
2101 uint32_t read_domains, uint32_t write_domain)
2103 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2104 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
2106 if (target_bo_gem->is_softpin)
2107 return drm_intel_gem_bo_add_softpin_target(bo, target_bo);
2109 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2110 read_domains, write_domain,
2111 !bufmgr_gem->fenced_relocs);
2115 drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
2116 drm_intel_bo *target_bo,
2117 uint32_t target_offset,
2118 uint32_t read_domains, uint32_t write_domain)
2120 return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
2121 read_domains, write_domain, true);
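/* Caller sketch for the two emit_reloc variants: write the presumed
 * address into the batch first (so the kernel can skip the fixup when
 * nothing moved), then record the relocation ("batch"/"offset" are
 * hypothetical):
 *
 *     batch[offset / 4] = target_bo->offset64 + target_offset;
 *     drm_intel_bo_emit_reloc(batch_bo, offset, target_bo, target_offset,
 *                             I915_GEM_DOMAIN_RENDER, 0);
 */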
2125 drm_intel_gem_bo_get_reloc_count(drm_intel_bo *bo)
2127 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2129 return bo_gem->reloc_count;
2133 * Removes existing relocation entries in the BO after "start".
2135 * This allows a user to avoid a two-step process for state setup with
2136 * counting up all the buffer objects and doing a
2137 * drm_intel_bufmgr_check_aperture_space() before emitting any of the
2138 * relocations for the state setup. Instead, save the state of the
2139 * batchbuffer including drm_intel_gem_bo_get_reloc_count(), emit all the
2140 * state, and then check if it still fits in the aperture.
2142 * Any further drm_intel_bufmgr_check_aperture_space() queries
2143 * involving this buffer in the tree are undefined after this call.
2145 * This also removes all softpinned targets being referenced by the BO.
2148 drm_intel_gem_bo_clear_relocs(drm_intel_bo *bo, int start)
2150 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2151 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2153 struct timespec time;
2155 clock_gettime(CLOCK_MONOTONIC, &time);
2157 assert(bo_gem->reloc_count >= start);
2159 /* Unreference the cleared target buffers */
2160 pthread_mutex_lock(&bufmgr_gem->lock);
2162 for (i = start; i < bo_gem->reloc_count; i++) {
2163 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->reloc_target_info[i].bo;
2164 if (&target_bo_gem->bo != bo) {
2165 bo_gem->reloc_tree_fences -= target_bo_gem->reloc_tree_fences;
2166 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo,
2170 bo_gem->reloc_count = start;
2172 for (i = 0; i < bo_gem->softpin_target_count; i++) {
2173 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) bo_gem->softpin_target[i];
2174 drm_intel_gem_bo_unreference_locked_timed(&target_bo_gem->bo, time.tv_sec);
2176 bo_gem->softpin_target_count = 0;
2178 pthread_mutex_unlock(&bufmgr_gem->lock);
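/* Typical use (sketch), per the comment above: snapshot the count, emit
 * state speculatively, then roll back if the aperture check fails:
 *
 *     int start = drm_intel_gem_bo_get_reloc_count(batch_bo);
 *     ... emit relocations ...
 *     if (drm_intel_bufmgr_check_aperture_space(&batch_bo, 1) != 0)
 *             drm_intel_gem_bo_clear_relocs(batch_bo, start);
 */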
2183 * Walk the tree of relocations rooted at BO and accumulate the list of
2184 * validations to be performed, updating the relocation buffers with
2185 * index values into the validation list.
2188 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
2190 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2193 if (bo_gem->relocs == NULL)
2196 for (i = 0; i < bo_gem->reloc_count; i++) {
2197 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2199 if (target_bo == bo)
2202 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2204 /* Continue walking the tree depth-first. */
2205 drm_intel_gem_bo_process_reloc(target_bo);
2207 /* Add the target to the validate list */
2208 drm_intel_add_validate_buffer(target_bo);
2213 drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)
2215 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2218 if (bo_gem->relocs == NULL && bo_gem->softpin_target == NULL)
2221 for (i = 0; i < bo_gem->reloc_count; i++) {
2222 drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;
2225 if (target_bo == bo)
2228 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2230 /* Continue walking the tree depth-first. */
2231 drm_intel_gem_bo_process_reloc2(target_bo);
2233 need_fence = (bo_gem->reloc_target_info[i].flags &
2234 DRM_INTEL_RELOC_FENCE);
2236 /* Add the target to the validate list */
2237 drm_intel_add_validate_buffer2(target_bo, need_fence);
2240 for (i = 0; i < bo_gem->softpin_target_count; i++) {
2241 drm_intel_bo *target_bo = bo_gem->softpin_target[i];
2243 if (target_bo == bo)
2246 drm_intel_gem_bo_mark_mmaps_incoherent(bo);
2247 drm_intel_gem_bo_process_reloc2(target_bo);
2248 drm_intel_add_validate_buffer2(target_bo, false);
2254 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
2258 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2259 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2260 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2262 /* Update the buffer offset */
2263 if (bufmgr_gem->exec_objects[i].offset != bo->offset64) {
2264 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2265 bo_gem->gem_handle, bo_gem->name,
2266 upper_32_bits(bo->offset64),
2267 lower_32_bits(bo->offset64),
2268 upper_32_bits(bufmgr_gem->exec_objects[i].offset),
2269 lower_32_bits(bufmgr_gem->exec_objects[i].offset));
2270 bo->offset64 = bufmgr_gem->exec_objects[i].offset;
2271 bo->offset = bufmgr_gem->exec_objects[i].offset;
2277 drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
2281 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2282 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
2283 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
2285 /* Update the buffer offset */
2286 if (bufmgr_gem->exec2_objects[i].offset != bo->offset64) {
2287 /* If we see a softpinned object here, it means the kernel
2288 * has relocated our object, indicating a programming error
2290 assert(!bo_gem->is_softpin);
2291 DBG("BO %d (%s) migrated: 0x%08x %08x -> 0x%08x %08x\n",
2292 bo_gem->gem_handle, bo_gem->name,
2293 upper_32_bits(bo->offset64),
2294 lower_32_bits(bo->offset64),
2295 upper_32_bits(bufmgr_gem->exec2_objects[i].offset),
2296 lower_32_bits(bufmgr_gem->exec2_objects[i].offset));
2297 bo->offset64 = bufmgr_gem->exec2_objects[i].offset;
2298 bo->offset = bufmgr_gem->exec2_objects[i].offset;
2304 drm_intel_gem_bo_aub_dump_bmp(drm_intel_bo *bo,
2305 int x1, int y1, int width, int height,
2306 enum aub_dump_bmp_format format,
2307 int pitch, int offset)
2312 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
2313 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
2315 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2316 struct drm_i915_gem_execbuffer execbuf;
2319 if (to_bo_gem(bo)->has_error)
2322 pthread_mutex_lock(&bufmgr_gem->lock);
2323 /* Update indices and set up the validate list. */
2324 drm_intel_gem_bo_process_reloc(bo);
2326 /* Add the batch buffer to the validation list. There are no
2327 * relocations pointing to it.
2329 drm_intel_add_validate_buffer(bo);
2332 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
2333 execbuf.buffer_count = bufmgr_gem->exec_count;
2334 execbuf.batch_start_offset = 0;
2335 execbuf.batch_len = used;
2336 execbuf.cliprects_ptr = (uintptr_t) cliprects;
2337 execbuf.num_cliprects = num_cliprects;
2341 ret = drmIoctl(bufmgr_gem->fd,
2342 DRM_IOCTL_I915_GEM_EXECBUFFER,
2346 if (errno == ENOSPC) {
2347 DBG("Execbuffer fails to pin. "
2348 "Estimate: %u. Actual: %u. Available: %u\n",
2349 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2352 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2355 (unsigned int)bufmgr_gem->gtt_size);
2358 drm_intel_update_buffer_offsets(bufmgr_gem);
2360 if (bufmgr_gem->bufmgr.debug)
2361 drm_intel_gem_dump_validation_list(bufmgr_gem);
2363 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2364 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2366 bo_gem->idle = false;
2368 /* Disconnect the buffer from the validate list */
2369 bo_gem->validate_index = -1;
2370 bufmgr_gem->exec_bos[i] = NULL;
2372 bufmgr_gem->exec_count = 0;
2373 pthread_mutex_unlock(&bufmgr_gem->lock);
2379 do_exec2(drm_intel_bo *bo, int used, drm_intel_context *ctx,
2380 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2381 int in_fence, int *out_fence,
2384 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
2385 struct drm_i915_gem_execbuffer2 execbuf;
2389 if (to_bo_gem(bo)->has_error)
2392 switch (flags & 0x7) {
2396 if (!bufmgr_gem->has_blt)
2400 if (!bufmgr_gem->has_bsd)
2403 case I915_EXEC_VEBOX:
2404 if (!bufmgr_gem->has_vebox)
2407 case I915_EXEC_RENDER:
2408 case I915_EXEC_DEFAULT:
2412 pthread_mutex_lock(&bufmgr_gem->lock);
2413 /* Update indices and set up the validate list. */
2414 drm_intel_gem_bo_process_reloc2(bo);
2416 /* Add the batch buffer to the validation list. There are no relocations
2417 * pointing to it.
2419 drm_intel_add_validate_buffer2(bo, 0);
2422 execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
2423 execbuf.buffer_count = bufmgr_gem->exec_count;
2424 execbuf.batch_start_offset = 0;
2425 execbuf.batch_len = used;
2426 execbuf.cliprects_ptr = (uintptr_t)cliprects;
2427 execbuf.num_cliprects = num_cliprects;
2430 execbuf.flags = flags;
2432 i915_execbuffer2_set_context_id(execbuf, 0);
2434 i915_execbuffer2_set_context_id(execbuf, ctx->ctx_id);
2436 if (in_fence != -1) {
2437 execbuf.rsvd2 = in_fence;
2438 execbuf.flags |= I915_EXEC_FENCE_IN;
2440 if (out_fence != NULL) {
2442 execbuf.flags |= I915_EXEC_FENCE_OUT;
2445 if (bufmgr_gem->no_exec)
2446 goto skip_execution;
2448 ret = drmIoctl(bufmgr_gem->fd,
2449 DRM_IOCTL_I915_GEM_EXECBUFFER2_WR,
2453 if (ret == -ENOSPC) {
2454 DBG("Execbuffer fails to pin. "
2455 "Estimate: %u. Actual: %u. Available: %u\n",
2456 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
2457 bufmgr_gem->exec_count),
2458 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
2459 bufmgr_gem->exec_count),
2460 (unsigned int) bufmgr_gem->gtt_size);
2463 drm_intel_update_buffer_offsets2(bufmgr_gem);
2465 if (ret == 0 && out_fence != NULL)
2466 *out_fence = execbuf.rsvd2 >> 32;
2469 if (bufmgr_gem->bufmgr.debug)
2470 drm_intel_gem_dump_validation_list(bufmgr_gem);
2472 for (i = 0; i < bufmgr_gem->exec_count; i++) {
2473 drm_intel_bo_gem *bo_gem = to_bo_gem(bufmgr_gem->exec_bos[i]);
2475 bo_gem->idle = false;
2477 /* Disconnect the buffer from the validate list */
2478 bo_gem->validate_index = -1;
2479 bufmgr_gem->exec_bos[i] = NULL;
2481 bufmgr_gem->exec_count = 0;
2482 pthread_mutex_unlock(&bufmgr_gem->lock);
2488 drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
2489 drm_clip_rect_t *cliprects, int num_cliprects,
2492 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2493 -1, NULL, I915_EXEC_RENDER);
2497 drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
2498 drm_clip_rect_t *cliprects, int num_cliprects, int DR4,
2501 return do_exec2(bo, used, NULL, cliprects, num_cliprects, DR4,
2506 drm_intel_gem_bo_context_exec(drm_intel_bo *bo, drm_intel_context *ctx,
2507 int used, unsigned int flags)
2509 return do_exec2(bo, used, ctx, NULL, 0, 0, -1, NULL, flags);
2513 drm_intel_gem_bo_fence_exec(drm_intel_bo *bo,
2514 drm_intel_context *ctx,
2520 return do_exec2(bo, used, ctx, NULL, 0, 0, in_fence, out_fence, flags);
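/*
 * Usage sketch (illustrative): chain two batches with explicit fences.
 * The out-fence from the first execbuf is passed as the in-fence of the
 * second, so the second batch waits for the first. batch1, batch2, ctx
 * and the used byte counts are hypothetical.
 *
 *	int fence = -1;
 *
 *	drm_intel_gem_bo_fence_exec(batch1, ctx, used1, -1, &fence,
 *				    I915_EXEC_RENDER);
 *	drm_intel_gem_bo_fence_exec(batch2, ctx, used2, fence, NULL,
 *				    I915_EXEC_RENDER);
 *	close(fence);	// the sync_file fd is owned by the caller
 */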
2524 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
2526 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2527 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2528 struct drm_i915_gem_pin pin;
2532 pin.handle = bo_gem->gem_handle;
2533 pin.alignment = alignment;
2535 ret = drmIoctl(bufmgr_gem->fd,
2536 DRM_IOCTL_I915_GEM_PIN,
2541 bo->offset64 = pin.offset;
2542 bo->offset = pin.offset;
2547 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
2549 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2550 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2551 struct drm_i915_gem_unpin unpin;
2555 unpin.handle = bo_gem->gem_handle;
2557 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
2565 drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
2566 uint32_t tiling_mode,
2569 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2570 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2571 struct drm_i915_gem_set_tiling set_tiling;
2574 if (bo_gem->global_name == 0 &&
2575 tiling_mode == bo_gem->tiling_mode &&
2576 stride == bo_gem->stride)
2579 memset(&set_tiling, 0, sizeof(set_tiling));
2581 /* set_tiling is slightly broken and overwrites the
2582 * input on the error path, so we have to open code
2583 * drmIoctl.
2585 set_tiling.handle = bo_gem->gem_handle;
2586 set_tiling.tiling_mode = tiling_mode;
2587 set_tiling.stride = stride;
2589 ret = ioctl(bufmgr_gem->fd,
2590 DRM_IOCTL_I915_GEM_SET_TILING,
2592 } while (ret == -1 && (errno == EINTR || errno == EAGAIN));
2596 bo_gem->tiling_mode = set_tiling.tiling_mode;
2597 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
2598 bo_gem->stride = set_tiling.stride;
2603 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2606 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2607 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2610 /* Tiling with userptr surfaces is not supported
2611 * on all hardware, so refuse it for the time being.
2613 if (bo_gem->is_userptr)
2616 /* Linear buffers have no stride. By ensuring that we only ever use
2617 * stride 0 with linear buffers, we simplify our code.
2619 if (*tiling_mode == I915_TILING_NONE)
2622 ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
2624 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2626 *tiling_mode = bo_gem->tiling_mode;
2631 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
2632 uint32_t * swizzle_mode)
2634 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2636 *tiling_mode = bo_gem->tiling_mode;
2637 *swizzle_mode = bo_gem->swizzle_mode;
2642 drm_intel_gem_bo_set_softpin_offset(drm_intel_bo *bo, uint64_t offset)
2644 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2646 bo_gem->is_softpin = true;
2647 bo->offset64 = offset;
2648 bo->offset = offset;
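/*
 * Usage sketch (illustrative): pin a BO at a caller-chosen GPU virtual
 * address instead of letting the kernel relocate it. The offset below
 * is hypothetical; the caller owns the address-space layout and must
 * avoid overlaps.
 *
 *	if (drm_intel_bo_set_softpin_offset(bo, 64 * 4096) == 0) {
 *		// References to this BO now resolve to the fixed offset,
 *		// and the kernel will not move it at execbuf time.
 *	}
 */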
2653 drm_intel_bo_gem_create_from_prime(drm_intel_bufmgr *bufmgr, int prime_fd, int size)
2655 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2658 drm_intel_bo_gem *bo_gem;
2659 struct drm_i915_gem_get_tiling get_tiling;
2661 pthread_mutex_lock(&bufmgr_gem->lock);
2662 ret = drmPrimeFDToHandle(bufmgr_gem->fd, prime_fd, &handle);
2664 DBG("create_from_prime: failed to obtain handle from fd: %s\n", strerror(errno));
2665 pthread_mutex_unlock(&bufmgr_gem->lock);
2670 * See if the kernel has already returned this buffer to us. Just as
2671 * for named buffers, we must not create two BOs pointing at the same
2674 HASH_FIND(handle_hh, bufmgr_gem->handle_table,
2675 &handle, sizeof(handle), bo_gem);
2677 drm_intel_gem_bo_reference(&bo_gem->bo);
2681 bo_gem = calloc(1, sizeof(*bo_gem));
2685 atomic_set(&bo_gem->refcount, 1);
2686 DRMINITLISTHEAD(&bo_gem->vma_list);
2688 /* Determine size of bo. The fd-to-handle ioctl really should
2689 * return the size, but it doesn't. If we have kernel 3.12 or
2690 * later, we can lseek on the prime fd to get the size. Older
2691 * kernels will just fail, in which case we fall back to the
2692 * provided (estimated or guessed) size. */
2693 ret = lseek(prime_fd, 0, SEEK_END);
2695 bo_gem->bo.size = ret;
2697 bo_gem->bo.size = size;
2699 bo_gem->bo.handle = handle;
2700 bo_gem->bo.bufmgr = bufmgr;
2702 bo_gem->gem_handle = handle;
2703 HASH_ADD(handle_hh, bufmgr_gem->handle_table,
2704 gem_handle, sizeof(bo_gem->gem_handle), bo_gem);
2706 bo_gem->name = "prime";
2707 bo_gem->validate_index = -1;
2708 bo_gem->reloc_tree_fences = 0;
2709 bo_gem->used_as_reloc_target = false;
2710 bo_gem->has_error = false;
2711 bo_gem->reusable = false;
2712 bo_gem->use_48b_address_range = false;
2714 memclear(get_tiling);
2715 get_tiling.handle = bo_gem->gem_handle;
2716 if (drmIoctl(bufmgr_gem->fd,
2717 DRM_IOCTL_I915_GEM_GET_TILING,
2721 bo_gem->tiling_mode = get_tiling.tiling_mode;
2722 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
2723 /* XXX stride is unknown */
2724 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem, 0);
2727 pthread_mutex_unlock(&bufmgr_gem->lock);
2731 drm_intel_gem_bo_free(&bo_gem->bo);
2732 pthread_mutex_unlock(&bufmgr_gem->lock);
2737 drm_intel_bo_gem_export_to_prime(drm_intel_bo *bo, int *prime_fd)
2739 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2740 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2742 if (drmPrimeHandleToFD(bufmgr_gem->fd, bo_gem->gem_handle,
2743 DRM_CLOEXEC, prime_fd) != 0)
2746 bo_gem->reusable = false;
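/*
 * Usage sketch (illustrative): share a BO with another process or API
 * via a PRIME file descriptor. The importer passes its own size
 * estimate, which is only used when the kernel cannot report the real
 * size via lseek().
 *
 *	int fd;
 *
 *	if (drm_intel_bo_gem_export_to_prime(bo, &fd) == 0) {
 *		// ... hand fd to the consumer (e.g. over a UNIX socket) ...
 *		drm_intel_bo *imported =
 *			drm_intel_bo_gem_create_from_prime(bufmgr, fd, bo->size);
 *		close(fd);	// the imported BO keeps its own reference
 *	}
 */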
2752 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
2754 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
2755 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2757 if (!bo_gem->global_name) {
2758 struct drm_gem_flink flink;
2761 flink.handle = bo_gem->gem_handle;
2762 if (drmIoctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink))
2765 pthread_mutex_lock(&bufmgr_gem->lock);
2766 if (!bo_gem->global_name) {
2767 bo_gem->global_name = flink.name;
2768 bo_gem->reusable = false;
2770 HASH_ADD(name_hh, bufmgr_gem->name_table,
2771 global_name, sizeof(bo_gem->global_name),
2774 pthread_mutex_unlock(&bufmgr_gem->lock);
2777 *name = bo_gem->global_name;
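/*
 * Usage sketch (illustrative): legacy global-name (flink) sharing. The
 * name is guessable by any client of the same device, so PRIME fds are
 * preferred where available.
 *
 *	uint32_t name;
 *
 *	if (drm_intel_bo_flink(bo, &name) == 0) {
 *		// in the receiving process:
 *		drm_intel_bo *shared =
 *			drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 *	}
 */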
2782 * Enables unlimited caching of buffer objects for reuse.
2784 * This is potentially very memory expensive, as the cache at each bucket
2785 * size is only bounded by how many buffers of that size we've managed to have
2786 * in flight at once.
2789 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
2791 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2793 bufmgr_gem->bo_reuse = true;
2797 * Disables implicit synchronisation before executing the bo
2799 * This will cause rendering corruption unless you correctly manage explicit
2800 * fences for all rendering involving this buffer - including use by others.
2801 * Disabling the implicit serialisation is only required if that serialisation
2802 * is too coarse (for example, you have split the buffer into many
2803 * non-overlapping regions and are sharing the whole buffer between concurrent
2804 * independent command streams).
2806 * Note the kernel must advertise support via I915_PARAM_HAS_EXEC_ASYNC,
2807 * which can be checked using drm_intel_bufmgr_gem_can_disable_implicit_sync(),
2808 * or subsequent execbufs involving the bo will generate EINVAL.
2811 drm_intel_gem_bo_disable_implicit_sync(drm_intel_bo *bo)
2813 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2815 bo_gem->kflags |= EXEC_OBJECT_ASYNC;
2819 * Enables implicit synchronisation before executing the bo
2821 * This is the default behaviour of the kernel, to wait upon prior writes
2822 * completing on the object before rendering with it, or to wait for prior
2823 * reads to complete before writing into the object.
2824 * drm_intel_gem_bo_disable_implicit_sync() can stop this behaviour, telling
2825 * the kernel never to insert a stall before using the object. Then this
2826 * function can be used to restore the implicit sync before subsequent
2827 * rendering.
2830 drm_intel_gem_bo_enable_implicit_sync(drm_intel_bo *bo)
2832 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2834 bo_gem->kflags &= ~EXEC_OBJECT_ASYNC;
2838 * Query whether the kernel supports disabling of its implicit synchronisation
2839 * before execbuf. See drm_intel_gem_bo_disable_implicit_sync()
2842 drm_intel_bufmgr_gem_can_disable_implicit_sync(drm_intel_bufmgr *bufmgr)
2844 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
2846 return bufmgr_gem->has_exec_async;
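/*
 * Usage sketch (illustrative): opt a BO out of the kernel's implicit
 * serialisation only when the kernel supports it, and restore the
 * default before handing the BO back to code expecting implicit sync.
 *
 *	if (drm_intel_bufmgr_gem_can_disable_implicit_sync(bufmgr)) {
 *		drm_intel_gem_bo_disable_implicit_sync(bo);
 *		// ... submit work ordered purely by explicit fences ...
 *		drm_intel_gem_bo_enable_implicit_sync(bo);
 *	}
 */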
2850 * Enable use of fenced reloc type.
2852 * New code should enable this to avoid unnecessary fence register
2853 * allocation. If this option is not enabled, all relocs will have a fence
2854 * register allocated.
2857 drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)
2859 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
2861 if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
2862 bufmgr_gem->fenced_relocs = true;
2866 * Return the additional aperture space required by the tree of buffer objects
2870 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
2872 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2876 if (bo == NULL || bo_gem->included_in_check_aperture)
2880 bo_gem->included_in_check_aperture = true;
2882 for (i = 0; i < bo_gem->reloc_count; i++)
2884 drm_intel_gem_bo_get_aperture_space(bo_gem->
2885 reloc_target_info[i].bo);
2891 * Count the number of buffers in this list that need a fence reg
2893 * If the count is greater than the number of available regs, we'll have
2894 * to ask the caller to resubmit a batch with fewer tiled buffers.
2896 * This function over-counts if the same buffer is used multiple times.
2899 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
2902 unsigned int total = 0;
2904 for (i = 0; i < count; i++) {
2905 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2910 total += bo_gem->reloc_tree_fences;
2916 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
2917 * for the next drm_intel_bufmgr_check_aperture_space() call.
2920 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
2922 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
2925 if (bo == NULL || !bo_gem->included_in_check_aperture)
2928 bo_gem->included_in_check_aperture = false;
2930 for (i = 0; i < bo_gem->reloc_count; i++)
2931 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
2932 reloc_target_info[i].bo);
2936 * Return a conservative estimate for the amount of aperture required
2937 * for a collection of buffers. This may double-count some buffers.
2940 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
2943 unsigned int total = 0;
2945 for (i = 0; i < count; i++) {
2946 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
2948 total += bo_gem->reloc_tree_size;
2954 * Return the amount of aperture needed for a collection of buffers.
2955 * This avoids double counting any buffers, at the cost of looking
2956 * at every buffer in the set.
2959 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
2962 unsigned int total = 0;
2964 for (i = 0; i < count; i++) {
2965 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
2966 /* For the first buffer object in the array, we get an
2967 * accurate count back for its reloc_tree size (since nothing
2968 * had been flagged as being counted yet). We can save that
2969 * value out as a more conservative reloc_tree_size that
2970 * avoids double-counting target buffers. Since the first
2971 * buffer happens to usually be the batch buffer in our
2972 * callers, this can pull us back from doing the tree
2973 * walk on every new batch emit.
2976 drm_intel_bo_gem *bo_gem =
2977 (drm_intel_bo_gem *) bo_array[i];
2978 bo_gem->reloc_tree_size = total;
2982 for (i = 0; i < count; i++)
2983 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
2988 * Return -1 if the batchbuffer should be flushed before attempting to
2989 * emit rendering referencing the buffers pointed to by bo_array.
2991 * This is required because if we try to emit a batchbuffer with relocations
2992 * to a tree of buffers that won't simultaneously fit in the aperture,
2993 * the rendering will return an error at a point where the software is not
2994 * prepared to recover from it.
2996 * However, we also want to emit the batchbuffer significantly before we reach
2997 * the limit, as a series of batchbuffers each of which references buffers
2998 * covering almost all of the aperture means that at each emit we end up
2999 * waiting to evict a buffer from the last rendering, and we are reduced to
3000 * synchronous performance. By emitting smaller batchbuffers, we eat some CPU
3001 * overhead to get better parallelism.
3004 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
3006 drm_intel_bufmgr_gem *bufmgr_gem =
3007 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
3008 unsigned int total = 0;
3009 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
3012 /* Check for fence reg constraints if necessary */
3013 if (bufmgr_gem->available_fences) {
3014 total_fences = drm_intel_gem_total_fences(bo_array, count);
3015 if (total_fences > bufmgr_gem->available_fences)
3019 total = drm_intel_gem_estimate_batch_space(bo_array, count);
3021 if (total > threshold)
3022 total = drm_intel_gem_compute_batch_space(bo_array, count);
3024 if (total > threshold) {
3025 DBG("check_space: overflowed available aperture, "
3026 "%dkb vs %dkb\n",
3027 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
3030 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
3031 (int)bufmgr_gem->gtt_size / 1024);
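/*
 * Usage sketch (illustrative): the canonical caller pattern is to test
 * the aperture before emitting a draw, and to flush first on overflow.
 * flush_my_batch() and the bo array are hypothetical.
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bos, n_bos) != 0) {
 *		flush_my_batch();	// submit what we have so far
 *		// then re-emit this draw's state into a fresh batch
 *	}
 */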
3037 * Disable buffer reuse for objects which are shared with the kernel
3038 * as scanout buffers
3041 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
3043 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3045 bo_gem->reusable = false;
3050 drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)
3052 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3054 return bo_gem->reusable;
3058 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3060 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3063 for (i = 0; i < bo_gem->reloc_count; i++) {
3064 if (bo_gem->reloc_target_info[i].bo == target_bo)
3066 if (bo == bo_gem->reloc_target_info[i].bo)
3068 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,
3073 for (i = 0; i < bo_gem->softpin_target_count; i++) {
3074 if (bo_gem->softpin_target[i] == target_bo)
3076 if (_drm_intel_gem_bo_references(bo_gem->softpin_target[i], target_bo))
3083 /** Return true if target_bo is referenced by bo's relocation tree. */
3085 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
3087 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
3089 if (bo == NULL || target_bo == NULL)
3091 if (target_bo_gem->used_as_reloc_target)
3092 return _drm_intel_gem_bo_references(bo, target_bo);
3097 add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)
3099 unsigned int i = bufmgr_gem->num_buckets;
3101 assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));
3103 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
3104 bufmgr_gem->cache_bucket[i].size = size;
3105 bufmgr_gem->num_buckets++;
3109 init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)
3111 unsigned long size, cache_max_size = 64 * 1024 * 1024;
3113 /* OK, so power of two buckets was too wasteful of memory.
3114 * Give 3 other sizes between each power of two, to hopefully
3115 * cover things accurately enough. (The alternative is
3116 * probably to just go for exact matching of sizes, and assume
3117 * that for things like composited window resize the tiled
3118 * width/height alignment and rounding of sizes to pages will
3119 * get us useful cache hit rates anyway)
3121 add_bucket(bufmgr_gem, 4096);
3122 add_bucket(bufmgr_gem, 4096 * 2);
3123 add_bucket(bufmgr_gem, 4096 * 3);
3125 /* Initialize the linked lists for BO reuse cache. */
3126 for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
3127 add_bucket(bufmgr_gem, size);
3129 add_bucket(bufmgr_gem, size + size * 1 / 4);
3130 add_bucket(bufmgr_gem, size + size * 2 / 4);
3131 add_bucket(bufmgr_gem, size + size * 3 / 4);
3136 drm_intel_bufmgr_gem_set_vma_cache_size(drm_intel_bufmgr *bufmgr, int limit)
3138 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3140 bufmgr_gem->vma_max = limit;
3142 drm_intel_gem_bo_purge_vma_cache(bufmgr_gem);
3146 parse_devid_override(const char *devid_override)
3148 static const struct {
3152 { "brw", PCI_CHIP_I965_GM },
3153 { "g4x", PCI_CHIP_GM45_GM },
3154 { "ilk", PCI_CHIP_ILD_G },
3155 { "snb", PCI_CHIP_SANDYBRIDGE_M_GT2_PLUS },
3156 { "ivb", PCI_CHIP_IVYBRIDGE_S_GT2 },
3157 { "hsw", PCI_CHIP_HASWELL_CRW_E_GT3 },
3158 { "byt", PCI_CHIP_VALLEYVIEW_3 },
3159 { "bdw", 0x1620 | BDW_ULX },
3160 { "skl", PCI_CHIP_SKYLAKE_DT_GT2 },
3161 { "kbl", PCI_CHIP_KABYLAKE_DT_GT2 },
3165 for (i = 0; i < ARRAY_SIZE(name_map); i++) {
3166 if (!strcmp(name_map[i].name, devid_override))
3167 return name_map[i].pci_id;
3170 return strtoul(devid_override, NULL, 0);
3174 * Get the PCI ID for the device. This can be overridden by setting the
3175 * INTEL_DEVID_OVERRIDE environment variable to the desired ID.
3178 get_pci_device_id(drm_intel_bufmgr_gem *bufmgr_gem)
3180 char *devid_override;
3183 drm_i915_getparam_t gp;
3185 if (geteuid() == getuid()) {
3186 devid_override = getenv("INTEL_DEVID_OVERRIDE");
3187 if (devid_override) {
3188 bufmgr_gem->no_exec = true;
3189 return parse_devid_override(devid_override);
3194 gp.param = I915_PARAM_CHIPSET_ID;
3196 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3198 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
3199 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
3205 drm_intel_bufmgr_gem_get_devid(drm_intel_bufmgr *bufmgr)
3207 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3209 return bufmgr_gem->pci_device;
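/*
 * Usage sketch (illustrative): the override is read from the
 * environment at init time (and only when not running setuid), so a
 * test can select a chipset before creating the bufmgr. Note that
 * no_exec is set in that case, so batches are never actually submitted.
 *
 *	setenv("INTEL_DEVID_OVERRIDE", "skl", 1);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *	int devid = drm_intel_bufmgr_gem_get_devid(bufmgr);
 */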
3213 * Sets the AUB filename.
3215 * This function has to be called before drm_intel_bufmgr_gem_set_aub_dump()
3216 * for it to have any effect.
3219 drm_intel_bufmgr_gem_set_aub_filename(drm_intel_bufmgr *bufmgr,
3220 const char *filename)
3225 * Sets up AUB dumping.
3227 * This is a trace file format that can be used with the simulator.
3228 * Packets are emitted in a format somewhat like GPU command packets.
3229 * You can set up a GTT and upload your objects into the referenced
3230 * space, then send off batchbuffers and get BMPs out the other end.
3233 drm_intel_bufmgr_gem_set_aub_dump(drm_intel_bufmgr *bufmgr, int enable)
3235 fprintf(stderr, "libdrm aub dumping is deprecated.\n\n"
3236 "Use intel_aubdump from intel-gpu-tools instead. Install intel-gpu-tools,\n"
3237 "then run (for example)\n\n"
3238 "\t$ intel_aubdump --output=trace.aub glxgears -geometry 500x500\n\n"
3239 "See the intel_aubdump man page for more details.\n");
3243 drm_intel_gem_context_create(drm_intel_bufmgr *bufmgr)
3245 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3246 struct drm_i915_gem_context_create create;
3247 drm_intel_context *context = NULL;
3250 context = calloc(1, sizeof(*context));
3255 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_CREATE, &create);
3257 DBG("DRM_IOCTL_I915_GEM_CONTEXT_CREATE failed: %s\n",
3263 context->ctx_id = create.ctx_id;
3264 context->bufmgr = bufmgr;
3270 drm_intel_gem_context_get_id(drm_intel_context *ctx, uint32_t *ctx_id)
3275 *ctx_id = ctx->ctx_id;
3281 drm_intel_gem_context_destroy(drm_intel_context *ctx)
3283 drm_intel_bufmgr_gem *bufmgr_gem;
3284 struct drm_i915_gem_context_destroy destroy;
3292 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3293 destroy.ctx_id = ctx->ctx_id;
3294 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CONTEXT_DESTROY,
3297 fprintf(stderr, "DRM_IOCTL_I915_GEM_CONTEXT_DESTROY failed: %s\n",
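/*
 * Usage sketch (illustrative): per-context submission isolates GPU
 * state (and hang statistics) between clients sharing one fd.
 *
 *	drm_intel_context *ctx = drm_intel_gem_context_create(bufmgr);
 *
 *	if (ctx) {
 *		drm_intel_gem_bo_context_exec(batch_bo, ctx, used,
 *					      I915_EXEC_RENDER);
 *		drm_intel_gem_context_destroy(ctx);
 *	}
 */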
3304 drm_intel_get_reset_stats(drm_intel_context *ctx,
3305 uint32_t *reset_count,
3309 drm_intel_bufmgr_gem *bufmgr_gem;
3310 struct drm_i915_reset_stats stats;
3318 bufmgr_gem = (drm_intel_bufmgr_gem *)ctx->bufmgr;
3319 stats.ctx_id = ctx->ctx_id;
3320 ret = drmIoctl(bufmgr_gem->fd,
3321 DRM_IOCTL_I915_GET_RESET_STATS,
3324 if (reset_count != NULL)
3325 *reset_count = stats.reset_count;
3328 *active = stats.batch_active;
3330 if (pending != NULL)
3331 *pending = stats.batch_pending;
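/*
 * Usage sketch (illustrative): poll the reset statistics to implement
 * something like GL_ARB_robustness, comparing against a previously
 * saved count (last_count and handle_gpu_reset() are hypothetical).
 *
 *	uint32_t count, active, pending;
 *
 *	if (drm_intel_get_reset_stats(ctx, &count, &active, &pending) == 0 &&
 *	    count != last_count)
 *		handle_gpu_reset();	// caller-defined recovery path
 */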
3338 drm_intel_reg_read(drm_intel_bufmgr *bufmgr,
3342 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3343 struct drm_i915_reg_read reg_read;
3347 reg_read.offset = offset;
3349 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_REG_READ, ®_read);
3351 *result = reg_read.val;
3356 drm_intel_get_subslice_total(int fd, unsigned int *subslice_total)
3358 drm_i915_getparam_t gp;
3362 gp.value = (int*)subslice_total;
3363 gp.param = I915_PARAM_SUBSLICE_TOTAL;
3364 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
3372 drm_intel_get_eu_total(int fd, unsigned int *eu_total)
3374 drm_i915_getparam_t gp;
3378 gp.value = (int*)eu_total;
3379 gp.param = I915_PARAM_EU_TOTAL;
3380 ret = drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);
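/*
 * Usage sketch (illustrative): query the GPU topology for shader
 * compilers or dispatch heuristics; both calls fail on kernels that
 * lack the corresponding parameter.
 *
 *	unsigned int subslices, eus;
 *
 *	if (drm_intel_get_subslice_total(fd, &subslices) == 0 &&
 *	    drm_intel_get_eu_total(fd, &eus) == 0)
 *		printf("%u EUs across %u subslices\n", eus, subslices);
 */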
3388 drm_intel_get_pooled_eu(int fd)
3390 drm_i915_getparam_t gp;
3394 gp.param = I915_PARAM_HAS_POOLED_EU;
3396 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3403 drm_intel_get_min_eu_in_pool(int fd)
3405 drm_i915_getparam_t gp;
3409 gp.param = I915_PARAM_MIN_EU_IN_POOL;
3411 if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
3418 * Annotate the given bo for use in aub dumping.
3420 * \param annotations is an array of drm_intel_aub_annotation objects
3421 * describing the type of data in various sections of the bo. Each
3422 * element of the array specifies the type and subtype of a section of
3423 * the bo, and the past-the-end offset of that section. The elements
3424 * of \c annotations must be sorted so that ending_offset is
3425 * increasing.
3427 * \param count is the number of elements in the \c annotations array.
3428 * If \c count is zero, then \c annotations will not be dereferenced.
3430 * Annotations are copied into a private data structure, so caller may
3431 * re-use the memory pointed to by \c annotations after the call
3432 * returns.
3434 * Annotations are stored for the lifetime of the bo; to reset to the
3435 * default state (no annotations), call this function with a \c count
3439 drm_intel_bufmgr_gem_set_aub_annotations(drm_intel_bo *bo,
3440 drm_intel_aub_annotation *annotations,
3445 static pthread_mutex_t bufmgr_list_mutex = PTHREAD_MUTEX_INITIALIZER;
3446 static drmMMListHead bufmgr_list = { &bufmgr_list, &bufmgr_list };
3448 static drm_intel_bufmgr_gem *
3449 drm_intel_bufmgr_gem_find(int fd)
3451 drm_intel_bufmgr_gem *bufmgr_gem;
3453 DRMLISTFOREACHENTRY(bufmgr_gem, &bufmgr_list, managers) {
3454 if (bufmgr_gem->fd == fd) {
3455 atomic_inc(&bufmgr_gem->refcount);
3464 drm_intel_bufmgr_gem_unref(drm_intel_bufmgr *bufmgr)
3466 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
3468 if (atomic_add_unless(&bufmgr_gem->refcount, -1, 1)) {
3469 pthread_mutex_lock(&bufmgr_list_mutex);
3471 if (atomic_dec_and_test(&bufmgr_gem->refcount)) {
3472 DRMLISTDEL(&bufmgr_gem->managers);
3473 drm_intel_bufmgr_gem_destroy(bufmgr);
3476 pthread_mutex_unlock(&bufmgr_list_mutex);
3480 void *drm_intel_gem_bo_map__gtt(drm_intel_bo *bo)
3482 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3483 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3485 if (bo_gem->gtt_virtual)
3486 return bo_gem->gtt_virtual;
3488 if (bo_gem->is_userptr)
3491 pthread_mutex_lock(&bufmgr_gem->lock);
3492 if (bo_gem->gtt_virtual == NULL) {
3493 struct drm_i915_gem_mmap_gtt mmap_arg;
3496 DBG("bo_map_gtt: mmap %d (%s), map_count=%d\n",
3497 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3499 if (bo_gem->map_count++ == 0)
3500 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3503 mmap_arg.handle = bo_gem->gem_handle;
3505 /* Get the fake offset back... */
3507 if (drmIoctl(bufmgr_gem->fd,
3508 DRM_IOCTL_I915_GEM_MMAP_GTT,
3511 ptr = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
3512 MAP_SHARED, bufmgr_gem->fd,
3515 if (ptr == MAP_FAILED) {
3516 if (--bo_gem->map_count == 0)
3517 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3521 bo_gem->gtt_virtual = ptr;
3523 pthread_mutex_unlock(&bufmgr_gem->lock);
3525 return bo_gem->gtt_virtual;
3528 void *drm_intel_gem_bo_map__cpu(drm_intel_bo *bo)
3530 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3531 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3533 if (bo_gem->mem_virtual)
3534 return bo_gem->mem_virtual;
3536 if (bo_gem->is_userptr) {
3537 /* Return the same user ptr */
3538 return bo_gem->user_virtual;
3541 pthread_mutex_lock(&bufmgr_gem->lock);
3542 if (!bo_gem->mem_virtual) {
3543 struct drm_i915_gem_mmap mmap_arg;
3545 if (bo_gem->map_count++ == 0)
3546 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3548 DBG("bo_map: %d (%s), map_count=%d\n",
3549 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3552 mmap_arg.handle = bo_gem->gem_handle;
3553 mmap_arg.size = bo->size;
3554 if (drmIoctl(bufmgr_gem->fd,
3555 DRM_IOCTL_I915_GEM_MMAP,
3557 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3558 __FILE__, __LINE__, bo_gem->gem_handle,
3559 bo_gem->name, strerror(errno));
3560 if (--bo_gem->map_count == 0)
3561 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3563 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3564 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3567 pthread_mutex_unlock(&bufmgr_gem->lock);
3569 return bo_gem->mem_virtual;
3572 void *drm_intel_gem_bo_map__wc(drm_intel_bo *bo)
3574 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
3575 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
3577 if (bo_gem->wc_virtual)
3578 return bo_gem->wc_virtual;
3580 if (bo_gem->is_userptr)
3583 pthread_mutex_lock(&bufmgr_gem->lock);
3584 if (!bo_gem->wc_virtual) {
3585 struct drm_i915_gem_mmap mmap_arg;
3587 if (bo_gem->map_count++ == 0)
3588 drm_intel_gem_bo_open_vma(bufmgr_gem, bo_gem);
3590 DBG("bo_map: %d (%s), map_count=%d\n",
3591 bo_gem->gem_handle, bo_gem->name, bo_gem->map_count);
3594 mmap_arg.handle = bo_gem->gem_handle;
3595 mmap_arg.size = bo->size;
3596 mmap_arg.flags = I915_MMAP_WC;
3597 if (drmIoctl(bufmgr_gem->fd,
3598 DRM_IOCTL_I915_GEM_MMAP,
3600 DBG("%s:%d: Error mapping buffer %d (%s): %s .\n",
3601 __FILE__, __LINE__, bo_gem->gem_handle,
3602 bo_gem->name, strerror(errno));
3603 if (--bo_gem->map_count == 0)
3604 drm_intel_gem_bo_close_vma(bufmgr_gem, bo_gem);
3606 VG(VALGRIND_MALLOCLIKE_BLOCK(mmap_arg.addr_ptr, mmap_arg.size, 0, 1));
3607 bo_gem->wc_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
3610 pthread_mutex_unlock(&bufmgr_gem->lock);
3612 return bo_gem->wc_virtual;
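/*
 * Usage sketch (illustrative): these map__* helpers return a cached
 * mapping without the domain tracking done by drm_intel_gem_bo_map(),
 * so the caller is responsible for coherency. A CPU map on an LLC
 * platform is the cheapest way to fill a buffer (data/len hypothetical):
 *
 *	uint32_t *p = drm_intel_gem_bo_map__cpu(bo);
 *
 *	if (p)
 *		memcpy(p, data, len);	// caller must manage coherency
 */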
3616 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
3617 * and manage buffer objects.
3619 * \param fd File descriptor of the opened DRM device.
3622 drm_intel_bufmgr_gem_init(int fd, int batch_size)
3624 drm_intel_bufmgr_gem *bufmgr_gem;
3625 struct drm_i915_gem_get_aperture aperture;
3626 drm_i915_getparam_t gp;
3630 pthread_mutex_lock(&bufmgr_list_mutex);
3632 bufmgr_gem = drm_intel_bufmgr_gem_find(fd);
3636 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
3637 if (bufmgr_gem == NULL)
3640 bufmgr_gem->fd = fd;
3641 atomic_set(&bufmgr_gem->refcount, 1);
3643 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
3650 ret = drmIoctl(bufmgr_gem->fd,
3651 DRM_IOCTL_I915_GEM_GET_APERTURE,
3655 bufmgr_gem->gtt_size = aperture.aper_available_size;
3657 fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
3659 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
3660 fprintf(stderr, "Assuming %dkB available aperture size.\n"
3661 "May lead to reduced performance or incorrect "
3662 "rendering.\n",
3663 (int)bufmgr_gem->gtt_size / 1024);
3666 bufmgr_gem->pci_device = get_pci_device_id(bufmgr_gem);
3668 if (IS_GEN2(bufmgr_gem->pci_device))
3669 bufmgr_gem->gen = 2;
3670 else if (IS_GEN3(bufmgr_gem->pci_device))
3671 bufmgr_gem->gen = 3;
3672 else if (IS_GEN4(bufmgr_gem->pci_device))
3673 bufmgr_gem->gen = 4;
3674 else if (IS_GEN5(bufmgr_gem->pci_device))
3675 bufmgr_gem->gen = 5;
3676 else if (IS_GEN6(bufmgr_gem->pci_device))
3677 bufmgr_gem->gen = 6;
3678 else if (IS_GEN7(bufmgr_gem->pci_device))
3679 bufmgr_gem->gen = 7;
3680 else if (IS_GEN8(bufmgr_gem->pci_device))
3681 bufmgr_gem->gen = 8;
3682 else if (IS_GEN9(bufmgr_gem->pci_device))
3683 bufmgr_gem->gen = 9;
3690 if (IS_GEN3(bufmgr_gem->pci_device) &&
3691 bufmgr_gem->gtt_size > 256*1024*1024) {
3692 /* The unmappable part of gtt on gen 3 (i.e. above 256MB) can't
3693 * be used for tiled blits. To simplify the accounting, just
3694 * subtract the unmappable part (fixed to 256MB on all known
3695 * gen3 devices) if the kernel advertises it. */
3696 bufmgr_gem->gtt_size -= 256*1024*1024;
3702 gp.param = I915_PARAM_HAS_EXECBUF2;
3703 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3707 gp.param = I915_PARAM_HAS_BSD;
3708 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3709 bufmgr_gem->has_bsd = ret == 0;
3711 gp.param = I915_PARAM_HAS_BLT;
3712 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3713 bufmgr_gem->has_blt = ret == 0;
3715 gp.param = I915_PARAM_HAS_RELAXED_FENCING;
3716 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3717 bufmgr_gem->has_relaxed_fencing = ret == 0;
3719 gp.param = I915_PARAM_HAS_EXEC_ASYNC;
3720 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3721 bufmgr_gem->has_exec_async = ret == 0;
3723 bufmgr_gem->bufmgr.bo_alloc_userptr = check_bo_alloc_userptr;
3725 gp.param = I915_PARAM_HAS_WAIT_TIMEOUT;
3726 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3727 bufmgr_gem->has_wait_timeout = ret == 0;
3729 gp.param = I915_PARAM_HAS_LLC;
3730 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3732 /* Kernel does not support the HAS_LLC query; fall back to GPU
3733 * generation detection and assume that we have LLC on GEN6/7
3735 bufmgr_gem->has_llc = (IS_GEN6(bufmgr_gem->pci_device) ||
3736 IS_GEN7(bufmgr_gem->pci_device));
3738 bufmgr_gem->has_llc = *gp.value;
3740 gp.param = I915_PARAM_HAS_VEBOX;
3741 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3742 bufmgr_gem->has_vebox = (ret == 0) && (*gp.value > 0);
3744 gp.param = I915_PARAM_HAS_EXEC_SOFTPIN;
3745 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3746 if (ret == 0 && *gp.value > 0)
3747 bufmgr_gem->bufmgr.bo_set_softpin_offset = drm_intel_gem_bo_set_softpin_offset;
3749 if (bufmgr_gem->gen < 4) {
3750 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
3751 gp.value = &bufmgr_gem->available_fences;
3752 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3754 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
3756 fprintf(stderr, "param: %d, val: %d\n", gp.param,
3758 bufmgr_gem->available_fences = 0;
3760 /* XXX The kernel reports the total number of fences,
3761 * including any that may be pinned.
3763 * We presume that there will be at least one pinned
3764 * fence for the scanout buffer, but there may be more
3765 * than one scanout and the user may be manually
3766 * pinning buffers. Let's move to execbuffer2 and
3767 * thereby forget the insanity of using fences...
3769 bufmgr_gem->available_fences -= 2;
3770 if (bufmgr_gem->available_fences < 0)
3771 bufmgr_gem->available_fences = 0;
3775 if (bufmgr_gem->gen >= 8) {
3776 gp.param = I915_PARAM_HAS_ALIASING_PPGTT;
3777 ret = drmIoctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
3778 if (ret == 0 && *gp.value == 3)
3779 bufmgr_gem->bufmgr.bo_use_48b_address_range = drm_intel_gem_bo_use_48b_address_range;
3782 /* Let's go with one relocation for every 2 dwords (but round down a bit
3783 * since a power of two would mean an extra page allocation for the reloc
3784 * buffer).
3786 * Every 4 was too few for the blender benchmark.
3788 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
3790 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
3791 bufmgr_gem->bufmgr.bo_alloc_for_render =
3792 drm_intel_gem_bo_alloc_for_render;
3793 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
3794 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
3795 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
3796 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
3797 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
3798 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
3799 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
3800 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
3801 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
3802 bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
3803 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
3804 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
3805 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
3806 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
3807 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
3808 /* Use the new one if available */
3810 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
3811 bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
3813 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
3814 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
3815 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
3816 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_unref;
3817 bufmgr_gem->bufmgr.debug = 0;
3818 bufmgr_gem->bufmgr.check_aperture_space =
3819 drm_intel_gem_check_aperture_space;
3820 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
3821 bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
3822 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
3823 drm_intel_gem_get_pipe_from_crtc_id;
3824 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
3826 init_cache_buckets(bufmgr_gem);
3828 DRMINITLISTHEAD(&bufmgr_gem->vma_cache);
3829 bufmgr_gem->vma_max = -1; /* unlimited by default */
3831 DRMLISTADD(&bufmgr_gem->managers, &bufmgr_list);
3834 pthread_mutex_unlock(&bufmgr_list_mutex);
3836 return bufmgr_gem != NULL ? &bufmgr_gem->bufmgr : NULL;
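/*
 * Usage sketch (illustrative): typical bring-up for a client. The
 * device path and batch size are hypothetical; real callers usually
 * receive the fd from the windowing system.
 *
 *	int fd = open("/dev/dri/renderD128", O_RDWR);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);
 *
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);	// see enable_reuse above
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 0);
 *	// ... map, fill, and execute batches referencing bo ...
 *	drm_intel_bo_unreference(bo);
 *	drm_intel_bufmgr_destroy(bufmgr);	// drops this process's reference
 */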