/**************************************************************************
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/

/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <xf86atomic.h>
#include <sys/ioctl.h>
#include <sys/types.h>

#include "libdrm_lists.h"
#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "intel_chipset.h"

#define DBG(...) do {					\
	if (bufmgr_gem->bufmgr.debug)			\
		fprintf(stderr, __VA_ARGS__);		\
} while (0)

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
	drmMMListHead head;
	unsigned long size;
};

typedef struct _drm_intel_bufmgr_gem {
	drm_intel_bufmgr bufmgr;

	struct drm_i915_gem_exec_object *exec_objects;
	struct drm_i915_gem_exec_object2 *exec2_objects;
	drm_intel_bo **exec_bos;

	/** Array of lists of cached gem objects of power-of-two sizes */
	struct drm_intel_gem_bo_bucket cache_bucket[14 * 4];
} drm_intel_bufmgr_gem;

#define DRM_INTEL_RELOC_FENCE (1<<0)

typedef struct _drm_intel_reloc_target_info {
	drm_intel_bo *bo;
	int flags;
} drm_intel_reloc_target;

struct _drm_intel_bo_gem {
	/**
	 * Kernel-assigned global name for this object
	 */
	unsigned int global_name;

	/**
	 * Index of the buffer within the validation list while preparing a
	 * batchbuffer execution.
	 */

	/**
	 * Current tiling mode
	 */
	uint32_t tiling_mode;
	uint32_t swizzle_mode;
	unsigned long stride;

	/** Array passed to the DRM containing relocation information. */
	struct drm_i915_gem_relocation_entry *relocs;
	/**
	 * Array of info structs corresponding to relocs[i].target_handle etc
	 */
	drm_intel_reloc_target *reloc_target_info;
	/** Number of entries in relocs */
	/** Mapped address for the buffer, saved across map/unmap cycles */
	/** GTT virtual address for the buffer, saved across map/unmap cycles */

	/**
	 * Boolean of whether this BO and its children have been included in
	 * the current drm_intel_bufmgr_check_aperture_space() total.
	 */
	char included_in_check_aperture;

	/**
	 * Boolean of whether this buffer has been used as a relocation
	 * target and had its size accounted for, and thus can't have any
	 * further relocations added to it.
	 */
	char used_as_reloc_target;

	/**
	 * Boolean of whether we have encountered an error whilst building
	 * the relocation tree.
	 */

	/**
	 * Boolean of whether this buffer can be re-used
	 */

	/**
	 * Size in bytes of this buffer and its relocation descendants.
	 *
	 * Used to avoid costly tree walking in
	 * drm_intel_bufmgr_check_aperture in the common case.
	 */

	/**
	 * Number of potential fence registers required by this buffer and its
	 */
	int reloc_tree_fences;

drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);

drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);

drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode);

drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);

static void drm_intel_gem_bo_free(drm_intel_bo *bo);

drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
			   uint32_t *tiling_mode)
	unsigned long min_size, max_size;

	if (*tiling_mode == I915_TILING_NONE)

	/* 965+ just need multiples of page size for tiling */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(size, 4096);

	/* Older chips need powers of two, of at least 512k or 1M */
	if (bufmgr_gem->gen == 3) {
		min_size = 1024*1024;
		max_size = 128*1024*1024;
		max_size = 64*1024*1024;

	if (size > max_size) {
		*tiling_mode = I915_TILING_NONE;

	for (i = min_size; i < size; i <<= 1)
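
/*
 * Worked example of the rounding above (an illustrative sketch, not part
 * of the original file): a 600KB X-tiled request rounds up to the next
 * power of two on pre-965 parts, but only to the next page on 965+.
 */
#if 0
	uint32_t tiling = I915_TILING_X;
	unsigned long sz;

	sz = drm_intel_gem_bo_tile_size(bufmgr_gem, 600 * 1024, &tiling);
	/* gen 3: 1MB minimum, powers of two -> sz == 1024 * 1024 */
	/* gen 4+: page multiples suffice    -> sz == 600 * 1024  */
#endif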
/**
 * Round a given pitch up to the minimum required for X tiling on a
 * given chip. We use 512 as the minimum to allow for a later tiling
 */
drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
			    unsigned long pitch, uint32_t tiling_mode)
	unsigned long tile_width;

	/* If untiled, then just align it so that we can do rendering
	 * to it with the 3D engine.
	 */
	if (tiling_mode == I915_TILING_NONE)
		return ALIGN(pitch, 64);

	if (tiling_mode == I915_TILING_X)

	/* 965 is flexible */
	if (bufmgr_gem->gen >= 4)
		return ROUND_UP_TO(pitch, tile_width);

	/* Pre-965 needs power of two tile width */
	for (i = tile_width; i < pitch; i <<= 1)
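
/*
 * Worked example (a sketch assuming the X tile width of 512 bytes set
 * above): an 1100-byte pitch rounds up to the tile width on 965+, but to
 * a power of two of the tile width on older parts.
 */
#if 0
	unsigned long p;

	p = drm_intel_gem_bo_tile_pitch(bufmgr_gem, 1100, I915_TILING_X);
	/* gen 4+: ROUND_UP_TO(1100, 512) == 1536 */
	/* gen 2/3: 512 -> 1024 -> 2048            */
#endif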
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		if (bucket->size >= size) {

drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		if (bo_gem->relocs == NULL) {
			DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,

		for (j = 0; j < bo_gem->reloc_count; j++) {
			drm_intel_bo *target_bo = bo_gem->reloc_target_info[j].bo;
			drm_intel_bo_gem *target_gem =
			    (drm_intel_bo_gem *) target_bo;

			DBG("%2d: %d (%s)@0x%08llx -> "
			    "%d (%s)@0x%08lx + 0x%08x\n",
			    bo_gem->gem_handle, bo_gem->name,
			    (unsigned long long)bo_gem->relocs[j].offset,
			    target_gem->gem_handle,
			    bo_gem->relocs[j].delta);

drm_intel_gem_bo_reference(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	atomic_inc(&bo_gem->refcount);

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 */
drm_intel_add_validate_buffer(drm_intel_bo *bo)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->validate_index != -1)

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		bufmgr_gem->exec_objects =
		    realloc(bufmgr_gem->exec_objects,
			    sizeof(*bufmgr_gem->exec_objects) * new_size);
		bufmgr_gem->exec_bos =
		    realloc(bufmgr_gem->exec_bos,
			    sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
	bufmgr_gem->exec_objects[index].alignment = 0;
	bufmgr_gem->exec_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec_count++;

drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

	if (bo_gem->validate_index != -1) {
		bufmgr_gem->exec2_objects[bo_gem->validate_index].flags |=
			EXEC_OBJECT_NEEDS_FENCE;

	/* Extend the array of validation entries as necessary. */
	if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
		int new_size = bufmgr_gem->exec_size * 2;

		bufmgr_gem->exec2_objects =
			realloc(bufmgr_gem->exec2_objects,
				sizeof(*bufmgr_gem->exec2_objects) * new_size);
		bufmgr_gem->exec_bos =
			realloc(bufmgr_gem->exec_bos,
				sizeof(*bufmgr_gem->exec_bos) * new_size);
		bufmgr_gem->exec_size = new_size;

	index = bufmgr_gem->exec_count;
	bo_gem->validate_index = index;
	/* Fill in array entry */
	bufmgr_gem->exec2_objects[index].handle = bo_gem->gem_handle;
	bufmgr_gem->exec2_objects[index].relocation_count = bo_gem->reloc_count;
	bufmgr_gem->exec2_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
	bufmgr_gem->exec2_objects[index].alignment = 0;
	bufmgr_gem->exec2_objects[index].offset = 0;
	bufmgr_gem->exec_bos[index] = bo;
	bufmgr_gem->exec2_objects[index].flags = 0;
	bufmgr_gem->exec2_objects[index].rsvd1 = 0;
	bufmgr_gem->exec2_objects[index].rsvd2 = 0;
		bufmgr_gem->exec2_objects[index].flags |=
			EXEC_OBJECT_NEEDS_FENCE;
	bufmgr_gem->exec_count++;

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
			   sizeof(uint32_t))

drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
				      drm_intel_bo_gem *bo_gem)

	assert(!bo_gem->used_as_reloc_target);

	/* The older chipsets are far less flexible in terms of tiling,
	 * and require tiled buffers to be size-aligned in the aperture.
	 * This means that in the worst possible case we will need a hole
	 * twice as large as the object in order for it to fit into the
	 * aperture. Optimal packing is for wimps.
	 */
	size = bo_gem->bo.size;
	if (bufmgr_gem->gen < 4 && bo_gem->tiling_mode != I915_TILING_NONE)

	bo_gem->reloc_tree_size = size;

drm_intel_setup_reloc_list(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	unsigned int max_relocs = bufmgr_gem->max_relocs;

	if (bo->size / 4 < max_relocs)
		max_relocs = bo->size / 4;

	bo_gem->relocs = malloc(max_relocs *
				sizeof(struct drm_i915_gem_relocation_entry));
	bo_gem->reloc_target_info = malloc(max_relocs *
					   sizeof(drm_intel_reloc_target));
	if (bo_gem->relocs == NULL || bo_gem->reloc_target_info == NULL) {
		bo_gem->has_error = 1;

		free(bo_gem->relocs);
		bo_gem->relocs = NULL;

		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

drm_intel_gem_bo_busy(drm_intel_bo *bo)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_busy busy;

	memset(&busy, 0, sizeof(busy));
	busy.handle = bo_gem->gem_handle;

	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	} while (ret == -1 && errno == EINTR);

	return (ret == 0 && busy.busy);
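
/*
 * A note on the do/while loops around the ioctls in this file: a signal
 * can interrupt a slow ioctl with EINTR, so the call is simply restarted.
 * A minimal sketch of the same pattern as a standalone helper
 * (hypothetical; later libdrm exposes drmIoctl() for this purpose):
 */
#if 0
static int
retry_ioctl(int fd, unsigned long request, void *arg)
{
	int ret;

	do {
		ret = ioctl(fd, request, arg);
	} while (ret == -1 && errno == EINTR);

	return ret;
}
#endif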
drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
				  drm_intel_bo_gem *bo_gem, int state)

	struct drm_i915_gem_madvise madv;

	madv.handle = bo_gem->gem_handle;
	ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

	return madv.retained;

drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)

	return drm_intel_gem_bo_madvise_internal
		((drm_intel_bufmgr_gem *) bo->bufmgr,
		 (drm_intel_bo_gem *) bo,

/* drop the oldest entries that have been purged by the kernel */
drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
				    struct drm_intel_gem_bo_bucket *bucket)

	while (!DRMLISTEMPTY(&bucket->head)) {
		drm_intel_bo_gem *bo_gem;

		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))

		DRMLISTDEL(&bo_gem->head);
		drm_intel_gem_bo_free(&bo_gem->bo);
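
/*
 * Sketch of the madvise round trip the cache relies on (illustrative;
 * state values come from the kernel's i915_drm.h): DONTNEED marks an idle
 * cached BO's pages reclaimable, WILLNEED revokes that before reuse, and
 * `retained` reports whether the kernel kept the pages in the meantime.
 */
#if 0
	/* On unreference into the cache: allow reclaim. */
	drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					  I915_MADV_DONTNEED);

	/* On cache hit: take it back; if not retained, the backing store
	 * was purged and the BO must be freed instead of reused. */
	if (!drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					       I915_MADV_WILLNEED))
		drm_intel_gem_bo_free(&bo_gem->bo);
#endif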
static drm_intel_bo *
drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
				uint32_t tiling_mode,
				unsigned long stride)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	unsigned int page_size = getpagesize();
	struct drm_intel_gem_bo_bucket *bucket;
	int alloc_from_cache;
	unsigned long bo_size;

	if (flags & BO_ALLOC_FOR_RENDER)

	/* Round the allocated size up to a power of two number of pages. */
	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);

	/* If we don't have caching at this size, don't actually round the
	 */
	if (bucket == NULL) {
		if (bo_size < page_size)

		bo_size = bucket->size;

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Get a buffer out of the cache if available */
	alloc_from_cache = 0;
	if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
		/* Allocate new render-target BOs from the tail (MRU)
		 * of the list, as it will likely be hot in the GPU
		 * cache and in the aperture for us.
		 */
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.prev, head);
		DRMLISTDEL(&bo_gem->head);
		alloc_from_cache = 1;

		/* For non-render-target BOs (where we're probably
		 * going to map it first thing in order to fill it
		 * with data), check if the last BO in the cache is
		 * unbusy, and only reuse in that case. Otherwise,
		 * allocating a new buffer is probably faster than
		 * waiting for the GPU to finish.
		 */
		bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
				      bucket->head.next, head);
		if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
			alloc_from_cache = 1;
			DRMLISTDEL(&bo_gem->head);

	if (alloc_from_cache) {
		if (!drm_intel_gem_bo_madvise_internal
		    (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
			drm_intel_gem_bo_free(&bo_gem->bo);
			drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
			drm_intel_gem_bo_free(&bo_gem->bo);

	pthread_mutex_unlock(&bufmgr_gem->lock);

	if (!alloc_from_cache) {
		struct drm_i915_gem_create create;

		bo_gem = calloc(1, sizeof(*bo_gem));

		bo_gem->bo.size = bo_size;
		memset(&create, 0, sizeof(create));
		create.size = bo_size;

		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_CREATE,
				    &create);
		} while (ret == -1 && errno == EINTR);
		bo_gem->gem_handle = create.handle;
		bo_gem->bo.handle = bo_gem->gem_handle;

		bo_gem->bo.bufmgr = bufmgr;

		bo_gem->tiling_mode = I915_TILING_NONE;
		bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;

		if (drm_intel_gem_bo_set_tiling_internal(&bo_gem->bo,
			drm_intel_gem_bo_free(&bo_gem->bo);

	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->reloc_tree_fences = 0;
	bo_gem->used_as_reloc_target = 0;
	bo_gem->has_error = 0;
	bo_gem->reusable = 1;

	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create: buf %d (%s) %ldb\n",
	    bo_gem->gem_handle, bo_gem->name, size);

static drm_intel_bo *
drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
				  unsigned int alignment)

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
					       I915_TILING_NONE, 0);

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
		       unsigned int alignment)

	return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0,
					       I915_TILING_NONE, 0);

static drm_intel_bo *
drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
			     int x, int y, int cpp, uint32_t *tiling_mode,
			     unsigned long *pitch, unsigned long flags)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
	unsigned long size, stride;
	unsigned long aligned_y;

	tiling = *tiling_mode;

	/* If we're tiled, our allocations are in 8 or 32-row blocks,
	 * so failure to align our height means that we won't allocate
	 *
	 * If we're untiled, we still have to align to 2 rows high
	 * because the data port accesses 2x2 blocks even if the
	 * bottom row isn't to be rendered, so failure to align means
	 * we could walk off the end of the GTT and fault. This is
	 * documented on 965, and may be the case on older chipsets
	 * too so we try to be careful.
	 */
	if (tiling == I915_TILING_NONE)
		aligned_y = ALIGN(y, 2);
	else if (tiling == I915_TILING_X)
		aligned_y = ALIGN(y, 8);
	else if (tiling == I915_TILING_Y)
		aligned_y = ALIGN(y, 32);

		stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, tiling);
		size = stride * aligned_y;
		size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
	} while (*tiling_mode != tiling);

	if (*tiling_mode == I915_TILING_NONE)

	bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags,
					     *tiling_mode, stride);
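
/*
 * Example use of the tiled-allocation path through the public entry point
 * (a sketch; the bufmgr comes from drm_intel_bufmgr_gem_init() and error
 * handling is omitted). *tiling_mode is in/out: it may come back as
 * I915_TILING_NONE if the chipset cannot tile this layout.
 */
#if 0
	uint32_t tiling = I915_TILING_X;
	unsigned long pitch;
	drm_intel_bo *bo;

	/* 1024x768 at 32bpp; pitch is rounded per the helpers above. */
	bo = drm_intel_bo_alloc_tiled(bufmgr, "front buffer",
				      1024, 768, 4, &tiling, &pitch, 0);
#endif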
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 */
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	drm_intel_bo_gem *bo_gem;
	struct drm_gem_open open_arg;
	struct drm_i915_gem_get_tiling get_tiling;

	bo_gem = calloc(1, sizeof(*bo_gem));

	memset(&open_arg, 0, sizeof(open_arg));
	open_arg.name = handle;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_GEM_OPEN,
			    &open_arg);
	} while (ret == -1 && errno == EINTR);
		fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
			name, handle, strerror(errno));

	bo_gem->bo.size = open_arg.size;
	bo_gem->bo.offset = 0;
	bo_gem->bo.virtual = NULL;
	bo_gem->bo.bufmgr = bufmgr;

	atomic_set(&bo_gem->refcount, 1);
	bo_gem->validate_index = -1;
	bo_gem->gem_handle = open_arg.handle;
	bo_gem->global_name = handle;
	bo_gem->reusable = 0;

	memset(&get_tiling, 0, sizeof(get_tiling));
	get_tiling.handle = bo_gem->gem_handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
		drm_intel_gem_bo_unreference(&bo_gem->bo);

	bo_gem->tiling_mode = get_tiling.tiling_mode;
	bo_gem->swizzle_mode = get_tiling.swizzle_mode;
	/* XXX stride is unknown */
	drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

drm_intel_gem_bo_free(drm_intel_bo *bo)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_close close;

	if (bo_gem->mem_virtual)
		munmap(bo_gem->mem_virtual, bo_gem->bo.size);
	if (bo_gem->gtt_virtual)
		munmap(bo_gem->gtt_virtual, bo_gem->bo.size);

	/* Close this object */
	memset(&close, 0, sizeof(close));
	close.handle = bo_gem->gem_handle;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
			"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
			bo_gem->gem_handle, bo_gem->name, strerror(errno));

/** Frees all cached buffers significantly older than @time. */
drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)

	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];

		while (!DRMLISTEMPTY(&bucket->head)) {
			drm_intel_bo_gem *bo_gem;

			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			if (time - bo_gem->free_time <= 1)

			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);

drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_intel_gem_bo_bucket *bucket;

	/* Unreference all the target buffers */
	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo != bo) {
			drm_intel_gem_bo_unreference_locked_timed(bo_gem->
								  reloc_target_info[i].bo,
								  time);
	bo_gem->reloc_count = 0;
	bo_gem->used_as_reloc_target = 0;

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	/* release memory associated with this object */
	if (bo_gem->reloc_target_info) {
		free(bo_gem->reloc_target_info);
		bo_gem->reloc_target_info = NULL;

	if (bo_gem->relocs) {
		free(bo_gem->relocs);
		bo_gem->relocs = NULL;

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
	    drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
					      I915_MADV_DONTNEED)) {
		bo_gem->free_time = time;

		bo_gem->validate_index = -1;

		DRMLISTADDTAIL(&bo_gem->head, &bucket->head);

		drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);

		drm_intel_gem_bo_free(bo);

static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount))
		drm_intel_gem_bo_unreference_final(bo, time);

static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(atomic_read(&bo_gem->refcount) > 0);
	if (atomic_dec_and_test(&bo_gem->refcount)) {
		drm_intel_bufmgr_gem *bufmgr_gem =
		    (drm_intel_bufmgr_gem *) bo->bufmgr;
		struct timespec time;

		clock_gettime(CLOCK_MONOTONIC, &time);

		pthread_mutex_lock(&bufmgr_gem->lock);
		drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
		pthread_mutex_unlock(&bufmgr_gem->lock);

static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Allow recursive mapping. Mesa may recursively map buffers with
	 * nested display loops.
	 */
	if (!bo_gem->mem_virtual) {
		struct drm_i915_gem_mmap mmap_arg;

		DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;
		mmap_arg.size = bo->size;
		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_MMAP,
				    &mmap_arg);
		} while (ret == -1 && errno == EINTR);
			fprintf(stderr,
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				__FILE__, __LINE__, bo_gem->gem_handle,
				bo_gem->name, strerror(errno));
			pthread_mutex_unlock(&bufmgr_gem->lock);

		bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;

	DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->mem_virtual);
	bo->virtual = bo_gem->mem_virtual;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = I915_GEM_DOMAIN_CPU;
		set_domain.write_domain = 0;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
		fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
		pthread_mutex_unlock(&bufmgr_gem->lock);

	pthread_mutex_unlock(&bufmgr_gem->lock);

int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Get a mapping of the buffer if we haven't before. */
	if (bo_gem->gtt_virtual == NULL) {
		struct drm_i915_gem_mmap_gtt mmap_arg;

		DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,

		memset(&mmap_arg, 0, sizeof(mmap_arg));
		mmap_arg.handle = bo_gem->gem_handle;

		/* Get the fake offset back... */
		do {
			ret = ioctl(bufmgr_gem->fd,
				    DRM_IOCTL_I915_GEM_MMAP_GTT,
				    &mmap_arg);
		} while (ret == -1 && errno == EINTR);
			fprintf(stderr,
				"%s:%d: Error preparing buffer map %d (%s): %s .\n",
				bo_gem->gem_handle, bo_gem->name,
			pthread_mutex_unlock(&bufmgr_gem->lock);

		bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
					   MAP_SHARED, bufmgr_gem->fd,
		if (bo_gem->gtt_virtual == MAP_FAILED) {
			bo_gem->gtt_virtual = NULL;
				"%s:%d: Error mapping buffer %d (%s): %s .\n",
				bo_gem->gem_handle, bo_gem->name,
			pthread_mutex_unlock(&bufmgr_gem->lock);

	bo->virtual = bo_gem->gtt_virtual;

	DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	    bo_gem->gtt_virtual);

	/* Now move it to the GTT domain so that the CPU caches are flushed */
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = I915_GEM_DOMAIN_GTT;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
		fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
			__FILE__, __LINE__, bo_gem->gem_handle,

	pthread_mutex_unlock(&bufmgr_gem->lock);

int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	assert(bo_gem->gtt_virtual != NULL);

	pthread_mutex_lock(&bufmgr_gem->lock);
	pthread_mutex_unlock(&bufmgr_gem->lock);

static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_sw_finish sw_finish;

	assert(bo_gem->mem_virtual != NULL);

	pthread_mutex_lock(&bufmgr_gem->lock);

	/* Cause a flush to happen if the buffer's pinned for scanout, so the
	 * results show up in a timely manner.
	 */
	sw_finish.handle = bo_gem->gem_handle;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SW_FINISH,
			    &sw_finish);
	} while (ret == -1 && errno == EINTR);
	ret = ret == -1 ? -errno : 0;

	pthread_mutex_unlock(&bufmgr_gem->lock);
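
/*
 * Typical CPU map/modify/unmap cycle against the public API (a sketch;
 * `bo`, `data` and `size` are assumed to exist and error checks are
 * omitted). drm_intel_bo_map() dispatches to drm_intel_gem_bo_map() above.
 */
#if 0
	if (drm_intel_bo_map(bo, 1 /* write_enable */) == 0) {
		memcpy(bo->virtual, data, size);
		drm_intel_bo_unmap(bo);
	}
#endif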
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pwrite pwrite;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = bo_gem->gem_handle;
	pwrite.offset = offset;
	pwrite.data_ptr = (uint64_t) (uintptr_t) data;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_PWRITE,
			    &pwrite);
	} while (ret == -1 && errno == EINTR);
		fprintf(stderr,
			"%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));

drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
	struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;

	get_pipe_from_crtc_id.crtc_id = crtc_id;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
		    &get_pipe_from_crtc_id);
		/* We return -1 here to signal that we don't
		 * know which pipe is associated with this crtc.
		 * This lets the caller know that this information
		 * isn't available; using the wrong pipe for
		 * vblank waiting can cause the chipset to lock up
		 */

	return get_pipe_from_crtc_id.pipe;

drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pread pread;

	memset(&pread, 0, sizeof(pread));
	pread.handle = bo_gem->gem_handle;
	pread.offset = offset;
	pread.data_ptr = (uint64_t) (uintptr_t) data;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_PREAD,
			    &pread);
	} while (ret == -1 && errno == EINTR);
		fprintf(stderr,
			"%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
			(int)size, strerror(errno));

/** Waits for all GPU rendering to the object to have completed. */
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)

	drm_intel_gem_bo_start_gtt_access(bo, 0);

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_domain set_domain;

	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_GTT;
	set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_DOMAIN,
			    &set_domain);
	} while (ret == -1 && errno == EINTR);
		fprintf(stderr,
			"%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
			__FILE__, __LINE__, bo_gem->gem_handle,
			set_domain.read_domains, set_domain.write_domain,

drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	free(bufmgr_gem->exec2_objects);
	free(bufmgr_gem->exec_objects);
	free(bufmgr_gem->exec_bos);

	pthread_mutex_destroy(&bufmgr_gem->lock);

	/* Free any cached buffer objects we were going to reuse */
	for (i = 0; i < bufmgr_gem->num_buckets; i++) {
		struct drm_intel_gem_bo_bucket *bucket =
		    &bufmgr_gem->cache_bucket[i];
		drm_intel_bo_gem *bo_gem;

		while (!DRMLISTEMPTY(&bucket->head)) {
			bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
					      bucket->head.next, head);
			DRMLISTDEL(&bo_gem->head);

			drm_intel_gem_bo_free(&bo_gem->bo);

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
		 drm_intel_bo *target_bo, uint32_t target_offset,
		 uint32_t read_domains, uint32_t write_domain,

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo_gem->has_error)

	if (target_bo_gem->has_error) {
		bo_gem->has_error = 1;

	if (target_bo_gem->tiling_mode == I915_TILING_NONE)

	/* We never use HW fences for rendering on 965+ */
	if (bufmgr_gem->gen >= 4)

	/* Create a new relocation list if needed */
	if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))

	/* Check overflow */
	assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

	assert(offset <= bo->size - 4);
	assert((write_domain & (write_domain - 1)) == 0);

	/* Make sure that we're not adding a reloc to something whose size has
	 * already been accounted for.
	 */
	assert(!bo_gem->used_as_reloc_target);
	if (target_bo_gem != bo_gem) {
		target_bo_gem->used_as_reloc_target = 1;
		bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;

	/* An object needing a fence is a tiled buffer, so it won't have
	 * relocs to other buffers.
	 */
		target_bo_gem->reloc_tree_fences = 1;
	bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;

	/* Flag the target to disallow further relocations in it. */

	bo_gem->relocs[bo_gem->reloc_count].offset = offset;
	bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
	bo_gem->relocs[bo_gem->reloc_count].target_handle =
	    target_bo_gem->gem_handle;
	bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
	bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
	bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

	bo_gem->reloc_target_info[bo_gem->reloc_count].bo = target_bo;
	if (target_bo != bo)
		drm_intel_gem_bo_reference(target_bo);
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags =
			DRM_INTEL_RELOC_FENCE;
		bo_gem->reloc_target_info[bo_gem->reloc_count].flags = 0;

	bo_gem->reloc_count++;

drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain,
				!bufmgr_gem->fenced_relocs);

drm_intel_gem_bo_emit_reloc_fence(drm_intel_bo *bo, uint32_t offset,
				  drm_intel_bo *target_bo,
				  uint32_t target_offset,
				  uint32_t read_domains, uint32_t write_domain)

	return do_bo_emit_reloc(bo, offset, target_bo, target_offset,
				read_domains, write_domain, 1);
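
/*
 * Example of emitting a relocation via the public API (a sketch; `batch`
 * and `target` are assumed drm_intel_bo pointers and `offset` is the byte
 * offset of the DWORD in `batch` that already holds target's presumed
 * address). The kernel rewrites that DWORD at exec time if `target` moved.
 */
#if 0
	/* Batch DWORD at `offset` points at target + 0, read and written
	 * by the render engine. */
	drm_intel_bo_emit_reloc(batch, offset, target, 0,
				I915_GEM_DOMAIN_RENDER,
				I915_GEM_DOMAIN_RENDER);
#endif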
/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->relocs == NULL)

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc(target_bo);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer(target_bo);

drm_intel_gem_bo_process_reloc2(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

	if (bo_gem->relocs == NULL)

	for (i = 0; i < bo_gem->reloc_count; i++) {
		drm_intel_bo *target_bo = bo_gem->reloc_target_info[i].bo;

		if (target_bo == bo)

		/* Continue walking the tree depth-first. */
		drm_intel_gem_bo_process_reloc2(target_bo);

		need_fence = (bo_gem->reloc_target_info[i].flags &
			      DRM_INTEL_RELOC_FENCE);

		/* Add the target to the validate list */
		drm_intel_add_validate_buffer2(target_bo, need_fence);

drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec_objects[i].
			    offset);
			bo->offset = bufmgr_gem->exec_objects[i].offset;

drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Update the buffer offset */
		if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
			DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
			    bo_gem->gem_handle, bo_gem->name, bo->offset,
			    (unsigned long long)bufmgr_gem->exec2_objects[i].offset);
			bo->offset = bufmgr_gem->exec2_objects[i].offset;

drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t * cliprects, int num_cliprects, int DR4)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_execbuffer execbuf;

	if (bo_gem->has_error)

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc(bo);

	/* Add the batch buffer to the validation list. There are no
	 * relocations pointing to it.
	 */
	drm_intel_add_validate_buffer(bo);

	execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t) cliprects;
	execbuf.num_cliprects = num_cliprects;

	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_EXECBUFFER,
			    &execbuf);
	} while (ret != 0 && errno == EINTR);

		if (errno == ENOSPC) {
				"Execbuffer fails to pin. "
				"Estimate: %u. Actual: %u. Available: %u\n",
				drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
				drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
				(unsigned int)bufmgr_gem->gtt_size);

	drm_intel_update_buffer_offsets(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;

	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

drm_intel_gem_bo_mrb_exec2(drm_intel_bo *bo, int used,
			   drm_clip_rect_t *cliprects, int num_cliprects, int DR4,

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
	struct drm_i915_gem_execbuffer2 execbuf;

	if ((ring_flag != I915_EXEC_RENDER) && (ring_flag != I915_EXEC_BSD))

	pthread_mutex_lock(&bufmgr_gem->lock);
	/* Update indices and set up the validate list. */
	drm_intel_gem_bo_process_reloc2(bo);

	/* Add the batch buffer to the validation list. There are no relocations
	 * pointing to it.
	 */
	drm_intel_add_validate_buffer2(bo, 0);

	execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec2_objects;
	execbuf.buffer_count = bufmgr_gem->exec_count;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = used;
	execbuf.cliprects_ptr = (uintptr_t)cliprects;
	execbuf.num_cliprects = num_cliprects;
	execbuf.flags = ring_flag;

	do {
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
			    &execbuf);
	} while (ret != 0 && errno == EINTR);

	if (ret == -ENOMEM) {
			"Execbuffer fails to pin. "
			"Estimate: %u. Actual: %u. Available: %u\n",
			drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
							   bufmgr_gem->exec_count),
			drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
							  bufmgr_gem->exec_count),
			(unsigned int) bufmgr_gem->gtt_size);

	drm_intel_update_buffer_offsets2(bufmgr_gem);

	if (bufmgr_gem->bufmgr.debug)
		drm_intel_gem_dump_validation_list(bufmgr_gem);

	for (i = 0; i < bufmgr_gem->exec_count; i++) {
		drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

		/* Disconnect the buffer from the validate list */
		bo_gem->validate_index = -1;
		bufmgr_gem->exec_bos[i] = NULL;

	bufmgr_gem->exec_count = 0;
	pthread_mutex_unlock(&bufmgr_gem->lock);

drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
		       drm_clip_rect_t *cliprects, int num_cliprects,

	return drm_intel_gem_bo_mrb_exec2(bo, used,
					  cliprects, num_cliprects, DR4,

drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_pin pin;

	memset(&pin, 0, sizeof(pin));
	pin.handle = bo_gem->gem_handle;
	pin.alignment = alignment;

	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_PIN,
			    &pin);
	} while (ret == -1 && errno == EINTR);

	bo->offset = pin.offset;

drm_intel_gem_bo_unpin(drm_intel_bo *bo)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_unpin unpin;

	memset(&unpin, 0, sizeof(unpin));
	unpin.handle = bo_gem->gem_handle;

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);

drm_intel_gem_bo_set_tiling_internal(drm_intel_bo *bo,
				     uint32_t tiling_mode,

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_i915_gem_set_tiling set_tiling;

	if (tiling_mode == bo_gem->tiling_mode &&
	    stride == bo_gem->stride)

	memset(&set_tiling, 0, sizeof(set_tiling));

	set_tiling.handle = bo_gem->gem_handle;
	set_tiling.tiling_mode = tiling_mode;
	set_tiling.stride = stride;

	do {
		ret = ioctl(bufmgr_gem->fd,
			    DRM_IOCTL_I915_GEM_SET_TILING,
			    &set_tiling);
	} while (ret == -1 && errno == EINTR);

	bo_gem->tiling_mode = set_tiling.tiling_mode;
	bo_gem->swizzle_mode = set_tiling.swizzle_mode;
	bo_gem->stride = stride;

drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo_gem->global_name == 0)

	ret = drm_intel_gem_bo_set_tiling_internal(bo, *tiling_mode, stride);
		drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);

	*tiling_mode = bo_gem->tiling_mode;

drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
			    uint32_t * swizzle_mode)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	*tiling_mode = bo_gem->tiling_mode;
	*swizzle_mode = bo_gem->swizzle_mode;

drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
	struct drm_gem_flink flink;

	if (!bo_gem->global_name) {
		memset(&flink, 0, sizeof(flink));
		flink.handle = bo_gem->gem_handle;

		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);

		bo_gem->global_name = flink.name;
		bo_gem->reusable = 0;

	*name = bo_gem->global_name;

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;

	bufmgr_gem->bo_reuse = 1;
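
/*
 * Typical opt-in right after creating the manager (a sketch; `fd` is an
 * already-open DRM device descriptor):
 */
#if 0
	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096);

	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
	drm_intel_bufmgr_gem_enable_fenced_relocs(bufmgr);
#endif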
/**
 * Enable use of fenced reloc type.
 *
 * New code should enable this to avoid unnecessary fence register
 * allocation. If this option is not enabled, all relocs will have a fence
 * register allocated.
 */
drm_intel_bufmgr_gem_enable_fenced_relocs(drm_intel_bufmgr *bufmgr)

	drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

	if (bufmgr_gem->bufmgr.bo_exec == drm_intel_gem_bo_exec2)
		bufmgr_gem->fenced_relocs = 1;

/**
 * Return the additional aperture space required by the tree of buffer objects
 */
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo == NULL || bo_gem->included_in_check_aperture)

	bo_gem->included_in_check_aperture = 1;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_get_aperture_space(bo_gem->
						    reloc_target_info[i].bo);

/**
 * Count the number of buffers in this list that need a fence reg
 *
 * If the count is greater than the number of available regs, we'll have
 * to ask the caller to resubmit a batch with fewer tiled buffers.
 *
 * This function over-counts if the same buffer is used multiple times.
 */
drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)

	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		total += bo_gem->reloc_tree_fences;

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	if (bo == NULL || !bo_gem->included_in_check_aperture)

	bo_gem->included_in_check_aperture = 0;

	for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
							   reloc_target_info[i].bo);

/**
 * Return a conservative estimate for the amount of aperture required
 * for a collection of buffers. This may double-count some buffers.
 */
drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)

	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];

		total += bo_gem->reloc_tree_size;

/**
 * Return the amount of aperture needed for a collection of buffers.
 * This avoids double counting any buffers, at the cost of looking
 * at every buffer in the set.
 */
drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)

	unsigned int total = 0;

	for (i = 0; i < count; i++) {
		total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
		/* For the first buffer object in the array, we get an
		 * accurate count back for its reloc_tree size (since nothing
		 * had been flagged as being counted yet). We can save that
		 * value out as a more conservative reloc_tree_size that
		 * avoids double-counting target buffers. Since the first
		 * buffer happens to usually be the batch buffer in our
		 * callers, this can pull us back from doing the tree
		 * walk on every new batch emit.
		 */
			drm_intel_bo_gem *bo_gem =
			    (drm_intel_bo_gem *) bo_array[i];
			bo_gem->reloc_tree_size = total;

	for (i = 0; i < count; i++)
		drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);

/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)

	drm_intel_bufmgr_gem *bufmgr_gem =
	    (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
	unsigned int total = 0;
	unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;

	/* Check for fence reg constraints if necessary */
	if (bufmgr_gem->available_fences) {
		total_fences = drm_intel_gem_total_fences(bo_array, count);
		if (total_fences > bufmgr_gem->available_fences)

	total = drm_intel_gem_estimate_batch_space(bo_array, count);

	if (total > threshold)
		total = drm_intel_gem_compute_batch_space(bo_array, count);

	if (total > threshold) {
		DBG("check_space: overflowed available aperture, "
		    total / 1024, (int)bufmgr_gem->gtt_size / 1024);

	DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
	    (int)bufmgr_gem->gtt_size / 1024);
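
/*
 * Typical use from a batchbuffer emitter (a sketch; `batch_bo`, the other
 * BOs and the flush helper are assumptions standing in for the caller's
 * own state): flush the current batch first if the BOs referenced by the
 * next primitive might not all fit in the aperture.
 */
#if 0
	drm_intel_bo *bos[3] = { batch_bo, texture_bo, target_bo };

	if (drm_intel_bufmgr_check_aperture_space(bos, 3) != 0)
		intel_batchbuffer_flush(batch);	/* hypothetical helper */
#endif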
/**
 * Disable buffer reuse for objects which are shared with the kernel
 * as scanout buffers
 */
drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	bo_gem->reusable = 0;

drm_intel_gem_bo_is_reusable(drm_intel_bo *bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	return bo_gem->reusable;

_drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)

	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;

	for (i = 0; i < bo_gem->reloc_count; i++) {
		if (bo_gem->reloc_target_info[i].bo == target_bo)
		if (bo == bo_gem->reloc_target_info[i].bo)
		if (_drm_intel_gem_bo_references(bo_gem->reloc_target_info[i].bo,

/** Return true if target_bo is referenced by bo's relocation tree. */
drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)

	drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;

	if (bo == NULL || target_bo == NULL)

	if (target_bo_gem->used_as_reloc_target)
		return _drm_intel_gem_bo_references(bo, target_bo);

add_bucket(drm_intel_bufmgr_gem *bufmgr_gem, int size)

	unsigned int i = bufmgr_gem->num_buckets;

	assert(i < ARRAY_SIZE(bufmgr_gem->cache_bucket));

	DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
	bufmgr_gem->cache_bucket[i].size = size;
	bufmgr_gem->num_buckets++;

init_cache_buckets(drm_intel_bufmgr_gem *bufmgr_gem)

	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough. (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(bufmgr_gem, 4096);
	add_bucket(bufmgr_gem, 4096 * 2);
	add_bucket(bufmgr_gem, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(bufmgr_gem, size);

		add_bucket(bufmgr_gem, size + size * 1 / 4);
		add_bucket(bufmgr_gem, size + size * 2 / 4);
		add_bucket(bufmgr_gem, size + size * 3 / 4);
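
/*
 * The ladder this produces (a worked example of the loop above): 4KB,
 * 8KB, 12KB, then for each power of two from 16KB through 64MB the power
 * of two itself plus three quarter steps, i.e. 16KB, 20KB, 24KB, 28KB,
 * 32KB, 40KB, 48KB, 56KB, 64KB, ... A sketch that dumps the table:
 */
#if 0
	for (i = 0; i < bufmgr_gem->num_buckets; i++)
		printf("bucket[%u] = %lu bytes\n",
		       i, bufmgr_gem->cache_bucket[i].size);
#endif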
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr_gem_init(int fd, int batch_size)

	drm_intel_bufmgr_gem *bufmgr_gem;
	struct drm_i915_gem_get_aperture aperture;
	drm_i915_getparam_t gp;
	int exec2 = 0, has_bsd = 0;

	bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
	if (bufmgr_gem == NULL)

	bufmgr_gem->fd = fd;

	if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);

		bufmgr_gem->gtt_size = aperture.aper_available_size;

		fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
		bufmgr_gem->gtt_size = 128 * 1024 * 1024;
		fprintf(stderr, "Assuming %dkB available aperture size.\n"
			"May lead to reduced performance or incorrect "
			(int)bufmgr_gem->gtt_size / 1024);

	gp.param = I915_PARAM_CHIPSET_ID;
	gp.value = &bufmgr_gem->pci_device;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
		fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
		fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);

	if (IS_GEN2(bufmgr_gem))
		bufmgr_gem->gen = 2;
	else if (IS_GEN3(bufmgr_gem))
		bufmgr_gem->gen = 3;
	else if (IS_GEN4(bufmgr_gem))
		bufmgr_gem->gen = 4;
		bufmgr_gem->gen = 6;

	gp.param = I915_PARAM_HAS_EXECBUF2;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);

	gp.param = I915_PARAM_HAS_BSD;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);

	if (bufmgr_gem->gen < 4) {
		gp.param = I915_PARAM_NUM_FENCES_AVAIL;
		gp.value = &bufmgr_gem->available_fences;
		ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
			fprintf(stderr, "get fences failed: %d [%d]\n", ret,
			fprintf(stderr, "param: %d, val: %d\n", gp.param,
			bufmgr_gem->available_fences = 0;

		/* XXX The kernel reports the total number of fences,
		 * including any that may be pinned.
		 *
		 * We presume that there will be at least one pinned
		 * fence for the scanout buffer, but there may be more
		 * than one scanout and the user may be manually
		 * pinning buffers. Let's move to execbuffer2 and
		 * thereby forget the insanity of using fences...
		 */
		bufmgr_gem->available_fences -= 2;
		if (bufmgr_gem->available_fences < 0)
			bufmgr_gem->available_fences = 0;

	/* Let's go with one relocation per every 2 dwords (but round down a bit
	 * since a power of two will mean an extra page allocation for the reloc
	 *
	 * Every 4 was too few for the blender benchmark.
	 */
	bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

	bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
	bufmgr_gem->bufmgr.bo_alloc_for_render =
	    drm_intel_gem_bo_alloc_for_render;
	bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
	bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
	bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
	bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
	bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
	bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
	bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
	bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
	bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
	bufmgr_gem->bufmgr.bo_emit_reloc_fence = drm_intel_gem_bo_emit_reloc_fence;
	bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
	bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
	bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
	bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
	bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
	/* Use the new one if available */
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec2;
			bufmgr_gem->bufmgr.bo_mrb_exec = drm_intel_gem_bo_mrb_exec2;
		bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;

	bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
	bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
	bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
	bufmgr_gem->bufmgr.debug = 0;
	bufmgr_gem->bufmgr.check_aperture_space =
	    drm_intel_gem_check_aperture_space;
	bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
	bufmgr_gem->bufmgr.bo_is_reusable = drm_intel_gem_bo_is_reusable;
	bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
	    drm_intel_gem_get_pipe_from_crtc_id;
	bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;

	init_cache_buckets(bufmgr_gem);

	return &bufmgr_gem->bufmgr;
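
/*
 * Typical bring-up and teardown of this manager from a driver (a sketch;
 * BATCH_SZ is a caller-chosen batch size such as 4096, and error handling
 * is omitted):
 */
#if 0
	int fd = drmOpen("i915", NULL);	/* or the fd handed over by DRI2 */
	drm_intel_bufmgr *bufmgr;

	bufmgr = drm_intel_bufmgr_gem_init(fd, BATCH_SZ);
	drm_intel_bufmgr_gem_enable_reuse(bufmgr);

	/* ... allocate, map, relocate and exec BOs ... */

	drm_intel_bufmgr_destroy(bufmgr);
#endif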