1 /**************************************************************************
3 * Copyright © 2007 Red Hat Inc.
4 * Copyright © 2007 Intel Corporation
5 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the
10 * "Software"), to deal in the Software without restriction, including
11 * without limitation the rights to use, copy, modify, merge, publish,
12 * distribute, sub license, and/or sell copies of the Software, and to
13 * permit persons to whom the Software is furnished to do so, subject to
14 * the following conditions:
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
20 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * The above copyright notice and this permission notice (including the
25 * next paragraph) shall be included in all copies or substantial portions of the Software.
29 **************************************************************************/
31 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
32 * Keith Whitwell <keithw-at-tungstengraphics-dot-com>
33 * Eric Anholt <eric@anholt.net>
34 * Dave Airlie <airlied@linux.ie>
49 #include <sys/ioctl.h>
52 #include <sys/types.h>
55 #include "libdrm_lists.h"
56 #include "intel_atomic.h"
57 #include "intel_bufmgr.h"
58 #include "intel_bufmgr_priv.h"
59 #include "intel_chipset.h"
64 #define DBG(...) do { \
65 if (bufmgr_gem->bufmgr.debug) \
66 fprintf(stderr, __VA_ARGS__); \
69 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
71 struct drm_intel_gem_bo_bucket {
76 /* Only cache objects up to 64MB. Bigger than that, and the rounding of the
77 * size makes many operations fail that wouldn't otherwise.
79 #define DRM_INTEL_GEM_BO_BUCKETS 14
80 typedef struct _drm_intel_bufmgr_gem {
81 drm_intel_bufmgr bufmgr;
89 struct drm_i915_gem_exec_object *exec_objects;
90 drm_intel_bo **exec_bos;
94 /** Array of lists of cached gem objects of power-of-two sizes */
95 struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
101 } drm_intel_bufmgr_gem;
103 struct _drm_intel_bo_gem {
111 * Kernel-assigned global name for this object
113 unsigned int global_name;
116 * Index of the buffer within the validation list while preparing a
117 * batchbuffer execution.
122 * Current tiling mode
124 uint32_t tiling_mode;
125 uint32_t swizzle_mode;
129 /** Array passed to the DRM containing relocation information. */
130 struct drm_i915_gem_relocation_entry *relocs;
131 /** Array of bos corresponding to relocs[i].target_handle */
132 drm_intel_bo **reloc_target_bo;
133 /** Number of entries in relocs */
135 /** Mapped address for the buffer, saved across map/unmap cycles */
137 /** GTT virtual address for the buffer, saved across map/unmap cycles */
144 * Boolean of whether this BO and its children have been included in
145 * the current drm_intel_bufmgr_check_aperture_space() total.
147 char included_in_check_aperture;
150 * Boolean of whether this buffer has been used as a relocation
151 * target and had its size accounted for, and thus can't have any
152 * further relocations added to it.
154 char used_as_reloc_target;
157 * Boolean of whether we have encountered an error whilst building the relocation tree.
162 * Boolean of whether this buffer can be re-used
167 * Size in bytes of this buffer and its relocation descendants.
169 * Used to avoid costly tree walking in
170 * drm_intel_bufmgr_check_aperture in the common case.
175 * Number of potential fence registers required by this buffer and its
178 int reloc_tree_fences;
182 drm_intel_gem_estimate_batch_space(drm_intel_bo ** bo_array, int count);
185 drm_intel_gem_compute_batch_space(drm_intel_bo ** bo_array, int count);
188 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
189 uint32_t * swizzle_mode);
192 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
195 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
198 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
200 static void drm_intel_gem_bo_free(drm_intel_bo *bo);
203 drm_intel_gem_bo_tile_size(drm_intel_bufmgr_gem *bufmgr_gem, unsigned long size,
204 uint32_t *tiling_mode)
206 unsigned long min_size, max_size;
209 if (*tiling_mode == I915_TILING_NONE)
212 /* 965+ just need multiples of page size for tiling */
213 if (!IS_GEN2(bufmgr_gem) && !IS_GEN3(bufmgr_gem))
214 return ROUND_UP_TO(size, 4096);
216 /* Older chips need powers of two, of at least 512k or 1M */
217 if (!IS_GEN2(bufmgr_gem)) {
218 min_size = 1024*1024;
219 max_size = 128*1024*1024;
222 max_size = 64*1024*1024;
225 if (size > max_size) {
226 *tiling_mode = I915_TILING_NONE;
230 for (i = min_size; i < size; i <<= 1)
237 * Round a given pitch up to the minimum required for X tiling on a
238 * given chip. We use 512 as the minimum to allow for a later tiling change.
242 drm_intel_gem_bo_tile_pitch(drm_intel_bufmgr_gem *bufmgr_gem,
243 unsigned long pitch, uint32_t tiling_mode)
245 unsigned long tile_width = 512;
248 if (tiling_mode == I915_TILING_NONE)
249 return ROUND_UP_TO(pitch, tile_width);
251 /* 965 is flexible */
252 if (!IS_GEN2(bufmgr_gem) && !IS_GEN3(bufmgr_gem))
253 return ROUND_UP_TO(pitch, tile_width);
255 /* Pre-965 needs power of two tile width */
256 for (i = tile_width; i < pitch; i <<= 1)
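/* Worked example (illustrative, not from the original source): for a
 * requested pitch of 1032 bytes with X tiling, a 965-class chip rounds the
 * pitch up to the next multiple of 512 (1536), while a pre-965 chip needs a
 * power-of-two pitch and ends up with 2048.
 */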
262 static struct drm_intel_gem_bo_bucket *
263 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
268 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
269 struct drm_intel_gem_bo_bucket *bucket =
270 &bufmgr_gem->cache_bucket[i];
271 if (bucket->size >= size) {
280 drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
284 for (i = 0; i < bufmgr_gem->exec_count; i++) {
285 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
286 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
288 if (bo_gem->relocs == NULL) {
289 DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
294 for (j = 0; j < bo_gem->reloc_count; j++) {
295 drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
296 drm_intel_bo_gem *target_gem =
297 (drm_intel_bo_gem *) target_bo;
299 DBG("%2d: %d (%s)@0x%08llx -> "
300 "%d (%s)@0x%08lx + 0x%08x\n",
302 bo_gem->gem_handle, bo_gem->name,
303 (unsigned long long)bo_gem->relocs[j].offset,
304 target_gem->gem_handle,
307 bo_gem->relocs[j].delta);
313 drm_intel_gem_bo_reference(drm_intel_bo *bo)
315 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
317 assert(atomic_read(&bo_gem->refcount) > 0);
318 atomic_inc(&bo_gem->refcount);
322 * Adds the given buffer to the list of buffers to be validated (moved into the
323 * appropriate memory type) with the next batch submission.
325 * If a buffer is validated multiple times in a batch submission, it ends up
326 * with the intersection of the memory type flags and the union of the access flags.
330 drm_intel_add_validate_buffer(drm_intel_bo *bo)
332 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
333 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
336 if (bo_gem->validate_index != -1)
339 /* Extend the array of validation entries as necessary. */
340 if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
341 int new_size = bufmgr_gem->exec_size * 2;
346 bufmgr_gem->exec_objects =
347 realloc(bufmgr_gem->exec_objects,
348 sizeof(*bufmgr_gem->exec_objects) * new_size);
349 bufmgr_gem->exec_bos =
350 realloc(bufmgr_gem->exec_bos,
351 sizeof(*bufmgr_gem->exec_bos) * new_size);
352 bufmgr_gem->exec_size = new_size;
355 index = bufmgr_gem->exec_count;
356 bo_gem->validate_index = index;
357 /* Fill in array entry */
358 bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
359 bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
360 bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
361 bufmgr_gem->exec_objects[index].alignment = 0;
362 bufmgr_gem->exec_objects[index].offset = 0;
363 bufmgr_gem->exec_bos[index] = bo;
364 bufmgr_gem->exec_count++;
367 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
371 drm_intel_bo_gem_set_in_aperture_size(drm_intel_bufmgr_gem *bufmgr_gem,
372 drm_intel_bo_gem *bo_gem)
376 assert(!bo_gem->used_as_reloc_target);
378 /* The older chipsets are far less flexible in terms of tiling,
379 * and require tiled buffers to be size-aligned in the aperture.
380 * This means that in the worst possible case we will need a hole
381 * twice as large as the object in order for it to fit into the
382 * aperture. Optimal packing is for wimps.
384 size = bo_gem->bo.size;
385 if ((IS_GEN2(bufmgr_gem) || IS_GEN3(bufmgr_gem))
386 && bo_gem->tiling_mode != I915_TILING_NONE)
389 bo_gem->reloc_tree_size = size;
393 drm_intel_setup_reloc_list(drm_intel_bo *bo)
395 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
396 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
397 unsigned int max_relocs = bufmgr_gem->max_relocs;
399 if (bo->size / 4 < max_relocs)
400 max_relocs = bo->size / 4;
402 bo_gem->relocs = malloc(max_relocs *
403 sizeof(struct drm_i915_gem_relocation_entry));
404 bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));
405 if (bo_gem->relocs == NULL || bo_gem->reloc_target_bo == NULL) {
406 bo_gem->has_error = 1;
408 free (bo_gem->relocs);
409 bo_gem->relocs = NULL;
411 free (bo_gem->reloc_target_bo);
412 bo_gem->reloc_target_bo = NULL;
421 drm_intel_gem_bo_busy(drm_intel_bo *bo)
423 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
424 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
425 struct drm_i915_gem_busy busy;
428 memset(&busy, 0, sizeof(busy));
429 busy.handle = bo_gem->gem_handle;
432 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
433 } while (ret == -1 && errno == EINTR);
435 return (ret == 0 && busy.busy);
439 drm_intel_gem_bo_madvise_internal(drm_intel_bufmgr_gem *bufmgr_gem,
440 drm_intel_bo_gem *bo_gem, int state)
442 struct drm_i915_gem_madvise madv;
444 madv.handle = bo_gem->gem_handle;
447 ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
449 return madv.retained;
453 drm_intel_gem_bo_madvise(drm_intel_bo *bo, int madv)
455 return drm_intel_gem_bo_madvise_internal
456 ((drm_intel_bufmgr_gem *) bo->bufmgr,
457 (drm_intel_bo_gem *) bo,
461 /* drop the oldest entries that have been purged by the kernel */
463 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
464 struct drm_intel_gem_bo_bucket *bucket)
466 while (!DRMLISTEMPTY(&bucket->head)) {
467 drm_intel_bo_gem *bo_gem;
469 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
470 bucket->head.next, head);
471 if (drm_intel_gem_bo_madvise_internal
472 (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
475 DRMLISTDEL(&bo_gem->head);
476 drm_intel_gem_bo_free(&bo_gem->bo);
480 static drm_intel_bo *
481 drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
486 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
487 drm_intel_bo_gem *bo_gem;
488 unsigned int page_size = getpagesize();
490 struct drm_intel_gem_bo_bucket *bucket;
491 int alloc_from_cache;
492 unsigned long bo_size;
495 if (flags & BO_ALLOC_FOR_RENDER)
498 /* Round the allocated size up to a power of two number of pages. */
499 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
501 /* If we don't have caching at this size, don't actually round the
504 if (bucket == NULL) {
506 if (bo_size < page_size)
509 bo_size = bucket->size;
512 pthread_mutex_lock(&bufmgr_gem->lock);
513 /* Get a buffer out of the cache if available */
515 alloc_from_cache = 0;
516 if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
518 /* Allocate new render-target BOs from the tail (MRU)
519 * of the list, as it will likely be hot in the GPU
520 * cache and in the aperture for us.
522 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
523 bucket->head.prev, head);
524 DRMLISTDEL(&bo_gem->head);
525 alloc_from_cache = 1;
527 /* For non-render-target BOs (where we're probably
528 * going to map it first thing in order to fill it
529 * with data), check if the last BO in the cache is
530 * unbusy, and only reuse in that case. Otherwise,
531 * allocating a new buffer is probably faster than
532 * waiting for the GPU to finish.
534 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
535 bucket->head.next, head);
536 if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
537 alloc_from_cache = 1;
538 DRMLISTDEL(&bo_gem->head);
542 if (alloc_from_cache) {
543 if (!drm_intel_gem_bo_madvise_internal
544 (bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
545 drm_intel_gem_bo_free(&bo_gem->bo);
546 drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
552 pthread_mutex_unlock(&bufmgr_gem->lock);
554 if (!alloc_from_cache) {
555 struct drm_i915_gem_create create;
557 bo_gem = calloc(1, sizeof(*bo_gem));
561 bo_gem->bo.size = bo_size;
562 memset(&create, 0, sizeof(create));
563 create.size = bo_size;
566 ret = ioctl(bufmgr_gem->fd,
567 DRM_IOCTL_I915_GEM_CREATE,
569 } while (ret == -1 && errno == EINTR);
570 bo_gem->gem_handle = create.handle;
571 bo_gem->bo.handle = bo_gem->gem_handle;
576 bo_gem->bo.bufmgr = bufmgr;
580 atomic_set(&bo_gem->refcount, 1);
581 bo_gem->validate_index = -1;
582 bo_gem->reloc_tree_fences = 0;
583 bo_gem->used_as_reloc_target = 0;
584 bo_gem->has_error = 0;
585 bo_gem->tiling_mode = I915_TILING_NONE;
586 bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
587 bo_gem->reusable = 1;
589 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
591 DBG("bo_create: buf %d (%s) %ldb\n",
592 bo_gem->gem_handle, bo_gem->name, size);
597 static drm_intel_bo *
598 drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
601 unsigned int alignment)
603 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size,
604 BO_ALLOC_FOR_RENDER);
607 static drm_intel_bo *
608 drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
611 unsigned int alignment)
613 return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, 0);
616 static drm_intel_bo *
617 drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
618 int x, int y, int cpp, uint32_t *tiling_mode,
619 unsigned long *pitch, unsigned long flags)
621 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
623 unsigned long size, stride, aligned_y = y;
626 if (*tiling_mode == I915_TILING_NONE)
627 aligned_y = ALIGN(y, 2);
628 else if (*tiling_mode == I915_TILING_X)
629 aligned_y = ALIGN(y, 8);
630 else if (*tiling_mode == I915_TILING_Y)
631 aligned_y = ALIGN(y, 32);
634 stride = drm_intel_gem_bo_tile_pitch(bufmgr_gem, stride, *tiling_mode);
635 size = stride * aligned_y;
636 size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
638 bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
642 ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
644 drm_intel_gem_bo_unreference(bo);
654 * Returns a drm_intel_bo wrapping the given buffer object handle.
656 * This can be used when one application needs to pass a buffer object to another.
660 drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
664 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
665 drm_intel_bo_gem *bo_gem;
667 struct drm_gem_open open_arg;
668 struct drm_i915_gem_get_tiling get_tiling;
670 bo_gem = calloc(1, sizeof(*bo_gem));
674 memset(&open_arg, 0, sizeof(open_arg));
675 open_arg.name = handle;
677 ret = ioctl(bufmgr_gem->fd,
680 } while (ret == -1 && errno == EINTR);
682 fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
683 name, handle, strerror(errno));
687 bo_gem->bo.size = open_arg.size;
688 bo_gem->bo.offset = 0;
689 bo_gem->bo.virtual = NULL;
690 bo_gem->bo.bufmgr = bufmgr;
692 atomic_set(&bo_gem->refcount, 1);
693 bo_gem->validate_index = -1;
694 bo_gem->gem_handle = open_arg.handle;
695 bo_gem->global_name = handle;
696 bo_gem->reusable = 0;
698 memset(&get_tiling, 0, sizeof(get_tiling));
699 get_tiling.handle = bo_gem->gem_handle;
700 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
702 drm_intel_gem_bo_unreference(&bo_gem->bo);
705 bo_gem->tiling_mode = get_tiling.tiling_mode;
706 bo_gem->swizzle_mode = get_tiling.swizzle_mode;
707 if (bo_gem->tiling_mode == I915_TILING_NONE)
708 bo_gem->reloc_tree_fences = 0;
710 bo_gem->reloc_tree_fences = 1;
711 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
713 DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
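/* Illustrative usage sketch (an assumption, not part of the original file):
 * process A publishes a buffer with drm_intel_bo_flink() and hands the
 * resulting flink name to process B, which rewraps it with
 * drm_intel_bo_gem_create_from_name() above.
 *
 *	uint32_t name;
 *	if (drm_intel_bo_flink(bo, &name) == 0)
 *		send_name_to_peer(name);      (send_name_to_peer is hypothetical)
 *
 *	in the receiving process, with its own bufmgr and fd:
 *	drm_intel_bo *shared =
 *		drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */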
719 drm_intel_gem_bo_free(drm_intel_bo *bo)
721 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
722 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
723 struct drm_gem_close close;
726 if (bo_gem->mem_virtual)
727 munmap(bo_gem->mem_virtual, bo_gem->bo.size);
728 if (bo_gem->gtt_virtual)
729 munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
731 /* Close this object */
732 memset(&close, 0, sizeof(close));
733 close.handle = bo_gem->gem_handle;
734 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
737 "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
738 bo_gem->gem_handle, bo_gem->name, strerror(errno));
743 /** Frees all cached buffers significantly older than @time. */
745 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
749 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
750 struct drm_intel_gem_bo_bucket *bucket =
751 &bufmgr_gem->cache_bucket[i];
753 while (!DRMLISTEMPTY(&bucket->head)) {
754 drm_intel_bo_gem *bo_gem;
756 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
757 bucket->head.next, head);
758 if (time - bo_gem->free_time <= 1)
761 DRMLISTDEL(&bo_gem->head);
763 drm_intel_gem_bo_free(&bo_gem->bo);
769 drm_intel_gem_bo_unreference_final(drm_intel_bo *bo, time_t time)
771 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
772 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
773 struct drm_intel_gem_bo_bucket *bucket;
774 uint32_t tiling_mode;
777 /* Unreference all the target buffers */
778 for (i = 0; i < bo_gem->reloc_count; i++) {
779 drm_intel_gem_bo_unreference_locked_timed(bo_gem->
783 bo_gem->reloc_count = 0;
784 bo_gem->used_as_reloc_target = 0;
786 DBG("bo_unreference final: %d (%s)\n",
787 bo_gem->gem_handle, bo_gem->name);
789 /* release memory associated with this object */
790 if (bo_gem->reloc_target_bo) {
791 free(bo_gem->reloc_target_bo);
792 bo_gem->reloc_target_bo = NULL;
794 if (bo_gem->relocs) {
795 free(bo_gem->relocs);
796 bo_gem->relocs = NULL;
799 bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
800 /* Put the buffer into our internal cache for reuse if we can. */
801 tiling_mode = I915_TILING_NONE;
802 if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
803 drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0 &&
804 drm_intel_gem_bo_madvise_internal(bufmgr_gem, bo_gem,
805 I915_MADV_DONTNEED)) {
806 bo_gem->free_time = time;
809 bo_gem->validate_index = -1;
811 DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
813 drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time);
815 drm_intel_gem_bo_free(bo);
819 static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
822 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
824 assert(atomic_read(&bo_gem->refcount) > 0);
825 if (atomic_dec_and_test(&bo_gem->refcount))
826 drm_intel_gem_bo_unreference_final(bo, time);
829 static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
831 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
833 assert(atomic_read(&bo_gem->refcount) > 0);
834 if (atomic_dec_and_test(&bo_gem->refcount)) {
835 drm_intel_bufmgr_gem *bufmgr_gem =
836 (drm_intel_bufmgr_gem *) bo->bufmgr;
837 struct timespec time;
839 clock_gettime(CLOCK_MONOTONIC, &time);
841 pthread_mutex_lock(&bufmgr_gem->lock);
842 drm_intel_gem_bo_unreference_final(bo, time.tv_sec);
843 pthread_mutex_unlock(&bufmgr_gem->lock);
847 static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
849 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
850 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
851 struct drm_i915_gem_set_domain set_domain;
854 pthread_mutex_lock(&bufmgr_gem->lock);
856 /* Allow recursive mapping. Mesa may recursively map buffers with
857 * nested display loops.
859 if (!bo_gem->mem_virtual) {
860 struct drm_i915_gem_mmap mmap_arg;
862 DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
864 memset(&mmap_arg, 0, sizeof(mmap_arg));
865 mmap_arg.handle = bo_gem->gem_handle;
867 mmap_arg.size = bo->size;
869 ret = ioctl(bufmgr_gem->fd,
870 DRM_IOCTL_I915_GEM_MMAP,
872 } while (ret == -1 && errno == EINTR);
876 "%s:%d: Error mapping buffer %d (%s): %s .\n",
877 __FILE__, __LINE__, bo_gem->gem_handle,
878 bo_gem->name, strerror(errno));
879 pthread_mutex_unlock(&bufmgr_gem->lock);
882 bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
884 DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
885 bo_gem->mem_virtual);
886 bo->virtual = bo_gem->mem_virtual;
888 set_domain.handle = bo_gem->gem_handle;
889 set_domain.read_domains = I915_GEM_DOMAIN_CPU;
891 set_domain.write_domain = I915_GEM_DOMAIN_CPU;
893 set_domain.write_domain = 0;
895 ret = ioctl(bufmgr_gem->fd,
896 DRM_IOCTL_I915_GEM_SET_DOMAIN,
898 } while (ret == -1 && errno == EINTR);
901 fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
902 __FILE__, __LINE__, bo_gem->gem_handle,
904 pthread_mutex_unlock(&bufmgr_gem->lock);
908 pthread_mutex_unlock(&bufmgr_gem->lock);
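/* Illustrative usage sketch (an assumption): CPU access normally goes
 * through the public wrapper, which lands in drm_intel_gem_bo_map() above.
 * data and size are caller-provided.
 *
 *	if (drm_intel_bo_map(bo, 1) == 0) {     (1 requests a writable mapping)
 *		memcpy(bo->virtual, data, size);
 *		drm_intel_bo_unmap(bo);
 *	}
 */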
913 int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
915 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
916 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
917 struct drm_i915_gem_set_domain set_domain;
920 pthread_mutex_lock(&bufmgr_gem->lock);
922 /* Get a mapping of the buffer if we haven't before. */
923 if (bo_gem->gtt_virtual == NULL) {
924 struct drm_i915_gem_mmap_gtt mmap_arg;
926 DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
929 memset(&mmap_arg, 0, sizeof(mmap_arg));
930 mmap_arg.handle = bo_gem->gem_handle;
932 /* Get the fake offset back... */
934 ret = ioctl(bufmgr_gem->fd,
935 DRM_IOCTL_I915_GEM_MMAP_GTT,
937 } while (ret == -1 && errno == EINTR);
941 "%s:%d: Error preparing buffer map %d (%s): %s .\n",
943 bo_gem->gem_handle, bo_gem->name,
945 pthread_mutex_unlock(&bufmgr_gem->lock);
950 bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
951 MAP_SHARED, bufmgr_gem->fd,
953 if (bo_gem->gtt_virtual == MAP_FAILED) {
954 bo_gem->gtt_virtual = NULL;
957 "%s:%d: Error mapping buffer %d (%s): %s .\n",
959 bo_gem->gem_handle, bo_gem->name,
961 pthread_mutex_unlock(&bufmgr_gem->lock);
966 bo->virtual = bo_gem->gtt_virtual;
968 DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
969 bo_gem->gtt_virtual);
971 /* Now move it to the GTT domain so that the CPU caches are flushed */
972 set_domain.handle = bo_gem->gem_handle;
973 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
974 set_domain.write_domain = I915_GEM_DOMAIN_GTT;
976 ret = ioctl(bufmgr_gem->fd,
977 DRM_IOCTL_I915_GEM_SET_DOMAIN,
979 } while (ret == -1 && errno == EINTR);
983 fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
984 __FILE__, __LINE__, bo_gem->gem_handle,
988 pthread_mutex_unlock(&bufmgr_gem->lock);
993 int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
995 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
996 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1002 assert(bo_gem->gtt_virtual != NULL);
1004 pthread_mutex_lock(&bufmgr_gem->lock);
1006 pthread_mutex_unlock(&bufmgr_gem->lock);
1011 static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
1013 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1014 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1015 struct drm_i915_gem_sw_finish sw_finish;
1021 assert(bo_gem->mem_virtual != NULL);
1023 pthread_mutex_lock(&bufmgr_gem->lock);
1025 /* Cause a flush to happen if the buffer's pinned for scanout, so the
1026 * results show up in a timely manner.
1028 sw_finish.handle = bo_gem->gem_handle;
1030 ret = ioctl(bufmgr_gem->fd,
1031 DRM_IOCTL_I915_GEM_SW_FINISH,
1033 } while (ret == -1 && errno == EINTR);
1036 pthread_mutex_unlock(&bufmgr_gem->lock);
1041 drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
1042 unsigned long size, const void *data)
1044 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1045 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1046 struct drm_i915_gem_pwrite pwrite;
1049 memset(&pwrite, 0, sizeof(pwrite));
1050 pwrite.handle = bo_gem->gem_handle;
1051 pwrite.offset = offset;
1053 pwrite.data_ptr = (uint64_t) (uintptr_t) data;
1055 ret = ioctl(bufmgr_gem->fd,
1056 DRM_IOCTL_I915_GEM_PWRITE,
1058 } while (ret == -1 && errno == EINTR);
1061 "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
1062 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1063 (int)size, strerror(errno));
1069 drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
1071 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1072 struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
1075 get_pipe_from_crtc_id.crtc_id = crtc_id;
1076 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
1077 &get_pipe_from_crtc_id);
1079 /* We return -1 here to signal that we don't
1080 * know which pipe is associated with this crtc.
1081 * This lets the caller know that this information
1082 * isn't available; using the wrong pipe for
1083 * vblank waiting can cause the chipset to lock up
1088 return get_pipe_from_crtc_id.pipe;
1092 drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
1093 unsigned long size, void *data)
1095 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1096 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1097 struct drm_i915_gem_pread pread;
1100 memset(&pread, 0, sizeof(pread));
1101 pread.handle = bo_gem->gem_handle;
1102 pread.offset = offset;
1104 pread.data_ptr = (uint64_t) (uintptr_t) data;
1106 ret = ioctl(bufmgr_gem->fd,
1107 DRM_IOCTL_I915_GEM_PREAD,
1109 } while (ret == -1 && errno == EINTR);
1113 "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
1114 __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
1115 (int)size, strerror(errno));
1120 /** Waits for all GPU rendering to the object to have completed. */
1122 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
1124 drm_intel_gem_bo_start_gtt_access(bo, 0);
1128 * Sets the object to the GTT read and possibly write domain, used by the X
1129 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
1131 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
1132 * can do tiled pixmaps this way.
1135 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
1137 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1138 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1139 struct drm_i915_gem_set_domain set_domain;
1142 set_domain.handle = bo_gem->gem_handle;
1143 set_domain.read_domains = I915_GEM_DOMAIN_GTT;
1144 set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
1146 ret = ioctl(bufmgr_gem->fd,
1147 DRM_IOCTL_I915_GEM_SET_DOMAIN,
1149 } while (ret == -1 && errno == EINTR);
1152 "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
1153 __FILE__, __LINE__, bo_gem->gem_handle,
1154 set_domain.read_domains, set_domain.write_domain,
1160 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
1162 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1165 free(bufmgr_gem->exec_objects);
1166 free(bufmgr_gem->exec_bos);
1168 pthread_mutex_destroy(&bufmgr_gem->lock);
1170 /* Free any cached buffer objects we were going to reuse */
1171 for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
1172 struct drm_intel_gem_bo_bucket *bucket =
1173 &bufmgr_gem->cache_bucket[i];
1174 drm_intel_bo_gem *bo_gem;
1176 while (!DRMLISTEMPTY(&bucket->head)) {
1177 bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
1178 bucket->head.next, head);
1179 DRMLISTDEL(&bo_gem->head);
1181 drm_intel_gem_bo_free(&bo_gem->bo);
1189 * Adds the target buffer to the validation list and adds the relocation
1190 * to the reloc_buffer's relocation list.
1192 * The relocation entry at the given offset must already contain the
1193 * precomputed relocation value, because the kernel will optimize out
1194 * the relocation entry write when the buffer hasn't moved from the
1195 * last known offset in target_bo.
1198 drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
1199 drm_intel_bo *target_bo, uint32_t target_offset,
1200 uint32_t read_domains, uint32_t write_domain)
1202 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1203 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1204 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1206 if (bo_gem->has_error)
1209 if (target_bo_gem->has_error) {
1210 bo_gem->has_error = 1;
1214 /* Create a new relocation list if needed */
1215 if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
1218 /* Check overflow */
1219 assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
1222 assert(offset <= bo->size - 4);
1223 assert((write_domain & (write_domain - 1)) == 0);
1225 /* Make sure that we're not adding a reloc to something whose size has
1226 * already been accounted for.
1228 assert(!bo_gem->used_as_reloc_target);
1229 bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
1230 bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
1232 /* Flag the target to disallow further relocations in it. */
1233 target_bo_gem->used_as_reloc_target = 1;
1235 bo_gem->relocs[bo_gem->reloc_count].offset = offset;
1236 bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
1237 bo_gem->relocs[bo_gem->reloc_count].target_handle =
1238 target_bo_gem->gem_handle;
1239 bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
1240 bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
1241 bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
1243 bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
1244 drm_intel_gem_bo_reference(target_bo);
1246 bo_gem->reloc_count++;
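/* Illustrative usage sketch (an assumption, not from the original source):
 * the caller writes the presumed address into the batch first, then records
 * the relocation so the kernel can patch it if target_bo later moves.
 * batch, dword and batch_bo are hypothetical caller-side names.
 *
 *	batch[dword] = target_bo->offset + target_offset;
 *	drm_intel_bo_emit_reloc(batch_bo, dword * 4,
 *				target_bo, target_offset,
 *				I915_GEM_DOMAIN_RENDER, 0);
 */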
1252 * Walk the tree of relocations rooted at BO and accumulate the list of
1253 * validations to be performed and update the relocation buffers with
1254 * index values into the validation list.
1257 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
1259 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1262 if (bo_gem->relocs == NULL)
1265 for (i = 0; i < bo_gem->reloc_count; i++) {
1266 drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];
1268 /* Continue walking the tree depth-first. */
1269 drm_intel_gem_bo_process_reloc(target_bo);
1271 /* Add the target to the validate list */
1272 drm_intel_add_validate_buffer(target_bo);
1277 drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
1281 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1282 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1283 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1285 /* Update the buffer offset */
1286 if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
1287 DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
1288 bo_gem->gem_handle, bo_gem->name, bo->offset,
1289 (unsigned long long)bufmgr_gem->exec_objects[i].
1291 bo->offset = bufmgr_gem->exec_objects[i].offset;
1297 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
1298 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
1300 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1301 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1302 struct drm_i915_gem_execbuffer execbuf;
1305 if (bo_gem->has_error)
1308 pthread_mutex_lock(&bufmgr_gem->lock);
1309 /* Update indices and set up the validate list. */
1310 drm_intel_gem_bo_process_reloc(bo);
1312 /* Add the batch buffer to the validation list. There are no
1313 * relocations pointing to it.
1315 drm_intel_add_validate_buffer(bo);
1317 execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
1318 execbuf.buffer_count = bufmgr_gem->exec_count;
1319 execbuf.batch_start_offset = 0;
1320 execbuf.batch_len = used;
1321 execbuf.cliprects_ptr = (uintptr_t) cliprects;
1322 execbuf.num_cliprects = num_cliprects;
1327 ret = ioctl(bufmgr_gem->fd,
1328 DRM_IOCTL_I915_GEM_EXECBUFFER,
1330 } while (ret != 0 && errno == EINTR);
1334 if (errno == ENOSPC) {
1336 "Execbuffer fails to pin. "
1337 "Estimate: %u. Actual: %u. Available: %u\n",
1338 drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
1341 drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
1344 (unsigned int)bufmgr_gem->gtt_size);
1347 drm_intel_update_buffer_offsets(bufmgr_gem);
1349 if (bufmgr_gem->bufmgr.debug)
1350 drm_intel_gem_dump_validation_list(bufmgr_gem);
1352 for (i = 0; i < bufmgr_gem->exec_count; i++) {
1353 drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
1354 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1356 /* Disconnect the buffer from the validate list */
1357 bo_gem->validate_index = -1;
1358 bufmgr_gem->exec_bos[i] = NULL;
1360 bufmgr_gem->exec_count = 0;
1361 pthread_mutex_unlock(&bufmgr_gem->lock);
1367 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
1369 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1370 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1371 struct drm_i915_gem_pin pin;
1374 memset(&pin, 0, sizeof(pin));
1375 pin.handle = bo_gem->gem_handle;
1376 pin.alignment = alignment;
1379 ret = ioctl(bufmgr_gem->fd,
1380 DRM_IOCTL_I915_GEM_PIN,
1382 } while (ret == -1 && errno == EINTR);
1387 bo->offset = pin.offset;
1392 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
1394 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1395 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1396 struct drm_i915_gem_unpin unpin;
1399 memset(&unpin, 0, sizeof(unpin));
1400 unpin.handle = bo_gem->gem_handle;
1402 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
1410 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1413 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1414 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1415 struct drm_i915_gem_set_tiling set_tiling;
1418 if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
1421 /* If we're going from non-tiling to tiling, bump fence count */
1422 if (bo_gem->tiling_mode == I915_TILING_NONE)
1423 bo_gem->reloc_tree_fences++;
1425 memset(&set_tiling, 0, sizeof(set_tiling));
1426 set_tiling.handle = bo_gem->gem_handle;
1429 set_tiling.tiling_mode = *tiling_mode;
1430 set_tiling.stride = stride;
1432 ret = ioctl(bufmgr_gem->fd,
1433 DRM_IOCTL_I915_GEM_SET_TILING,
1435 } while (ret == -1 && errno == EINTR);
1436 bo_gem->tiling_mode = set_tiling.tiling_mode;
1437 bo_gem->swizzle_mode = set_tiling.swizzle_mode;
1439 /* If we're going from tiling to non-tiling, drop fence count */
1440 if (bo_gem->tiling_mode == I915_TILING_NONE)
1441 bo_gem->reloc_tree_fences--;
1443 drm_intel_bo_gem_set_in_aperture_size(bufmgr_gem, bo_gem);
1445 *tiling_mode = bo_gem->tiling_mode;
1446 return ret == 0 ? 0 : -errno;
1450 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
1451 uint32_t * swizzle_mode)
1453 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1455 *tiling_mode = bo_gem->tiling_mode;
1456 *swizzle_mode = bo_gem->swizzle_mode;
1461 drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t * name)
1463 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
1464 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1465 struct drm_gem_flink flink;
1468 if (!bo_gem->global_name) {
1469 memset(&flink, 0, sizeof(flink));
1470 flink.handle = bo_gem->gem_handle;
1472 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
1475 bo_gem->global_name = flink.name;
1476 bo_gem->reusable = 0;
1479 *name = bo_gem->global_name;
1484 * Enables unlimited caching of buffer objects for reuse.
1486 * This is potentially very memory expensive, as the cache at each bucket
1487 * size is only bounded by how many buffers of that size we've managed to have
1488 * in flight at once.
1491 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
1493 drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
1495 bufmgr_gem->bo_reuse = 1;
1499 * Return the additional aperture space required by the tree of buffer objects
1503 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
1505 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1509 if (bo == NULL || bo_gem->included_in_check_aperture)
1513 bo_gem->included_in_check_aperture = 1;
1515 for (i = 0; i < bo_gem->reloc_count; i++)
1517 drm_intel_gem_bo_get_aperture_space(bo_gem->
1518 reloc_target_bo[i]);
1524 * Count the number of buffers in this list that need a fence reg
1526 * If the count is greater than the number of available regs, we'll have
1527 * to ask the caller to resubmit a batch with fewer tiled buffers.
1529 * This function over-counts if the same buffer is used multiple times.
1532 drm_intel_gem_total_fences(drm_intel_bo ** bo_array, int count)
1535 unsigned int total = 0;
1537 for (i = 0; i < count; i++) {
1538 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1543 total += bo_gem->reloc_tree_fences;
1549 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
1550 * for the next drm_intel_bufmgr_check_aperture_space() call.
1553 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
1555 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1558 if (bo == NULL || !bo_gem->included_in_check_aperture)
1561 bo_gem->included_in_check_aperture = 0;
1563 for (i = 0; i < bo_gem->reloc_count; i++)
1564 drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->
1565 reloc_target_bo[i]);
1569 * Return a conservative estimate for the amount of aperture required
1570 * for a collection of buffers. This may double-count some buffers.
1573 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
1576 unsigned int total = 0;
1578 for (i = 0; i < count; i++) {
1579 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
1581 total += bo_gem->reloc_tree_size;
1587 * Return the amount of aperture needed for a collection of buffers.
1588 * This avoids double counting any buffers, at the cost of looking
1589 * at every buffer in the set.
1592 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
1595 unsigned int total = 0;
1597 for (i = 0; i < count; i++) {
1598 total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
1599 /* For the first buffer object in the array, we get an
1600 * accurate count back for its reloc_tree size (since nothing
1601 * had been flagged as being counted yet). We can save that
1602 * value out as a more conservative reloc_tree_size that
1603 * avoids double-counting target buffers. Since the first
1604 * buffer happens to usually be the batch buffer in our
1605 * callers, this can pull us back from doing the tree
1606 * walk on every new batch emit.
1609 drm_intel_bo_gem *bo_gem =
1610 (drm_intel_bo_gem *) bo_array[i];
1611 bo_gem->reloc_tree_size = total;
1615 for (i = 0; i < count; i++)
1616 drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
1621 * Return -1 if the batchbuffer should be flushed before attempting to
1622 * emit rendering referencing the buffers pointed to by bo_array.
1624 * This is required because if we try to emit a batchbuffer with relocations
1625 * to a tree of buffers that won't simultaneously fit in the aperture,
1626 * the rendering will return an error at a point where the software is not
1627 * prepared to recover from it.
1629 * However, we also want to emit the batchbuffer significantly before we reach
1630 * the limit, as a series of batchbuffers each of which references buffers
1631 * covering almost all of the aperture means that at each emit we end up
1632 * waiting to evict a buffer from the last rendering, and we get synchronous
1633 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
1634 * get better parallelism.
1637 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
1639 drm_intel_bufmgr_gem *bufmgr_gem =
1640 (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
1641 unsigned int total = 0;
1642 unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
1645 /* Check for fence reg constraints if necessary */
1646 if (bufmgr_gem->available_fences) {
1647 total_fences = drm_intel_gem_total_fences(bo_array, count);
1648 if (total_fences > bufmgr_gem->available_fences)
1652 total = drm_intel_gem_estimate_batch_space(bo_array, count);
1654 if (total > threshold)
1655 total = drm_intel_gem_compute_batch_space(bo_array, count);
1657 if (total > threshold) {
1658 DBG("check_space: overflowed available aperture, "
1660 total / 1024, (int)bufmgr_gem->gtt_size / 1024);
1663 DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024,
1664 (int)bufmgr_gem->gtt_size / 1024);
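/* Illustrative usage sketch (an assumption): callers check the aperture
 * before adding more state to a batch and flush when the check fails.
 *
 *	if (drm_intel_bufmgr_check_aperture_space(bos, bo_count) != 0) {
 *		flush_batch();      (flush_batch is a hypothetical caller helper)
 *		then re-emit the state against a fresh batchbuffer
 *	}
 */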
1670 * Disable buffer reuse for objects which are shared with the kernel
1671 * as scanout buffers
1674 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
1676 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1678 bo_gem->reusable = 0;
1683 _drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1685 drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
1688 for (i = 0; i < bo_gem->reloc_count; i++) {
1689 if (bo_gem->reloc_target_bo[i] == target_bo)
1691 if (_drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
1699 /** Return true if target_bo is referenced by bo's relocation tree. */
1701 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
1703 drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
1705 if (bo == NULL || target_bo == NULL)
1707 if (target_bo_gem->used_as_reloc_target)
1708 return _drm_intel_gem_bo_references(bo, target_bo);
1713 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
1714 * and manage buffer objects.
1716 * \param fd File descriptor of the opened DRM device.
1719 drm_intel_bufmgr_gem_init(int fd, int batch_size)
1721 drm_intel_bufmgr_gem *bufmgr_gem;
1722 struct drm_i915_gem_get_aperture aperture;
1723 drm_i915_getparam_t gp;
1727 bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
1728 if (bufmgr_gem == NULL)
1731 bufmgr_gem->fd = fd;
1733 if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
1738 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
1741 bufmgr_gem->gtt_size = aperture.aper_available_size;
1743 fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
1745 bufmgr_gem->gtt_size = 128 * 1024 * 1024;
1746 fprintf(stderr, "Assuming %dkB available aperture size.\n"
1747 "May lead to reduced performance or incorrect "
1749 (int)bufmgr_gem->gtt_size / 1024);
1752 gp.param = I915_PARAM_CHIPSET_ID;
1753 gp.value = &bufmgr_gem->pci_device;
1754 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1756 fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
1757 fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
1760 if (IS_GEN2(bufmgr_gem) || IS_GEN3(bufmgr_gem)) {
1761 gp.param = I915_PARAM_NUM_FENCES_AVAIL;
1762 gp.value = &bufmgr_gem->available_fences;
1763 ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
1765 fprintf(stderr, "get fences failed: %d [%d]\n", ret,
1767 fprintf(stderr, "param: %d, val: %d\n", gp.param,
1769 bufmgr_gem->available_fences = 0;
1771 /* XXX The kernel reports the total number of fences,
1772 * including any that may be pinned.
1774 * We presume that there will be at least one pinned
1775 * fence for the scanout buffer, but there may be more
1776 * than one scanout and the user may be manually
1777 * pinning buffers. Let's move to execbuffer2 and
1778 * thereby forget the insanity of using fences...
1780 bufmgr_gem->available_fences -= 2;
1781 if (bufmgr_gem->available_fences < 0)
1782 bufmgr_gem->available_fences = 0;
1786 /* Let's go with one relocation for every 2 dwords (but round down a bit
1787 * since a power of two will mean an extra page allocation for the reloc buffer).
1790 * Every 4 was too few for the blender benchmark.
1792 bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
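/* Worked example (illustrative): a 16 KiB batch holds 4096 dwords, so
 * max_relocs = 4096 / 2 - 2 = 2046 relocation entries per buffer.
 */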
1794 bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
1795 bufmgr_gem->bufmgr.bo_alloc_for_render =
1796 drm_intel_gem_bo_alloc_for_render;
1797 bufmgr_gem->bufmgr.bo_alloc_tiled = drm_intel_gem_bo_alloc_tiled;
1798 bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
1799 bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
1800 bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
1801 bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
1802 bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
1803 bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
1804 bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
1805 bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
1806 bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
1807 bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
1808 bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
1809 bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
1810 bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
1811 bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
1812 bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
1813 bufmgr_gem->bufmgr.bo_madvise = drm_intel_gem_bo_madvise;
1814 bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
1815 bufmgr_gem->bufmgr.debug = 0;
1816 bufmgr_gem->bufmgr.check_aperture_space =
1817 drm_intel_gem_check_aperture_space;
1818 bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
1819 bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
1820 drm_intel_gem_get_pipe_from_crtc_id;
1821 bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
1823 /* Initialize the linked lists for BO reuse cache. */
1824 for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
1825 DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
1826 bufmgr_gem->cache_bucket[i].size = size;
1829 return &bufmgr_gem->bufmgr;
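/* Illustrative setup sketch (an assumption, not part of the original file):
 *
 *	int fd = drmOpen("i915", NULL);
 *	drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *	drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *	drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *	use the buffer, then release everything:
 *	drm_intel_bo_unreference(bo);
 *	drm_intel_bufmgr_destroy(bufmgr);
 */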