/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "i915_drm.h"

#define DBG(...) do {                                   \
    if (bufmgr_gem->bufmgr.debug)                       \
        fprintf(stderr, __VA_ARGS__);                   \
} while (0)

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
    drm_intel_bo_gem *head, **tail;
    /**
     * Limit on the number of entries in this bucket.
     *
     * 0 means that caching at this bucket size is disabled.
     * -1 means that there is no limit to caching at this size.
     */
    int max_entries;
    int num_entries;
};

/* Arbitrarily chosen, 16 means that the largest size we'll cache for reuse
 * is 1 << 15 pages, or 128MB.
 */
#define DRM_INTEL_GEM_BO_BUCKETS 16
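/* Given the ffs()-based bucket lookup below, cache_bucket[i] holds buffers of
 * exactly (1 << i) pages: bucket 0 is 4KB, bucket 3 is 32KB, and the last
 * bucket (index 15) is 128MB.  Sizes that are not a power-of-two number of
 * pages bypass the cache entirely.
 */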
typedef struct _drm_intel_bufmgr_gem {
    drm_intel_bufmgr bufmgr;

    struct drm_i915_gem_exec_object *exec_objects;
    drm_intel_bo **exec_bos;

    /** Array of lists of cached gem objects of power-of-two sizes */
    struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
} drm_intel_bufmgr_gem;

struct _drm_intel_bo_gem {
    /** Boolean whether the mmap ioctl has been called for this buffer yet. */

    /**
     * Kernel-assigned global name for this object
     */
    unsigned int global_name;

    /**
     * Index of the buffer within the validation list while preparing a
     * batchbuffer execution.
     */

    /**
     * Boolean whether we've started swrast
     * Set when the buffer has been mapped
     * Cleared when the buffer is unmapped
     */

    /** Array passed to the DRM containing relocation information. */
    struct drm_i915_gem_relocation_entry *relocs;
    /** Array of bos corresponding to relocs[i].target_handle */
    drm_intel_bo **reloc_target_bo;
    /** Number of entries in relocs */

    /** Mapped address for the buffer */

    drm_intel_bo_gem *next;

    /**
     * Boolean of whether this BO and its children have been included in
     * the current drm_intel_bufmgr_check_aperture_space() total.
     */
    char included_in_check_aperture;

    /**
     * Boolean of whether this buffer has been used as a relocation
     * target and had its size accounted for, and thus can't have any
     * further relocations added to it.
     */
    char used_as_reloc_target;

    /**
     * Size in bytes of this buffer and its relocation descendants.
     *
     * Used to avoid costly tree walking in drm_intel_bufmgr_check_aperture in
     * the common case.
     */

static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);

static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                 unsigned long size)

    /* We only do buckets in power of two increments */
    if ((size & (size - 1)) != 0)
        return NULL;

    /* We should only see sizes rounded to pages. */
    assert((size % 4096) == 0);

    /* We always allocate in units of pages */
    i = ffs(size / 4096) - 1;
    if (i >= DRM_INTEL_GEM_BO_BUCKETS)
        return NULL;

    return &bufmgr_gem->cache_bucket[i];
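/* Worked example: an 8192-byte (two page) buffer gives ffs(8192 / 4096) - 1 =
 * ffs(2) - 1 = 1, so it lands in cache_bucket[1]; a three-page buffer fails
 * the power-of-two check above and is never cached.
 */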
static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

        if (bo_gem->relocs == NULL) {
            DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
            continue;
        }

        for (j = 0; j < bo_gem->reloc_count; j++) {
            drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
            drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *)target_bo;

            DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
                i,
                bo_gem->gem_handle, bo_gem->name,
                (unsigned long long)bo_gem->relocs[j].offset,
                target_gem->gem_handle, target_gem->name, target_bo->offset,
                bo_gem->relocs[j].delta);

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
drm_intel_add_validate_buffer(drm_intel_bo *bo)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    if (bo_gem->validate_index != -1)
        return;

    /* Extend the array of validation entries as necessary. */
    if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
        int new_size = bufmgr_gem->exec_size * 2;

        bufmgr_gem->exec_objects =
            realloc(bufmgr_gem->exec_objects,
                    sizeof(*bufmgr_gem->exec_objects) * new_size);
        bufmgr_gem->exec_bos =
            realloc(bufmgr_gem->exec_bos,
                    sizeof(*bufmgr_gem->exec_bos) * new_size);
        bufmgr_gem->exec_size = new_size;
    }

    index = bufmgr_gem->exec_count;
    bo_gem->validate_index = index;
    /* Fill in array entry */
    bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
    bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
    bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
    bufmgr_gem->exec_objects[index].alignment = 0;
    bufmgr_gem->exec_objects[index].offset = 0;
    bufmgr_gem->exec_bos[index] = bo;
    drm_intel_gem_bo_reference_locked(bo);
    bufmgr_gem->exec_count++;
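/* Note that the offset field is left at zero here: after the execbuffer
 * ioctl the kernel writes each object's final GTT offset back into
 * exec_objects[], and drm_intel_update_buffer_offsets() copies it into the
 * corresponding bo->offset.
 */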
#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
                           sizeof(uint32_t))

drm_intel_setup_reloc_list(drm_intel_bo *bo)

    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

    bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
                            sizeof(struct drm_i915_gem_relocation_entry));
    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs *
                                     sizeof(drm_intel_bo *));

static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
                       unsigned long size, unsigned int alignment)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    drm_intel_bo_gem *bo_gem;
    unsigned int page_size = getpagesize();
    struct drm_intel_gem_bo_bucket *bucket;
    int alloc_from_cache = 0;
    unsigned long bo_size;

    /* Round the allocated size up to a power of two number of pages. */
    bo_size = 1 << logbase2(size);
    if (bo_size < page_size)
        bo_size = page_size;
    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo_size);

    /* If we don't have caching at this size, don't actually round the
     * allocation up.
     */
    if (bucket == NULL || bucket->max_entries == 0) {
        if (bo_size < page_size)
            bo_size = page_size;
    }

    pthread_mutex_lock(&bufmgr_gem->lock);
    /* Get a buffer out of the cache if available */
    if (bucket != NULL && bucket->num_entries > 0) {
        struct drm_i915_gem_busy busy;

        bo_gem = bucket->head;
        busy.handle = bo_gem->gem_handle;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
        alloc_from_cache = (ret == 0 && busy.busy == 0);

        if (alloc_from_cache) {
            bucket->head = bo_gem->next;
            if (bo_gem->next == NULL)
                bucket->tail = &bucket->head;
            bucket->num_entries--;
        }
    }
    pthread_mutex_unlock(&bufmgr_gem->lock);

    if (!alloc_from_cache) {
        struct drm_i915_gem_create create;

        bo_gem = calloc(1, sizeof(*bo_gem));

        bo_gem->bo.size = bo_size;
        memset(&create, 0, sizeof(create));
        create.size = bo_size;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
        bo_gem->gem_handle = create.handle;
        bo_gem->bo.handle = bo_gem->gem_handle;

        bo_gem->bo.bufmgr = bufmgr;
    }

    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;
    bo_gem->reloc_tree_size = bo_gem->bo.size;
    bo_gem->used_as_reloc_target = 0;

    DBG("bo_create: buf %d (%s) %ldb\n",
        bo_gem->gem_handle, bo_gem->name, size);
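/* Illustrative call from a driver, via the public wrapper in intel_bufmgr.h
 * (sketch only):
 *
 *     drm_intel_bo *vbo = drm_intel_bo_alloc(bufmgr, "vertices", 16384, 4096);
 *
 * A 16KB request is already a power-of-two number of pages, so when it is
 * later unreferenced it can sit in the reuse cache and satisfy another
 * allocation of the same size.
 */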
/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
                                  unsigned int handle)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    drm_intel_bo_gem *bo_gem;
    struct drm_gem_open open_arg;

    bo_gem = calloc(1, sizeof(*bo_gem));

    memset(&open_arg, 0, sizeof(open_arg));
    open_arg.name = handle;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
    if (ret != 0) {
        fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
                name, handle, strerror(errno));
        free(bo_gem);
        return NULL;
    }

    bo_gem->bo.size = open_arg.size;
    bo_gem->bo.offset = 0;
    bo_gem->bo.virtual = NULL;
    bo_gem->bo.bufmgr = bufmgr;
    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;
    bo_gem->gem_handle = open_arg.handle;
    bo_gem->global_name = handle;

    DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
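/* The "handle" argument is the global flink name created by
 * drm_intel_gem_bo_flink() in some other process (passed over IPC, e.g. by
 * the X server); DRM_IOCTL_GEM_OPEN turns it back into a handle that is
 * local to this file descriptor.
 */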
drm_intel_gem_bo_reference(drm_intel_bo *bo)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    pthread_mutex_lock(&bufmgr_gem->lock);
    bo_gem->refcount++;
    pthread_mutex_unlock(&bufmgr_gem->lock);
static void
drm_intel_gem_bo_reference_locked(drm_intel_bo *bo)

    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    bo_gem->refcount++;
drm_intel_gem_bo_free(drm_intel_bo *bo)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_gem_close close;

    if (bo_gem->mapped)
        munmap (bo_gem->virtual, bo_gem->bo.size);

    /* Close this object */
    close.handle = bo_gem->gem_handle;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
    if (ret != 0) {
        fprintf(stderr,
                "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
                bo_gem->gem_handle, bo_gem->name, strerror(errno));
    }

drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    if (--bo_gem->refcount == 0) {
        struct drm_intel_gem_bo_bucket *bucket;

        if (bo_gem->relocs != NULL) {
            /* Unreference all the target buffers */
            for (i = 0; i < bo_gem->reloc_count; i++)
                drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
            free(bo_gem->reloc_target_bo);
            free(bo_gem->relocs);
        }

        DBG("bo_unreference final: %d (%s)\n",
            bo_gem->gem_handle, bo_gem->name);

        bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
        /* Put the buffer into our internal cache for reuse if we can. */
        if (bucket != NULL &&
            (bucket->max_entries == -1 ||
             (bucket->max_entries > 0 &&
              bucket->num_entries < bucket->max_entries)))
        {
            bo_gem->validate_index = -1;
            bo_gem->relocs = NULL;
            bo_gem->reloc_target_bo = NULL;
            bo_gem->reloc_count = 0;

            *bucket->tail = bo_gem;
            bucket->tail = &bo_gem->next;
            bucket->num_entries++;
        } else {
            drm_intel_gem_bo_free(bo);
        }
    }

drm_intel_gem_bo_unreference(drm_intel_bo *bo)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

    pthread_mutex_lock(&bufmgr_gem->lock);
    drm_intel_gem_bo_unreference_locked(bo);
    pthread_mutex_unlock(&bufmgr_gem->lock);

drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Allow recursive mapping. Mesa may recursively map buffers with
     * nested display loops.
     */
    if (!bo_gem->mapped) {

        assert(bo->virtual == NULL);

        DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

        if (bo_gem->virtual == NULL) {
            struct drm_i915_gem_mmap mmap_arg;

            memset(&mmap_arg, 0, sizeof(mmap_arg));
            mmap_arg.handle = bo_gem->gem_handle;
            mmap_arg.size = bo->size;
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
            if (ret != 0) {
                fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
                        __FILE__, __LINE__,
                        bo_gem->gem_handle, bo_gem->name, strerror(errno));
            }
            bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
        }
        bo->virtual = bo_gem->virtual;
        bo_gem->mapped = 1;
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name, bo_gem->virtual);
    }

    if (!bo_gem->swrast) {
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
        if (write_enable)
            set_domain.write_domain = I915_GEM_DOMAIN_CPU;
        else
            set_domain.write_domain = 0;
        do {
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
                        &set_domain);
        } while (ret == -1 && errno == EINTR);
        if (ret != 0) {
            fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
                     __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
        }
        bo_gem->swrast = 1;
    }

    pthread_mutex_unlock(&bufmgr_gem->lock);
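/* Illustrative CPU upload path using the public wrappers (sketch only,
 * error handling omitted):
 *
 *     drm_intel_bo_map(bo, 1);            // CPU domain, writable
 *     memcpy(bo->virtual, data, size);
 *     drm_intel_bo_unmap(bo);
 *
 * For small one-off writes, drm_intel_bo_subdata() below avoids keeping a
 * mapping around at all.
 */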
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Allow recursive mapping. Mesa may recursively map buffers with
     * nested display loops.
     */
    if (!bo_gem->mapped) {

        assert(bo->virtual == NULL);

        DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

        if (bo_gem->virtual == NULL) {
            struct drm_i915_gem_mmap_gtt mmap_arg;

            memset(&mmap_arg, 0, sizeof(mmap_arg));
            mmap_arg.handle = bo_gem->gem_handle;

            /* Get the fake offset back... */
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
                        &mmap_arg);
            if (ret != 0) {
                fprintf(stderr,
                        "%s:%d: Error preparing buffer map %d (%s): %s .\n",
                        __FILE__, __LINE__,
                        bo_gem->gem_handle, bo_gem->name,
                        strerror(errno));
                pthread_mutex_unlock(&bufmgr_gem->lock);
                return ret;
            }

            bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, bufmgr_gem->fd,
                                   mmap_arg.offset);
            if (bo_gem->virtual == MAP_FAILED) {
                fprintf(stderr,
                        "%s:%d: Error mapping buffer %d (%s): %s .\n",
                        __FILE__, __LINE__,
                        bo_gem->gem_handle, bo_gem->name,
                        strerror(errno));
                pthread_mutex_unlock(&bufmgr_gem->lock);
                return errno;
            }
        }

        bo->virtual = bo_gem->virtual;
        bo_gem->mapped = 1;
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->virtual);
    }

    /* Now move it to the GTT domain so that the CPU caches are flushed */
    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = I915_GEM_DOMAIN_GTT;
    do {
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
                    &set_domain);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
        fprintf (stderr, "%s:%d: Error setting GTT domain %d: %s\n",
                 __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
    }

    pthread_mutex_unlock(&bufmgr_gem->lock);
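/* Unlike the CPU mapping above, a GTT mapping goes through the aperture, so
 * tiled (fenced) buffers appear linear to the CPU and no cache flushing is
 * needed; the trade-off is write-combined access, which makes CPU reads from
 * the mapping slow.
 */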
drm_intel_gem_bo_unmap(drm_intel_bo *bo)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_sw_finish sw_finish;

    assert(bo_gem->mapped);

    pthread_mutex_lock(&bufmgr_gem->lock);
    if (bo_gem->swrast) {
        sw_finish.handle = bo_gem->gem_handle;
        do {
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
                        &sw_finish);
        } while (ret == -1 && errno == EINTR);
        bo_gem->swrast = 0;
    }
    pthread_mutex_unlock(&bufmgr_gem->lock);

drm_intel_gem_bo_subdata (drm_intel_bo *bo, unsigned long offset,
                          unsigned long size, const void *data)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pwrite pwrite;

    memset (&pwrite, 0, sizeof (pwrite));
    pwrite.handle = bo_gem->gem_handle;
    pwrite.offset = offset;
    pwrite.size = size;
    pwrite.data_ptr = (uint64_t) (uintptr_t) data;
    do {
        ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
        fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
                 __FILE__, __LINE__,
                 bo_gem->gem_handle, (int) offset, (int) size,
                 strerror (errno));
    }

drm_intel_gem_bo_get_subdata (drm_intel_bo *bo, unsigned long offset,
                              unsigned long size, void *data)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pread pread;

    memset (&pread, 0, sizeof (pread));
    pread.handle = bo_gem->gem_handle;
    pread.offset = offset;
    pread.size = size;
    pread.data_ptr = (uint64_t) (uintptr_t) data;
    do {
        ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
        fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
                 __FILE__, __LINE__,
                 bo_gem->gem_handle, (int) offset, (int) size,
                 strerror (errno));
    }

/** Waits for all GPU rendering to the object to have completed. */
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)

    return drm_intel_gem_bo_start_gtt_access(bo, 0);

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;

    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
    do {
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
        fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
                 __FILE__, __LINE__,
                 bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
                 strerror (errno));
    }

drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

    free(bufmgr_gem->exec_objects);
    free(bufmgr_gem->exec_bos);

    pthread_mutex_destroy(&bufmgr_gem->lock);

    /* Free any cached buffer objects we were going to reuse */
    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
        struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
        drm_intel_bo_gem *bo_gem;

        while ((bo_gem = bucket->head) != NULL) {
            bucket->head = bo_gem->next;
            if (bo_gem->next == NULL)
                bucket->tail = &bucket->head;
            bucket->num_entries--;

            drm_intel_gem_bo_free(&bo_gem->bo);
        }
    }

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                            drm_intel_bo *target_bo, uint32_t target_offset,
                            uint32_t read_domains, uint32_t write_domain)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Create a new relocation list if needed */
    if (bo_gem->relocs == NULL)
        drm_intel_setup_reloc_list(bo);

    assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

    assert (offset <= bo->size - 4);
    assert ((write_domain & (write_domain-1)) == 0);

    /* Make sure that we're not adding a reloc to something whose size has
     * already been accounted for.
     */
    assert(!bo_gem->used_as_reloc_target);
    bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;

    /* Flag the target to disallow further relocations in it. */
    target_bo_gem->used_as_reloc_target = 1;

    bo_gem->relocs[bo_gem->reloc_count].offset = offset;
    bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
    bo_gem->relocs[bo_gem->reloc_count].target_handle =
        target_bo_gem->gem_handle;
    bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
    bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

    bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
    drm_intel_gem_bo_reference_locked(target_bo);

    bo_gem->reloc_count++;

    pthread_mutex_unlock(&bufmgr_gem->lock);
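/* Illustrative use: a driver writing a buffer address into its batch stores
 * the presumed offset and records a relocation at the same dword (sketch
 * only):
 *
 *     batch[n] = target_bo->offset + target_offset;
 *     drm_intel_bo_emit_reloc(batch_bo, n * 4,
 *                             target_bo, target_offset,
 *                             I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER);
 *
 * If target_bo has not moved by execbuffer time, the kernel can skip
 * rewriting that dword, as described above.
 */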
/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)

    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    if (bo_gem->relocs == NULL)
        return;

    for (i = 0; i < bo_gem->reloc_count; i++) {
        drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];

        /* Continue walking the tree depth-first. */
        drm_intel_gem_bo_process_reloc(target_bo);

        /* Add the target to the validate list */
        drm_intel_add_validate_buffer(target_bo);
    }

drm_intel_update_buffer_offsets (drm_intel_bufmgr_gem *bufmgr_gem)

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

        /* Update the buffer offset */
        if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
            DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
                bo_gem->gem_handle, bo_gem->name, bo->offset,
                (unsigned long long)bufmgr_gem->exec_objects[i].offset);
            bo->offset = bufmgr_gem->exec_objects[i].offset;
        }
    }

drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
                      drm_clip_rect_t *cliprects, int num_cliprects,

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    struct drm_i915_gem_execbuffer execbuf;

    pthread_mutex_lock(&bufmgr_gem->lock);
    /* Update indices and set up the validate list. */
    drm_intel_gem_bo_process_reloc(bo);

    /* Add the batch buffer to the validation list. There are no relocations
     * pointing to it.
     */
    drm_intel_add_validate_buffer(bo);

    execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
    execbuf.buffer_count = bufmgr_gem->exec_count;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = used;
    execbuf.cliprects_ptr = (uintptr_t)cliprects;
    execbuf.num_cliprects = num_cliprects;

    do {
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
    } while (ret != 0 && errno == EAGAIN);

    drm_intel_update_buffer_offsets (bufmgr_gem);

    if (bufmgr_gem->bufmgr.debug)
        drm_intel_gem_dump_validation_list(bufmgr_gem);

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

        /* Need to call swrast on next bo_map */
        bo_gem->swrast = 0;

        /* Disconnect the buffer from the validate list */
        bo_gem->validate_index = -1;
        drm_intel_gem_bo_unreference_locked(bo);
        bufmgr_gem->exec_bos[i] = NULL;
    }
    bufmgr_gem->exec_count = 0;
    pthread_mutex_unlock(&bufmgr_gem->lock);

drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pin pin;

    pin.handle = bo_gem->gem_handle;
    pin.alignment = alignment;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);

    bo->offset = pin.offset;
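/* Once pinned, bo->offset is stable, which is what allows the X server to
 * combine drm_intel_gem_bo_start_gtt_access() with manual fence management
 * for scanout and tiled pixmaps (see the comment above that function).
 */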
drm_intel_gem_bo_unpin(drm_intel_bo *bo)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_unpin unpin;

    unpin.handle = bo_gem->gem_handle;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);

drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t stride)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_tiling set_tiling;

    set_tiling.handle = bo_gem->gem_handle;
    set_tiling.tiling_mode = *tiling_mode;
    set_tiling.stride = stride;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
    if (ret != 0) {
        *tiling_mode = I915_TILING_NONE;
        return -errno;
    }

    *tiling_mode = set_tiling.tiling_mode;

drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t *swizzle_mode)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_get_tiling get_tiling;

    get_tiling.handle = bo_gem->gem_handle;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
    if (ret != 0) {
        *tiling_mode = I915_TILING_NONE;
        *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
        return -errno;
    }

    *tiling_mode = get_tiling.tiling_mode;
    *swizzle_mode = get_tiling.swizzle_mode;

drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_gem_flink flink;

    if (!bo_gem->global_name) {
        flink.handle = bo_gem->gem_handle;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
        bo_gem->global_name = flink.name;
    }

    *name = bo_gem->global_name;
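/* The returned name is global to the DRM device: another process can pass it
 * to drm_intel_bo_gem_create_from_name() above and get its own handle to the
 * same underlying buffer object.
 */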
/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;

    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
        bufmgr_gem->cache_bucket[i].max_entries = -1;
    }

/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)

    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    if (bo == NULL || bo_gem->included_in_check_aperture)
        return 0;

    bo_gem->included_in_check_aperture = 1;

    for (i = 0; i < bo_gem->reloc_count; i++)
        total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_bo[i]);

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)

    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    if (bo == NULL || !bo_gem->included_in_check_aperture)
        return;

    bo_gem->included_in_check_aperture = 0;

    for (i = 0; i < bo_gem->reloc_count; i++)
        drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_bo[i]);

/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)

    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
    unsigned int total = 0;
    unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;

    for (i = 0; i < count; i++) {
        drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];

        total += bo_gem->reloc_tree_size;
    }

    if (total > threshold) {
        total = 0;
        for (i = 0; i < count; i++)
            total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);

        for (i = 0; i < count; i++)
            drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
    }

    if (total > bufmgr_gem->gtt_size * 3 / 4) {
        DBG("check_space: overflowed available aperture, %dkb vs %dkb\n",
            total / 1024, (int)bufmgr_gem->gtt_size / 1024);
        return -1;
    } else {
        DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
            (int)bufmgr_gem->gtt_size / 1024);
        return 0;
    }
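/* Worked example: with the 128MB fallback aperture size assumed in
 * drm_intel_bufmgr_gem_init(), threshold = 128MB * 3 / 4 = 96MB, so any batch
 * whose relocation trees sum to more than 96MB reports -1 and should be
 * flushed before adding more rendering.
 */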
/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr_gem_init(int fd, int batch_size)

    drm_intel_bufmgr_gem *bufmgr_gem;
    struct drm_i915_gem_get_aperture aperture;

    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
    bufmgr_gem->fd = fd;

    if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
        free(bufmgr_gem);
        return NULL;
    }

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);

    if (ret == 0)
        bufmgr_gem->gtt_size = aperture.aper_available_size;
    else {
        fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
                strerror(errno));
        bufmgr_gem->gtt_size = 128 * 1024 * 1024;
        fprintf(stderr, "Assuming %dkB available aperture size.\n"
                "May lead to reduced performance or incorrect rendering.\n",
                (int)bufmgr_gem->gtt_size / 1024);
    }

    /* Let's go with one relocation per every 2 dwords (but round down a bit
     * since a power of two will mean an extra page allocation for the reloc
     * buffer).
     *
     * Every 4 was too few for the blender benchmark.
     */
    bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
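/* For example, a 16KB batch gives 16384 / 4 / 2 - 2 = 2046 relocation slots;
 * the "- 2" is the rounding-down mentioned above, keeping the relocation
 * buffer from spilling into an extra page.
 */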
    bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
    bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
    bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
    bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
    bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
    bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
    bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
    bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
    bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
    bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
    bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
    bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
    bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
    bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
    bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
    bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
    bufmgr_gem->bufmgr.debug = 0;
    bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
    /* Initialize the linked lists for BO reuse cache. */
    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
        bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;

    return &bufmgr_gem->bufmgr;
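/* Typical bring-up in a driver (illustrative only; error handling omitted):
 *
 *     drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 16 * 1024);
 *     drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *     drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *     ...
 *     drm_intel_bo_unreference(bo);
 *     drm_intel_bufmgr_destroy(bufmgr);
 */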