/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <xf86drm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>

#include "intel_bufmgr.h"
#include "intel_bufmgr_priv.h"
#include "i915_drm.h"

#define DBG(...) do {				\
    if (bufmgr_gem->bufmgr.debug)		\
	fprintf(stderr, __VA_ARGS__);		\
} while (0)

typedef struct _drm_intel_bo_gem drm_intel_bo_gem;

struct drm_intel_gem_bo_bucket {
    drm_intel_bo_gem *head, **tail;
    /**
     * Limit on the number of entries in this bucket.
     *
     * 0 means that caching at this bucket size is disabled.
     * -1 means that there is no limit to caching at this size.
     */
    int max_entries;
    int num_entries;
};

/* Arbitrarily chosen, 16 means that the maximum size we'll cache for reuse
 * is 1 << 15 pages, or 128MB (bucket i holds objects of 1 << i pages).
 */
#define DRM_INTEL_GEM_BO_BUCKETS 16

typedef struct _drm_intel_bufmgr_gem {
    drm_intel_bufmgr bufmgr;

    int fd;

    int max_relocs;

    pthread_mutex_t lock;

    struct drm_i915_gem_exec_object *exec_objects;
    drm_intel_bo **exec_bos;
    int exec_size;
    int exec_count;

    /** Array of lists of cached gem objects of power-of-two sizes */
    struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];

    uint64_t gtt_size;
} drm_intel_bufmgr_gem;

struct _drm_intel_bo_gem {
    drm_intel_bo bo;

    int refcount;
    /** Boolean whether the mmap ioctl has been called for this buffer yet. */
    int mapped;
    uint32_t gem_handle;
    const char *name;

    /**
     * Kernel-assigned global name for this object
     */
    unsigned int global_name;

    /**
     * Index of the buffer within the validation list while preparing a
     * batchbuffer execution.
     */
    int validate_index;

    /**
     * Boolean whether we've started swrast.
     * Set when the buffer has been mapped, cleared when the buffer is
     * unmapped.
     */
    int swrast;

    /** Array passed to the DRM containing relocation information. */
    struct drm_i915_gem_relocation_entry *relocs;
    /** Array of bos corresponding to relocs[i].target_handle */
    drm_intel_bo **reloc_target_bo;
    /** Number of entries in relocs */
    int reloc_count;
    /** Mapped address for the buffer, saved across map/unmap cycles */
    void *virtual;

    /** Free list link, used by the BO reuse cache. */
    drm_intel_bo_gem *next;

    /**
     * Boolean of whether this BO and its children have been included in
     * the current drm_intel_bufmgr_check_aperture_space() total.
     */
    char included_in_check_aperture;

    /**
     * Boolean of whether this buffer has been used as a relocation
     * target and had its size accounted for, and thus can't have any
     * further relocations added to it.
     */
    char used_as_reloc_target;

    /**
     * Size in bytes of this buffer and its relocation descendants.
     *
     * Used to avoid costly tree walking in
     * drm_intel_bufmgr_check_aperture_space() in the common case.
     */
    int reloc_tree_size;
};

static void drm_intel_gem_bo_reference_locked(drm_intel_bo *bo);

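/*
 * Power-of-two rounding helper assumed by drm_intel_gem_bo_alloc() below:
 * returns the smallest e such that (1 << e) >= n.
 */
static unsigned long
logbase2(unsigned long n)
{
    unsigned long i = 1;
    unsigned long log2 = 0;

    while (n > i) {
	i *= 2;
	log2++;
    }

    return log2;
}
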
static struct drm_intel_gem_bo_bucket *
drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
				 unsigned long size)
{
    int i;

    /* We only do buckets in power of two increments */
    if ((size & (size - 1)) != 0)
	return NULL;

    /* We should only see sizes rounded to pages. */
    assert((size % 4096) == 0);

    /* We always allocate in units of pages */
    i = ffs(size / 4096) - 1;
    if (i >= DRM_INTEL_GEM_BO_BUCKETS)
	return NULL;

    return &bufmgr_gem->cache_bucket[i];
}

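/*
 * Note the mapping this produces: a 4096-byte object lands in bucket
 * ffs(1) - 1 = 0, an 8192-byte object in bucket 1, and a 64KB (16-page)
 * object in bucket ffs(16) - 1 = 4.
 */
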
static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
{
    int i, j;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
	drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

	if (bo_gem->relocs == NULL) {
	    DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
	    continue;
	}

	for (j = 0; j < bo_gem->reloc_count; j++) {
	    drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
	    drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *)target_bo;

	    DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
		i,
		bo_gem->gem_handle, bo_gem->name,
		(unsigned long long)bo_gem->relocs[j].offset,
		target_gem->gem_handle, target_gem->name, target_bo->offset,
		bo_gem->relocs[j].delta);
	}
    }
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
drm_intel_add_validate_buffer(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int index;

    if (bo_gem->validate_index != -1)
	return;

    /* Extend the array of validation entries as necessary. */
    if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
	int new_size = bufmgr_gem->exec_size * 2;

	if (new_size == 0)
	    new_size = 5;

	bufmgr_gem->exec_objects =
	    realloc(bufmgr_gem->exec_objects,
		    sizeof(*bufmgr_gem->exec_objects) * new_size);
	bufmgr_gem->exec_bos =
	    realloc(bufmgr_gem->exec_bos,
		    sizeof(*bufmgr_gem->exec_bos) * new_size);
	bufmgr_gem->exec_size = new_size;
    }

    index = bufmgr_gem->exec_count;
    bo_gem->validate_index = index;
    /* Fill in array entry */
    bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
    bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
    bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
    bufmgr_gem->exec_objects[index].alignment = 0;
    bufmgr_gem->exec_objects[index].offset = 0;
    bufmgr_gem->exec_bos[index] = bo;
    drm_intel_gem_bo_reference_locked(bo);
    bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
	sizeof(uint32_t))

static int
drm_intel_setup_reloc_list(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

    bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
			    sizeof(struct drm_i915_gem_relocation_entry));
    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs *
				     sizeof(drm_intel_bo *));

    return 0;
}

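/**
 * Allocates a buffer object, preferring a recycled BO from the
 * power-of-two reuse cache when one is available and idle.
 *
 * Buckets are filled at the tail and drained from the head, so the head is
 * the least-recently-freed buffer and the one most likely to have finished
 * rendering; only it is checked with the BUSY ioctl.
 */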
static drm_intel_bo *
drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
		       unsigned long size, unsigned int alignment)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    drm_intel_bo_gem *bo_gem;
    unsigned int page_size = getpagesize();
    int ret;
    struct drm_intel_gem_bo_bucket *bucket;
    int alloc_from_cache = 0;
    unsigned long bo_size;

    /* Round the allocated size up to a power of two number of pages. */
    bo_size = 1 << logbase2(size);
    if (bo_size < page_size)
	bo_size = page_size;
    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo_size);

    /* If we don't have caching at this size, don't actually round the
     * allocation up.
     */
    if (bucket == NULL || bucket->max_entries == 0) {
	bo_size = size;
	if (bo_size < page_size)
	    bo_size = page_size;
    }

    pthread_mutex_lock(&bufmgr_gem->lock);
    /* Get a buffer out of the cache if available */
    if (bucket != NULL && bucket->num_entries > 0) {
	struct drm_i915_gem_busy busy;

	bo_gem = bucket->head;
	busy.handle = bo_gem->gem_handle;

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
	alloc_from_cache = (ret == 0 && busy.busy == 0);

	if (alloc_from_cache) {
	    bucket->head = bo_gem->next;
	    if (bo_gem->next == NULL)
		bucket->tail = &bucket->head;
	    bucket->num_entries--;
	}
    }
    pthread_mutex_unlock(&bufmgr_gem->lock);

    if (!alloc_from_cache) {
	struct drm_i915_gem_create create;

	bo_gem = calloc(1, sizeof(*bo_gem));
	if (!bo_gem)
	    return NULL;

	bo_gem->bo.size = bo_size;
	memset(&create, 0, sizeof(create));
	create.size = bo_size;

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
	bo_gem->gem_handle = create.handle;
	if (ret != 0) {
	    free(bo_gem);
	    return NULL;
	}
	bo_gem->bo.bufmgr = bufmgr;
    }

    bo_gem->name = name;
    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;
    bo_gem->reloc_tree_size = bo_gem->bo.size;
    bo_gem->used_as_reloc_target = 0;

    DBG("bo_create: buf %d (%s) %ldb\n",
	bo_gem->gem_handle, bo_gem->name, size);

    return &bo_gem->bo;
}

/**
 * Returns a drm_intel_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
drm_intel_bo *
drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
				  unsigned int handle)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    drm_intel_bo_gem *bo_gem;
    int ret;
    struct drm_gem_open open_arg;

    bo_gem = calloc(1, sizeof(*bo_gem));
    if (!bo_gem)
	return NULL;

    memset(&open_arg, 0, sizeof(open_arg));
    open_arg.name = handle;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
    if (ret != 0) {
	fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
		name, handle, strerror(errno));
	free(bo_gem);
	return NULL;
    }
    bo_gem->bo.size = open_arg.size;
    bo_gem->bo.offset = 0;
    bo_gem->bo.virtual = NULL;
    bo_gem->bo.bufmgr = bufmgr;
    bo_gem->name = name;
    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;
    bo_gem->gem_handle = open_arg.handle;
    bo_gem->global_name = handle;

    DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

    return &bo_gem->bo;
}

static void
drm_intel_gem_bo_reference(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    pthread_mutex_lock(&bufmgr_gem->lock);
    bo_gem->refcount++;
    pthread_mutex_unlock(&bufmgr_gem->lock);
}

static void
drm_intel_gem_bo_reference_locked(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    bo_gem->refcount++;
}

static void
drm_intel_gem_bo_free(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_gem_close close;
    int ret;

    if (bo_gem->virtual)
	munmap(bo_gem->virtual, bo_gem->bo.size);

    /* Close this object */
    close.handle = bo_gem->gem_handle;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
    if (ret != 0) {
	fprintf(stderr,
		"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
		bo_gem->gem_handle, bo_gem->name, strerror(errno));
    }
    free(bo);
}

static void
drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

    if (--bo_gem->refcount == 0) {
	struct drm_intel_gem_bo_bucket *bucket;

	if (bo_gem->relocs != NULL) {
	    int i;

	    /* Unreference all the target buffers */
	    for (i = 0; i < bo_gem->reloc_count; i++)
		drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
	    free(bo_gem->reloc_target_bo);
	    free(bo_gem->relocs);
	}

	DBG("bo_unreference final: %d (%s)\n",
	    bo_gem->gem_handle, bo_gem->name);

	bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
	/* Put the buffer into our internal cache for reuse if we can. */
	if (bucket != NULL &&
	    (bucket->max_entries == -1 ||
	     (bucket->max_entries > 0 &&
	      bucket->num_entries < bucket->max_entries)))
	{
	    bo_gem->name = NULL;
	    bo_gem->validate_index = -1;
	    bo_gem->relocs = NULL;
	    bo_gem->reloc_target_bo = NULL;
	    bo_gem->reloc_count = 0;

	    bo_gem->next = NULL;
	    *bucket->tail = bo_gem;
	    bucket->tail = &bo_gem->next;
	    bucket->num_entries++;
	} else {
	    drm_intel_gem_bo_free(bo);
	}
    }
}

static void
drm_intel_gem_bo_unreference(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;

    pthread_mutex_lock(&bufmgr_gem->lock);
    drm_intel_gem_bo_unreference_locked(bo);
    pthread_mutex_unlock(&bufmgr_gem->lock);
}

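/**
 * Maps the buffer through the CPU mmap ioctl and moves it to the CPU read
 * domain (plus the CPU write domain if write_enable is set), so cached CPU
 * access is coherent with the GPU. The mapping is saved in bo_gem->virtual
 * across map/unmap cycles.
 */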
static int
drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Allow recursive mapping. Mesa may recursively map buffers with
     * nested display loops.
     */
    if (!bo_gem->virtual) {
	struct drm_i915_gem_mmap mmap_arg;

	DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = bo_gem->gem_handle;
	mmap_arg.offset = 0;
	mmap_arg.size = bo->size;
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
	if (ret != 0) {
	    fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
		    __FILE__, __LINE__,
		    bo_gem->gem_handle, bo_gem->name, strerror(errno));
	    pthread_mutex_unlock(&bufmgr_gem->lock);
	    return ret;
	}
	bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
    }
    DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	bo_gem->virtual);
    bo->virtual = bo_gem->virtual;

    if (!bo_gem->swrast) {
	set_domain.handle = bo_gem->gem_handle;
	set_domain.read_domains = I915_GEM_DOMAIN_CPU;
	if (write_enable)
	    set_domain.write_domain = I915_GEM_DOMAIN_CPU;
	else
	    set_domain.write_domain = 0;
	do {
	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
			&set_domain);
	} while (ret == -1 && errno == EINTR);
	if (ret != 0) {
	    fprintf(stderr, "%s:%d: Error setting swrast %d: %s\n",
		    __FILE__, __LINE__, bo_gem->gem_handle, strerror(errno));
	}
	bo_gem->swrast = 1;
    }

    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}

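/**
 * Maps the buffer through the GTT rather than through the CPU mmap: the
 * MMAP_GTT ioctl hands back a fake offset that is then mmapped through the
 * DRM fd, and the buffer is moved to the GTT domain to flush CPU caches.
 */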
int
drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Get a mapping of the buffer if we haven't before. */
    if (bo_gem->virtual == NULL) {
	struct drm_i915_gem_mmap_gtt mmap_arg;

	DBG("bo_map_gtt: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

	memset(&mmap_arg, 0, sizeof(mmap_arg));
	mmap_arg.handle = bo_gem->gem_handle;

	/* Get the fake offset back... */
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
	if (ret != 0) {
	    fprintf(stderr,
		    "%s:%d: Error preparing buffer map %d (%s): %s .\n",
		    __FILE__, __LINE__,
		    bo_gem->gem_handle, bo_gem->name,
		    strerror(errno));
	    pthread_mutex_unlock(&bufmgr_gem->lock);
	    return ret;
	}

	/* and mmap it through the fake offset. */
	bo_gem->virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
			       MAP_SHARED, bufmgr_gem->fd,
			       mmap_arg.offset);
	if (bo_gem->virtual == MAP_FAILED) {
	    ret = errno;
	    fprintf(stderr,
		    "%s:%d: Error mapping buffer %d (%s): %s .\n",
		    __FILE__, __LINE__,
		    bo_gem->gem_handle, bo_gem->name,
		    strerror(errno));
	    bo_gem->virtual = NULL;
	    pthread_mutex_unlock(&bufmgr_gem->lock);
	    return ret;
	}
    }

    bo->virtual = bo_gem->virtual;

    DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
	bo_gem->virtual);

    /* Now move it to the GTT domain so that the CPU caches are flushed */
    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = I915_GEM_DOMAIN_GTT;
    do {
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
		    &set_domain);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
	fprintf(stderr, "%s:%d: Error setting GTT domain %d: %s\n",
		__FILE__, __LINE__, bo_gem->gem_handle, strerror(errno));
    }

    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}

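/**
 * Finishes CPU access to the buffer: if it was mapped for software
 * rendering (swrast), SW_FINISH is signaled so the kernel can flush.
 * The mmap itself stays cached in bo_gem->virtual for cheap remapping.
 */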
static int
drm_intel_gem_bo_unmap(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_sw_finish sw_finish;
    int ret;

    if (bo == NULL)
	return 0;

    assert(bo_gem->virtual != NULL);

    pthread_mutex_lock(&bufmgr_gem->lock);
    if (bo_gem->swrast) {
	sw_finish.handle = bo_gem->gem_handle;
	do {
	    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
			&sw_finish);
	} while (ret == -1 && errno == EINTR);
	bo_gem->swrast = 0;
    }
    pthread_mutex_unlock(&bufmgr_gem->lock);
    return 0;
}

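/**
 * Uploads size bytes from data into the buffer at offset via the pwrite
 * ioctl; no CPU mapping of the buffer is required.
 */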
static int
drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
			 unsigned long size, const void *data)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pwrite pwrite;
    int ret;

    memset(&pwrite, 0, sizeof(pwrite));
    pwrite.handle = bo_gem->gem_handle;
    pwrite.offset = offset;
    pwrite.size = size;
    pwrite.data_ptr = (uint64_t)(uintptr_t)data;
    do {
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
	fprintf(stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
		__FILE__, __LINE__,
		bo_gem->gem_handle, (int)offset, (int)size,
		strerror(errno));
    }
    return 0;
}

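/**
 * Reads size bytes from the buffer at offset into data via the pread
 * ioctl, the read-direction counterpart of drm_intel_gem_bo_subdata().
 */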
static int
drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
			     unsigned long size, void *data)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pread pread;
    int ret;

    memset(&pread, 0, sizeof(pread));
    pread.handle = bo_gem->gem_handle;
    pread.offset = offset;
    pread.size = size;
    pread.data_ptr = (uint64_t)(uintptr_t)data;
    do {
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
	fprintf(stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
		__FILE__, __LINE__,
		bo_gem->gem_handle, (int)offset, (int)size,
		strerror(errno));
    }
    return 0;
}

/** Waits for all GPU rendering to the object to have completed. */
static void
drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
{
    drm_intel_gem_bo_start_gtt_access(bo, 0);
}

/**
 * Sets the object to the GTT read and possibly write domain, used by the X
 * 2D driver in the absence of kernel support to do drm_intel_gem_bo_map_gtt().
 *
 * In combination with drm_intel_gem_bo_pin() and manual fence management, we
 * can do tiled pixmaps this way.
 */
void
drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
    do {
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
	fprintf(stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
		__FILE__, __LINE__,
		bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
		strerror(errno));
    }
}

static void
drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    int i;

    free(bufmgr_gem->exec_objects);
    free(bufmgr_gem->exec_bos);

    pthread_mutex_destroy(&bufmgr_gem->lock);

    /* Free any cached buffer objects we were going to reuse */
    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
	struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
	drm_intel_bo_gem *bo_gem;

	while ((bo_gem = bucket->head) != NULL) {
	    bucket->head = bo_gem->next;
	    if (bo_gem->next == NULL)
		bucket->tail = &bucket->head;
	    bucket->num_entries--;

	    drm_intel_gem_bo_free(&bo_gem->bo);
	}
    }

    free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
			    drm_intel_bo *target_bo, uint32_t target_offset,
			    uint32_t read_domains, uint32_t write_domain)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;

    pthread_mutex_lock(&bufmgr_gem->lock);

    /* Create a new relocation list if needed */
    if (bo_gem->relocs == NULL)
	drm_intel_setup_reloc_list(bo);

    /* Check overflow */
    assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

    /* Check args */
    assert(offset <= bo->size - 4);
    assert((write_domain & (write_domain - 1)) == 0);

    /* Make sure that we're not adding a reloc to something whose size has
     * already been accounted for.
     */
    assert(!bo_gem->used_as_reloc_target);
    bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;

    /* Flag the target to disallow further relocations in it. */
    target_bo_gem->used_as_reloc_target = 1;

    bo_gem->relocs[bo_gem->reloc_count].offset = offset;
    bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
    bo_gem->relocs[bo_gem->reloc_count].target_handle =
	target_bo_gem->gem_handle;
    bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
    bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

    bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
    drm_intel_gem_bo_reference_locked(target_bo);

    bo_gem->reloc_count++;

    pthread_mutex_unlock(&bufmgr_gem->lock);

    return 0;
}

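/*
 * A sketch of how a (hypothetical) caller uses this: the driver writes the
 * presumed address into the batch first, then records the relocation so the
 * kernel only needs to patch the dword if target_bo has actually moved:
 *
 *     batch[n] = target_bo->offset + target_offset;
 *     drm_intel_bo_emit_reloc(batch_bo, n * 4, target_bo, target_offset,
 *                             I915_GEM_DOMAIN_RENDER, 0);
 */
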
/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int i;

    if (bo_gem->relocs == NULL)
	return;

    for (i = 0; i < bo_gem->reloc_count; i++) {
	drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];

	/* Continue walking the tree depth-first. */
	drm_intel_gem_bo_process_reloc(target_bo);

	/* Add the target to the validate list */
	drm_intel_add_validate_buffer(target_bo);
    }
}

static void
drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
{
    int i;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
	drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

	/* Update the buffer offset */
	if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
	    DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
		bo_gem->gem_handle, bo_gem->name, bo->offset,
		(unsigned long long)bufmgr_gem->exec_objects[i].offset);
	    bo->offset = bufmgr_gem->exec_objects[i].offset;
	}
    }
}

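/**
 * Submits the batchbuffer to the kernel. The relocation tree is flattened
 * into the validation list first and the batch buffer itself appended last;
 * after the ioctl, the presumed offsets are refreshed and the list is torn
 * down for the next submission.
 */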
static int
drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
		      drm_clip_rect_t *cliprects, int num_cliprects,
		      int DR4)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    struct drm_i915_gem_execbuffer execbuf;
    int ret, i;

    pthread_mutex_lock(&bufmgr_gem->lock);
    /* Update indices and set up the validate list. */
    drm_intel_gem_bo_process_reloc(bo);

    /* Add the batch buffer to the validation list. There are no relocations
     * pointing to it.
     */
    drm_intel_add_validate_buffer(bo);

    execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
    execbuf.buffer_count = bufmgr_gem->exec_count;
    execbuf.batch_start_offset = 0;
    execbuf.batch_len = used;
    execbuf.cliprects_ptr = (uintptr_t)cliprects;
    execbuf.num_cliprects = num_cliprects;
    execbuf.DR1 = 0;
    execbuf.DR4 = DR4;

    do {
	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
    } while (ret != 0 && errno == EAGAIN);

    drm_intel_update_buffer_offsets(bufmgr_gem);

    if (bufmgr_gem->bufmgr.debug)
	drm_intel_gem_dump_validation_list(bufmgr_gem);

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
	drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;

	/* Need to call swrast on next bo_map */
	bo_gem->swrast = 0;

	/* Disconnect the buffer from the validate list */
	bo_gem->validate_index = -1;
	drm_intel_gem_bo_unreference_locked(bo);
	bufmgr_gem->exec_bos[i] = NULL;
    }
    bufmgr_gem->exec_count = 0;
    pthread_mutex_unlock(&bufmgr_gem->lock);

    return ret;
}

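/**
 * Pins the buffer into the GTT at the requested alignment, storing its now
 * fixed address in bo->offset; paired with
 * drm_intel_gem_bo_start_gtt_access() for things like tiled pixmaps.
 */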
static int
drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_pin pin;
    int ret;

    pin.handle = bo_gem->gem_handle;
    pin.alignment = alignment;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
    if (ret != 0)
	return -errno;

    bo->offset = pin.offset;
    return 0;
}

static int
drm_intel_gem_bo_unpin(drm_intel_bo *bo)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_unpin unpin;
    int ret;

    unpin.handle = bo_gem->gem_handle;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
    if (ret != 0)
	return -errno;

    return 0;
}

static int
drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t stride)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_set_tiling set_tiling;
    int ret;

    set_tiling.handle = bo_gem->gem_handle;
    set_tiling.tiling_mode = *tiling_mode;
    set_tiling.stride = stride;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
    if (ret != 0) {
	*tiling_mode = I915_TILING_NONE;
	return -errno;
    }

    *tiling_mode = set_tiling.tiling_mode;
    return 0;
}

static int
drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
			    uint32_t *swizzle_mode)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_i915_gem_get_tiling get_tiling;
    int ret;

    get_tiling.handle = bo_gem->gem_handle;

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
    if (ret != 0) {
	*tiling_mode = I915_TILING_NONE;
	*swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
	return -errno;
    }

    *tiling_mode = get_tiling.tiling_mode;
    *swizzle_mode = get_tiling.swizzle_mode;
    return 0;
}

static int
drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    struct drm_gem_flink flink;
    int ret;

    if (!bo_gem->global_name) {
	flink.handle = bo_gem->gem_handle;

	ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
	if (ret != 0)
	    return -errno;
	bo_gem->global_name = flink.name;
    }

    *name = bo_gem->global_name;
    return 0;
}

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
    int i;

    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
	bufmgr_gem->cache_bucket[i].max_entries = -1;
    }
}

/**
 * Return the additional aperture space required by the tree of buffer objects
 * rooted at bo.
 */
static int
drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int i;
    int total = 0;

    if (bo == NULL || bo_gem->included_in_check_aperture)
	return 0;

    total += bo->size;
    bo_gem->included_in_check_aperture = 1;

    for (i = 0; i < bo_gem->reloc_count; i++)
	total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_bo[i]);

    return total;
}

/**
 * Clear the flag set by drm_intel_gem_bo_get_aperture_space() so we're ready
 * for the next drm_intel_bufmgr_check_aperture_space() call.
 */
static void
drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
{
    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
    int i;

    if (bo == NULL || !bo_gem->included_in_check_aperture)
	return;

    bo_gem->included_in_check_aperture = 0;

    for (i = 0; i < bo_gem->reloc_count; i++)
	drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_bo[i]);
}

/**
 * Return -1 if the batchbuffer should be flushed before attempting to
 * emit rendering referencing the buffers pointed to by bo_array.
 *
 * This is required because if we try to emit a batchbuffer with relocations
 * to a tree of buffers that won't simultaneously fit in the aperture,
 * the rendering will return an error at a point where the software is not
 * prepared to recover from it.
 *
 * However, we also want to emit the batchbuffer significantly before we reach
 * the limit, as a series of batchbuffers each of which references buffers
 * covering almost all of the aperture means that at each emit we end up
 * waiting to evict a buffer from the last rendering, and we get synchronous
 * performance. By emitting smaller batchbuffers, we eat some CPU overhead to
 * get better parallelism.
 */
static int
drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
{
    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
    unsigned int total = 0;
    unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
    int i;

    for (i = 0; i < count; i++) {
	drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];

	total += bo_gem->reloc_tree_size;
    }

    if (total > threshold) {
	total = 0;
	for (i = 0; i < count; i++)
	    total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);

	for (i = 0; i < count; i++)
	    drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
    }

    if (total > bufmgr_gem->gtt_size * 3 / 4) {
	DBG("check_space: overflowed available aperture, %dkb vs %dkb\n",
	    total / 1024, (int)bufmgr_gem->gtt_size / 1024);
	return -1;
    } else {
	DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
	    (int)bufmgr_gem->gtt_size / 1024);
	return 0;
    }
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 */
drm_intel_bufmgr *
drm_intel_bufmgr_gem_init(int fd, int batch_size)
{
    drm_intel_bufmgr_gem *bufmgr_gem;
    struct drm_i915_gem_get_aperture aperture;
    int ret, i;

    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
    bufmgr_gem->fd = fd;

    if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
	free(bufmgr_gem);
	return NULL;
    }

    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);

    if (ret == 0)
	bufmgr_gem->gtt_size = aperture.aper_available_size;
    else {
	fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
		strerror(errno));
	bufmgr_gem->gtt_size = 128 * 1024 * 1024;
	fprintf(stderr, "Assuming %dkB available aperture size.\n"
		"May lead to reduced performance or incorrect rendering.\n",
		(int)bufmgr_gem->gtt_size / 1024);
    }

    /* Let's go with one relocation per every 2 dwords (but round down a bit
     * since a power of two will mean an extra page allocation for the reloc
     * buffer).
     *
     * Every 4 was too few for the blender benchmark.
     */
    bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

    bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
    bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
    bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
    bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
    bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
    bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
    bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
    bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
    bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
    bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
    bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
    bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
    bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
    bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
    bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
    bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
    bufmgr_gem->bufmgr.debug = 0;
    bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
    /* Initialize the linked lists for BO reuse cache. */
    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++)
	bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;

    return &bufmgr_gem->bufmgr;
}
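
/*
 * A minimal usage sketch (hypothetical caller; error handling omitted):
 *
 *     int fd = drmOpen("i915", NULL);
 *     drm_intel_bufmgr *bufmgr = drm_intel_bufmgr_gem_init(fd, 4096 * 4);
 *     drm_intel_bufmgr_gem_enable_reuse(bufmgr);
 *     drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "batch", 4096, 4096);
 */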