/**************************************************************************
 *
 * Copyright © 2007 Red Hat Inc.
 * Copyright © 2007 Intel Corporation
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 *          Keith Whitwell <keithw-at-tungstengraphics-dot-com>
 *          Eric Anholt <eric@anholt.net>
 *          Dave Airlie <airlied@linux.ie>
 */

#include <xf86drm.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include "dri_bufmgr.h"
#include "intel_bufmgr.h"
#include "i915_drm.h"

#define DBG(...) do {                                   \
    if (bufmgr_gem->bufmgr.debug)                       \
        fprintf(stderr, __VA_ARGS__);                   \
} while (0)

typedef struct _dri_bo_gem dri_bo_gem;

struct dri_gem_bo_bucket {
    dri_bo_gem *head, **tail;
    /**
     * Limit on the number of entries in this bucket.
     *
     * 0 means that caching at this bucket size is disabled.
     * -1 means that there is no limit to caching at this size.
     */
    int max_entries;
    int num_entries;
};

/* Arbitrarily chosen. With 16 buckets, the largest size we'll cache for
 * reuse is 1 << 15 pages, or 128MB (bucket i holds objects of 1 << i pages).
 */
#define INTEL_GEM_BO_BUCKETS	16
typedef struct _dri_bufmgr_gem {
    dri_bufmgr bufmgr;

    struct intel_bufmgr intel_bufmgr;

    int fd;

    int max_relocs;

    struct drm_i915_gem_exec_object *exec_objects;
    dri_bo **exec_bos;
    int exec_size;
    int exec_count;

    /** Array of lists of cached gem objects of power-of-two sizes */
    struct dri_gem_bo_bucket cache_bucket[INTEL_GEM_BO_BUCKETS];

    struct drm_i915_gem_execbuffer exec_arg;
} dri_bufmgr_gem;

struct _dri_bo_gem {
    dri_bo bo;

    int refcount;
    uint32_t gem_handle;
    const char *name;

    /** Boolean whether the mmap ioctl has been called for this buffer yet. */
    int mapped;

    /**
     * Index of the buffer within the validation list while preparing a
     * batchbuffer execution.
     */
    int validate_index;

    /**
     * Boolean whether we've started swrast.
     * Set when the buffer has been mapped, cleared when the buffer is
     * unmapped.
     */
    int swrast;

    /** Array passed to the DRM containing relocation information. */
    struct drm_i915_gem_relocation_entry *relocs;
    /** Array of bos corresponding to relocs[i].target_handle */
    dri_bo **reloc_target_bo;
    /** Number of entries in relocs */
    int reloc_count;
    /** Mapped address for the buffer */
    void *virtual;

    /** Free list link within a cache bucket */
    dri_bo_gem *next;
};

/* Integer base-2 logarithm, rounded up, so that 1 << logbase2(n) is the
 * smallest power of two >= n.
 */
static int
logbase2(int n)
{
    int log2 = 0;

    while ((1 << log2) < n)
        log2++;

    return log2;
}

static struct dri_gem_bo_bucket *
dri_gem_bo_bucket_for_size(dri_bufmgr_gem *bufmgr_gem, unsigned long size)
{
    int i;

    /* We only do buckets in power of two increments */
    if ((size & (size - 1)) != 0)
        return NULL;

    /* We should only see sizes rounded to pages. */
    assert((size % 4096) == 0);

    /* We always allocate in units of pages */
    i = ffs(size / 4096) - 1;
    if (i >= INTEL_GEM_BO_BUCKETS)
        return NULL;

    return &bufmgr_gem->cache_bucket[i];
}
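
/*
 * Worked example (illustrative): a 32KB object is 8 pages, ffs(8) - 1 == 3,
 * so it is cached in cache_bucket[3]; a 4KB object lands in cache_bucket[0].
 * An object of 1 << INTEL_GEM_BO_BUCKETS pages or more gets no bucket, and
 * dri_gem_bo_bucket_for_size() returns NULL for it.
 */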

static void dri_gem_dump_validation_list(dri_bufmgr_gem *bufmgr_gem)
{
    int i, j;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        dri_bo *bo = bufmgr_gem->exec_bos[i];
        dri_bo_gem *bo_gem = (dri_bo_gem *)bo;

        if (bo_gem->relocs == NULL) {
            DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
            continue;
        }

        for (j = 0; j < bo_gem->reloc_count; j++) {
            dri_bo *target_bo = bo_gem->reloc_target_bo[j];
            dri_bo_gem *target_gem = (dri_bo_gem *)target_bo;

            DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
                i,
                bo_gem->gem_handle, bo_gem->name, bo_gem->relocs[j].offset,
                target_gem->gem_handle, target_gem->name, target_bo->offset,
                bo_gem->relocs[j].delta);
        }
    }
}

/**
 * Adds the given buffer to the list of buffers to be validated (moved into the
 * appropriate memory type) with the next batch submission.
 *
 * If a buffer is validated multiple times in a batch submission, it ends up
 * with the intersection of the memory type flags and the union of the
 * access flags.
 */
static void
intel_add_validate_buffer(dri_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    int index;

    if (bo_gem->validate_index != -1)
        return;

    /* Extend the array of validation entries as necessary. */
    if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
        int new_size = bufmgr_gem->exec_size * 2;

        if (new_size == 0)
            new_size = 5;

        bufmgr_gem->exec_objects =
            realloc(bufmgr_gem->exec_objects,
                    sizeof(*bufmgr_gem->exec_objects) * new_size);
        bufmgr_gem->exec_bos =
            realloc(bufmgr_gem->exec_bos,
                    sizeof(*bufmgr_gem->exec_bos) * new_size);
        bufmgr_gem->exec_size = new_size;
    }

    index = bufmgr_gem->exec_count;
    bo_gem->validate_index = index;
    /* Fill in array entry */
    bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
    bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
    bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
    bufmgr_gem->exec_objects[index].alignment = 0;
    bufmgr_gem->exec_objects[index].offset = 0;
    bufmgr_gem->exec_bos[index] = bo;
    dri_bo_reference(bo);
    bufmgr_gem->exec_count++;
}

#define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
        sizeof(uint32_t))

static int
intel_setup_reloc_list(dri_bo *bo)
{
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;

    bo_gem->relocs = malloc(bufmgr_gem->max_relocs *
                            sizeof(struct drm_i915_gem_relocation_entry));
    bo_gem->reloc_target_bo = malloc(bufmgr_gem->max_relocs * sizeof(dri_bo *));

    return 0;
}

static dri_bo *
dri_gem_bo_alloc(dri_bufmgr *bufmgr, const char *name,
                 unsigned long size, unsigned int alignment)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
    dri_bo_gem *bo_gem;
    unsigned int page_size = getpagesize();
    int ret;
    struct dri_gem_bo_bucket *bucket;
    int alloc_from_cache = 0;
    unsigned long bo_size;

    /* Round the allocated size up to a power of two number of pages. */
    bo_size = 1 << logbase2(size);
    if (bo_size < page_size)
        bo_size = page_size;
    bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo_size);

    /* If we don't have caching at this size, don't actually round the
     * allocation up.
     */
    if (bucket == NULL || bucket->max_entries == 0) {
        bo_size = size;
        if (bo_size < page_size)
            bo_size = page_size;
    }

    /* Get a buffer out of the cache if available */
    if (bucket != NULL && bucket->num_entries > 0) {
        struct drm_i915_gem_busy busy;

        bo_gem = bucket->head;
        busy.handle = bo_gem->gem_handle;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
        alloc_from_cache = (ret == 0 && busy.busy == 0);

        if (alloc_from_cache) {
            bucket->head = bo_gem->next;
            if (bo_gem->next == NULL)
                bucket->tail = &bucket->head;
            bucket->num_entries--;
        }
    }

    if (!alloc_from_cache) {
        struct drm_i915_gem_create create;

        bo_gem = calloc(1, sizeof(*bo_gem));
        if (!bo_gem)
            return NULL;

        bo_gem->bo.size = bo_size;
        memset(&create, 0, sizeof(create));
        create.size = bo_size;

        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
        bo_gem->gem_handle = create.handle;
        if (ret != 0) {
            free(bo_gem);
            return NULL;
        }
        bo_gem->bo.bufmgr = bufmgr;
    }

    bo_gem->name = name;
    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;

    DBG("bo_create: buf %d (%s) %ldb\n",
        bo_gem->gem_handle, bo_gem->name, size);

    return &bo_gem->bo;
}
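
/*
 * Minimal allocation sketch (illustrative; "scratch" is just an example
 * name, and the bufmgr would come from intel_bufmgr_gem_init() below):
 *
 *     dri_bo *bo = dri_bo_alloc(bufmgr, "scratch", 4096, 4096);
 *     ...use bo...
 *     dri_bo_unreference(bo);
 */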

/**
 * Returns a dri_bo wrapping the given buffer object handle.
 *
 * This can be used when one application needs to pass a buffer object
 * to another.
 */
dri_bo *
intel_bo_gem_create_from_name(dri_bufmgr *bufmgr, const char *name,
                              unsigned int handle)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
    dri_bo_gem *bo_gem;
    int ret;
    struct drm_gem_open open_arg;

    bo_gem = calloc(1, sizeof(*bo_gem));
    if (!bo_gem)
        return NULL;

    memset(&open_arg, 0, sizeof(open_arg));
    open_arg.name = handle;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
    if (ret != 0) {
        fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
                name, handle, strerror(errno));
        free(bo_gem);
        return NULL;
    }
    bo_gem->bo.size = open_arg.size;
    bo_gem->bo.offset = 0;
    bo_gem->bo.virtual = NULL;
    bo_gem->bo.bufmgr = bufmgr;
    bo_gem->name = name;
    bo_gem->refcount = 1;
    bo_gem->validate_index = -1;
    bo_gem->gem_handle = open_arg.handle;

    DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);

    return &bo_gem->bo;
}
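
/*
 * Sharing sketch (illustrative): assuming the exporting process published
 * the buffer with the GEM flink ioctl and sent the resulting global name
 * over IPC, the importing process wraps it as:
 *
 *     dri_bo *shared = intel_bo_gem_create_from_name(bufmgr, "shared", name);
 */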

static void
dri_gem_bo_reference(dri_bo *bo)
{
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;

    bo_gem->refcount++;
}

static void
dri_gem_bo_free(dri_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_gem_close close;
    int ret;

    if (bo_gem->mapped)
        munmap (bo_gem->virtual, bo_gem->bo.size);

    /* Close this object */
    memset(&close, 0, sizeof(close));
    close.handle = bo_gem->gem_handle;
    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
    if (ret != 0) {
        fprintf(stderr,
                "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
                bo_gem->gem_handle, bo_gem->name, strerror(errno));
    }
    free(bo);
}

static void
dri_gem_bo_unreference(dri_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;

    if (!bo)
        return;

    if (--bo_gem->refcount == 0) {
        struct dri_gem_bo_bucket *bucket;

        if (bo_gem->relocs != NULL) {
            int i;

            /* Unreference all the target buffers */
            for (i = 0; i < bo_gem->reloc_count; i++)
                dri_bo_unreference(bo_gem->reloc_target_bo[i]);
            free(bo_gem->reloc_target_bo);
            free(bo_gem->relocs);
        }

        DBG("bo_unreference final: %d (%s)\n",
            bo_gem->gem_handle, bo_gem->name);

        bucket = dri_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
        /* Put the buffer into our internal cache for reuse if we can. */
        if (bucket != NULL &&
            (bucket->max_entries == -1 ||
             (bucket->max_entries > 0 &&
              bucket->num_entries < bucket->max_entries))) {
            bo_gem->name = NULL;
            bo_gem->validate_index = -1;
            bo_gem->relocs = NULL;
            bo_gem->reloc_target_bo = NULL;
            bo_gem->reloc_count = 0;

            /* Append to the tail of the bucket's free list. */
            bo_gem->next = NULL;
            *bucket->tail = bo_gem;
            bucket->tail = &bo_gem->next;
            bucket->num_entries++;
        } else {
            dri_gem_bo_free(bo);
        }
    }
}

static int
dri_gem_bo_map(dri_bo *bo, int write_enable)
{
    dri_bufmgr_gem *bufmgr_gem;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;

    /* Allow recursive mapping. Mesa may recursively map buffers with
     * nested display loops.
     */
    if (!bo_gem->mapped) {
        assert(bo->virtual == NULL);

        DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);

        if (bo_gem->virtual == NULL) {
            struct drm_i915_gem_mmap mmap_arg;

            memset(&mmap_arg, 0, sizeof(mmap_arg));
            mmap_arg.handle = bo_gem->gem_handle;
            mmap_arg.offset = 0;
            mmap_arg.size = bo->size;
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
            if (ret != 0) {
                fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
                        __FILE__, __LINE__,
                        bo_gem->gem_handle, bo_gem->name, strerror(errno));
            }
            bo_gem->virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
        }
        bo->virtual = bo_gem->virtual;
        bo_gem->swrast = 0;
        bo_gem->mapped = 1;
        DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
            bo_gem->virtual);
    }

    if (!bo_gem->swrast) {
        set_domain.handle = bo_gem->gem_handle;
        set_domain.read_domains = I915_GEM_DOMAIN_CPU;
        if (write_enable)
            set_domain.write_domain = I915_GEM_DOMAIN_CPU;
        else
            set_domain.write_domain = 0;
        do {
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
                        &set_domain);
        } while (ret == -1 && errno == EINTR);
        if (ret != 0) {
            fprintf (stderr, "%s:%d: Error setting swrast %d: %s\n",
                     __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
        }
        bo_gem->swrast = 1;
    }

    return 0;
}
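
/*
 * Map/unmap sketch (illustrative): map for CPU writes, fill the buffer,
 * then unmap so a later GPU user sees coherent data:
 *
 *     if (dri_bo_map(bo, 1) == 0) {
 *         memset(bo->virtual, 0, bo->size);
 *         dri_bo_unmap(bo);
 *     }
 */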

static int
dri_gem_bo_unmap(dri_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_sw_finish sw_finish;
    int ret;

    if (bo == NULL)
        return 0;

    assert(bo_gem->mapped);

    if (bo_gem->swrast) {
        sw_finish.handle = bo_gem->gem_handle;
        do {
            ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
                        &sw_finish);
        } while (ret == -1 && errno == EINTR);
        bo_gem->swrast = 0;
    }
    return 0;
}

static int
dri_gem_bo_subdata (dri_bo *bo, unsigned long offset,
                    unsigned long size, const void *data)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_pwrite pwrite;
    int ret;

    memset (&pwrite, 0, sizeof (pwrite));
    pwrite.handle = bo_gem->gem_handle;
    pwrite.offset = offset;
    pwrite.size = size;
    pwrite.data_ptr = (uint64_t) (uintptr_t) data;
    do {
        ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
        fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
                 __FILE__, __LINE__,
                 bo_gem->gem_handle, (int) offset, (int) size,
                 strerror (errno));
    }
    return 0;
}

static int
dri_gem_bo_get_subdata (dri_bo *bo, unsigned long offset,
                        unsigned long size, void *data)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_pread pread;
    int ret;

    memset (&pread, 0, sizeof (pread));
    pread.handle = bo_gem->gem_handle;
    pread.offset = offset;
    pread.size = size;
    pread.data_ptr = (uint64_t) (uintptr_t) data;
    do {
        ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
    } while (ret == -1 && errno == EINTR);
    if (ret != 0) {
        fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
                 __FILE__, __LINE__,
                 bo_gem->gem_handle, (int) offset, (int) size,
                 strerror (errno));
    }
    return 0;
}
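
/*
 * pwrite/pread sketch (illustrative): upload four dwords to the start of a
 * buffer and read them back, without ever mapping it:
 *
 *     uint32_t dwords[4] = { 1, 2, 3, 4 }, check[4];
 *     dri_bo_subdata(bo, 0, sizeof(dwords), dwords);
 *     dri_bo_get_subdata(bo, 0, sizeof(check), check);
 */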

/** Waits for all GPU rendering to the object to have completed. */
static void
dri_gem_bo_wait_rendering(dri_bo *bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    struct drm_i915_gem_set_domain set_domain;
    int ret;

    set_domain.handle = bo_gem->gem_handle;
    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
    set_domain.write_domain = 0;
    ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
    if (ret != 0) {
        fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
                 __FILE__, __LINE__,
                 bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
                 strerror (errno));
    }
}

static void
dri_bufmgr_gem_destroy(dri_bufmgr *bufmgr)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
    int i;

    free(bufmgr_gem->exec_objects);
    free(bufmgr_gem->exec_bos);

    /* Free any cached buffer objects we were going to reuse */
    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
        struct dri_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
        dri_bo_gem *bo_gem;

        while ((bo_gem = bucket->head) != NULL) {
            bucket->head = bo_gem->next;
            if (bo_gem->next == NULL)
                bucket->tail = &bucket->head;
            bucket->num_entries--;

            dri_gem_bo_free(&bo_gem->bo);
        }
    }

    free(bufmgr);
}

/**
 * Adds the target buffer to the validation list and adds the relocation
 * to the reloc_buffer's relocation list.
 *
 * The relocation entry at the given offset must already contain the
 * precomputed relocation value, because the kernel will optimize out
 * the relocation entry write when the buffer hasn't moved from the
 * last known offset in target_bo.
 */
static int
dri_gem_emit_reloc(dri_bo *bo, uint32_t read_domains, uint32_t write_domain,
                   uint32_t delta, uint32_t offset, dri_bo *target_bo)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bo->bufmgr;
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    dri_bo_gem *target_bo_gem = (dri_bo_gem *)target_bo;

    /* Create a new relocation list if needed */
    if (bo_gem->relocs == NULL)
        intel_setup_reloc_list(bo);

    /* Check overflow */
    assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);

    /* Check args */
    assert (offset <= bo->size - 4);
    assert ((write_domain & (write_domain-1)) == 0);

    bo_gem->relocs[bo_gem->reloc_count].offset = offset;
    bo_gem->relocs[bo_gem->reloc_count].delta = delta;
    bo_gem->relocs[bo_gem->reloc_count].target_handle =
        target_bo_gem->gem_handle;
    bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
    bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;

    bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
    dri_bo_reference(target_bo);

    bo_gem->reloc_count++;
    return 0;
}
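
/*
 * Relocation sketch (illustrative; `pos` is a hypothetical byte offset into
 * the batch): a driver emitting a pointer to target_bo first writes the
 * presumed address, then records the relocation via the public wrapper at
 * the end of this file:
 *
 *     ((uint32_t *)batch->virtual)[pos / 4] = target_bo->offset + delta;
 *     intel_bo_emit_reloc(batch, I915_GEM_DOMAIN_RENDER, 0, delta, pos,
 *                         target_bo);
 */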

/**
 * Walk the tree of relocations rooted at BO and accumulate the list of
 * validations to be performed and update the relocation buffers with
 * index values into the validation list.
 */
static void
dri_gem_bo_process_reloc(dri_bo *bo)
{
    dri_bo_gem *bo_gem = (dri_bo_gem *)bo;
    int i;

    if (bo_gem->relocs == NULL)
        return;

    for (i = 0; i < bo_gem->reloc_count; i++) {
        dri_bo *target_bo = bo_gem->reloc_target_bo[i];

        /* Continue walking the tree depth-first. */
        dri_gem_bo_process_reloc(target_bo);

        /* Add the target to the validate list */
        intel_add_validate_buffer(target_bo);
    }
}

static void *
dri_gem_process_reloc(dri_bo *batch_buf)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *) batch_buf->bufmgr;

    /* Update indices and set up the validate list. */
    dri_gem_bo_process_reloc(batch_buf);

    /* Add the batch buffer to the validation list.  There are no relocations
     * pointing to it.
     */
    intel_add_validate_buffer(batch_buf);

    bufmgr_gem->exec_arg.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
    bufmgr_gem->exec_arg.buffer_count = bufmgr_gem->exec_count;
    bufmgr_gem->exec_arg.batch_start_offset = 0;
    bufmgr_gem->exec_arg.batch_len = 0; /* written in intel_exec_ioctl */

    return &bufmgr_gem->exec_arg;
}
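
/*
 * Submission flow, in outline: the driver calls the bufmgr's process_relocs
 * hook (dri_gem_process_reloc above) to flatten the relocation tree into
 * exec_objects, fills in batch_len on the returned
 * struct drm_i915_gem_execbuffer and fires the execbuffer ioctl (done in
 * the driver's intel_exec_ioctl, per the comment above), then calls the
 * post_submit hook (dri_gem_post_submit below) to pick up the offsets the
 * kernel chose and drop the validation-list references.
 */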

static void
intel_update_buffer_offsets (dri_bufmgr_gem *bufmgr_gem)
{
    int i;

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        dri_bo *bo = bufmgr_gem->exec_bos[i];
        dri_bo_gem *bo_gem = (dri_bo_gem *)bo;

        /* Update the buffer offset */
        if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
            DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
                bo_gem->gem_handle, bo_gem->name, bo->offset,
                bufmgr_gem->exec_objects[i].offset);
            bo->offset = bufmgr_gem->exec_objects[i].offset;
        }
    }
}

static void
dri_gem_post_submit(dri_bo *batch_buf)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)batch_buf->bufmgr;
    int i;

    intel_update_buffer_offsets (bufmgr_gem);

    if (bufmgr_gem->bufmgr.debug)
        dri_gem_dump_validation_list(bufmgr_gem);

    for (i = 0; i < bufmgr_gem->exec_count; i++) {
        dri_bo *bo = bufmgr_gem->exec_bos[i];
        dri_bo_gem *bo_gem = (dri_bo_gem *)bo;

        /* Need to call swrast on next bo_map */
        bo_gem->swrast = 0;

        /* Disconnect the buffer from the validate list */
        bo_gem->validate_index = -1;
        dri_bo_unreference(bo);
        bufmgr_gem->exec_bos[i] = NULL;
    }
    bufmgr_gem->exec_count = 0;
}

/**
 * Enables unlimited caching of buffer objects for reuse.
 *
 * This is potentially very memory expensive, as the cache at each bucket
 * size is only bounded by how many buffers of that size we've managed to have
 * in flight at once.
 */
void
intel_bufmgr_gem_enable_reuse(dri_bufmgr *bufmgr)
{
    dri_bufmgr_gem *bufmgr_gem = (dri_bufmgr_gem *)bufmgr;
    int i;

    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++) {
        bufmgr_gem->cache_bucket[i].max_entries = -1;
    }
}

static int
dri_gem_check_aperture_space(dri_bo *bo)
{
    return 0;
}

/**
 * Initializes the GEM buffer manager, which uses the kernel to allocate, map,
 * and manage buffer objects.
 *
 * \param fd File descriptor of the opened DRM device.
 * \param batch_size Size of the batchbuffer, used to scale the number of
 *        relocations supported per buffer.
 */
dri_bufmgr *
intel_bufmgr_gem_init(int fd, int batch_size)
{
    dri_bufmgr_gem *bufmgr_gem;
    int i;

    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
    bufmgr_gem->fd = fd;

    /* Let's go with one relocation per every 2 dwords (but round down a bit
     * since a power of two will mean an extra page allocation for the reloc
     * buffer).
     *
     * Every 4 was too few for the blender benchmark.
     */
    bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;

    bufmgr_gem->bufmgr.bo_alloc = dri_gem_bo_alloc;
    bufmgr_gem->bufmgr.bo_reference = dri_gem_bo_reference;
    bufmgr_gem->bufmgr.bo_unreference = dri_gem_bo_unreference;
    bufmgr_gem->bufmgr.bo_map = dri_gem_bo_map;
    bufmgr_gem->bufmgr.bo_unmap = dri_gem_bo_unmap;
    bufmgr_gem->bufmgr.bo_subdata = dri_gem_bo_subdata;
    bufmgr_gem->bufmgr.bo_get_subdata = dri_gem_bo_get_subdata;
    bufmgr_gem->bufmgr.bo_wait_rendering = dri_gem_bo_wait_rendering;
    bufmgr_gem->bufmgr.destroy = dri_bufmgr_gem_destroy;
    bufmgr_gem->bufmgr.process_relocs = dri_gem_process_reloc;
    bufmgr_gem->bufmgr.post_submit = dri_gem_post_submit;
    bufmgr_gem->bufmgr.debug = 0;
    bufmgr_gem->bufmgr.check_aperture_space = dri_gem_check_aperture_space;
    bufmgr_gem->intel_bufmgr.emit_reloc = dri_gem_emit_reloc;

    /* Initialize the linked lists for BO reuse cache. */
    for (i = 0; i < INTEL_GEM_BO_BUCKETS; i++)
        bufmgr_gem->cache_bucket[i].tail = &bufmgr_gem->cache_bucket[i].head;

    return &bufmgr_gem->bufmgr;
}
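
/*
 * Bring-up sketch (illustrative; assumes the caller already opened the DRM
 * device): create the manager, optionally enable the reuse cache, and tear
 * it down when done:
 *
 *     dri_bufmgr *bufmgr = intel_bufmgr_gem_init(fd, 16 * 1024);
 *     intel_bufmgr_gem_enable_reuse(bufmgr);
 *     ...
 *     dri_bufmgr_destroy(bufmgr);
 */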

int
intel_bo_emit_reloc(dri_bo *reloc_buf,
                    uint32_t read_domains, uint32_t write_domain,
                    uint32_t delta, uint32_t offset, dri_bo *target_buf)
{
    struct intel_bufmgr *intel_bufmgr;

    /* The intel_bufmgr vtable sits immediately after the generic dri_bufmgr
     * (see dri_bufmgr_gem above), so step past the base struct to find it.
     */
    intel_bufmgr = (struct intel_bufmgr *)(reloc_buf->bufmgr + 1);

    return intel_bufmgr->emit_reloc(reloc_buf, read_domains, write_domain,
                                    delta, offset, target_buf);
}