/*
 * Copyright 2006 VMware, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
#include "brw_bufmgr.h"
#include "intel_buffers.h"
#include "intel_fbo.h"
#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "common/gen_decoder.h"

#include "util/hash_table.h"

#include <xf86drm.h>
#include <i915_drm.h>
#define FILE_DEBUG_FLAG DEBUG_BUFMGR

#define BATCH_SZ (8192*sizeof(uint32_t))
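/* Layout note: commands are written forward from the start of the batch
 * buffer, while indirect state (see brw_state_batch) is allocated downward
 * from the end; batch->state_batch_offset tracks the boundary between them.
 */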
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch,
                        struct brw_bufmgr *bufmgr,
                        bool has_llc);
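/* state_batch_sizes is keyed directly on integer batch offsets stuffed into
 * pointers, so an identity hash and pointer-equality compare are sufficient.
 */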
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}
static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}
void
intel_batchbuffer_init(struct intel_screen *screen,
                       struct intel_batchbuffer *batch)
{
   struct brw_bufmgr *bufmgr = screen->bufmgr;
   const struct gen_device_info *devinfo = &screen->devinfo;

   if (!devinfo->has_llc) {
      /* Without LLC, build the batch in a malloc'd shadow and upload it to
       * the BO at flush time (see do_flush_locked), rather than writing
       * through an uncached map.
       */
      batch->cpu_map = malloc(BATCH_SZ);
      batch->map = batch->cpu_map;
      batch->map_next = batch->cpu_map;
   }

   batch->reloc_count = 0;
   batch->reloc_array_size = 250;
   batch->relocs = malloc(batch->reloc_array_size *
                          sizeof(struct drm_i915_gem_relocation_entry));
   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   if (INTEL_DEBUG & DEBUG_BATCH) {
      batch->state_batch_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
   }

   batch->use_batch_first =
      screen->kernel_features & KERNEL_ALLOWS_EXEC_BATCH_FIRST;

   /* PIPE_CONTROL needs a w/a but only on gen6 */
   batch->valid_reloc_flags = EXEC_OBJECT_WRITE;
   if (devinfo->gen == 6)
      batch->valid_reloc_flags |= EXEC_OBJECT_NEEDS_GTT;

   intel_batchbuffer_reset(batch, bufmgr, devinfo->has_llc);
}
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
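/* Add a BO to the batch's execbuf validation list (if not already present)
 * and return its index.  bo->index is only a hint: it may be stale when a BO
 * is shared between batches, in which case we fall back to a linear search.
 * READ_ONCE keeps the compiler from re-reading the hint mid-check.
 */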
static unsigned
add_exec_bo(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   brw_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .alignment = bo->align,
         .offset = bo->gtt_offset,
         .flags = bo->kflags,
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   return batch->exec_count++;
}
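/* Drop the reference to the previous last_bo, retire the current batch BO
 * into last_bo, and start over with a freshly allocated, empty batch.
 */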
static void
intel_batchbuffer_reset(struct intel_batchbuffer *batch,
                        struct brw_bufmgr *bufmgr,
                        bool has_llc)
{
   if (batch->last_bo != NULL) {
      brw_bo_unreference(batch->last_bo);
      batch->last_bo = NULL;
   }
   batch->last_bo = batch->bo;

   batch->bo = brw_bo_alloc(bufmgr, "batchbuffer", BATCH_SZ, 4096);
   if (has_llc) {
      batch->map = brw_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   }
   batch->map_next = batch->map;

   add_exec_bo(batch, batch->bo);
   assert(batch->bo->index == 0);

   batch->reserved_space = BATCH_RESERVED;
   batch->state_batch_offset = batch->bo->size;
   batch->needs_sol_reset = false;
   batch->state_base_address_emitted = false;

   /* We don't know what ring the new batch will be sent to until we see the
    * first BEGIN_BATCH or BEGIN_BATCH_BLT.  Mark it as unknown.
    */
   batch->ring = UNKNOWN_RING;

   if (batch->state_batch_sizes)
      _mesa_hash_table_clear(batch->state_batch_sizes, NULL);
}
static void
intel_batchbuffer_reset_and_clear_render_cache(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   intel_batchbuffer_reset(&brw->batch, brw->bufmgr, devinfo->has_llc);
   brw_render_cache_set_clear(brw);
}
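/* Save/restore points for the batch: callers snapshot the batch before
 * emitting a draw (with brw->no_batch_wrap set), and if the batch runs out
 * of space mid-emission, roll back to the snapshot, flush, and retry.
 */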
void
intel_batchbuffer_save_state(struct brw_context *brw)
{
   brw->batch.saved.map_next = brw->batch.map_next;
   brw->batch.saved.reloc_count = brw->batch.reloc_count;
   brw->batch.saved.exec_count = brw->batch.exec_count;
}
void
intel_batchbuffer_reset_to_saved(struct brw_context *brw)
{
   for (int i = brw->batch.saved.exec_count;
        i < brw->batch.exec_count; i++) {
      brw_bo_unreference(brw->batch.exec_bos[i]);
   }
   brw->batch.reloc_count = brw->batch.saved.reloc_count;
   brw->batch.exec_count = brw->batch.saved.exec_count;

   brw->batch.map_next = brw->batch.saved.map_next;
   if (USED_BATCH(brw->batch) == 0)
      brw->batch.ring = UNKNOWN_RING;
}
void
intel_batchbuffer_free(struct intel_batchbuffer *batch)
{
   free(batch->cpu_map);

   for (int i = 0; i < batch->exec_count; i++) {
      brw_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->relocs);
   free(batch->exec_bos);
   free(batch->validation_list);

   brw_bo_unreference(batch->last_bo);
   brw_bo_unreference(batch->bo);
   if (batch->state_batch_sizes)
      _mesa_hash_table_destroy(batch->state_batch_sizes, NULL);
}
void
intel_batchbuffer_require_space(struct brw_context *brw, GLuint sz,
                                enum brw_gpu_ring ring)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* If we're switching rings, implicitly flush the batch. */
   if (unlikely(ring != brw->batch.ring) && brw->batch.ring != UNKNOWN_RING &&
       devinfo->gen >= 6) {
      intel_batchbuffer_flush(brw);
   }

#ifdef DEBUG
   assert(sz < BATCH_SZ - BATCH_RESERVED);
#endif
   if (intel_batchbuffer_space(&brw->batch) < sz)
      intel_batchbuffer_flush(brw);

   /* The intel_batchbuffer_flush() calls above might have changed
    * brw->batch.ring to UNKNOWN_RING, so we need to set it here at the end.
    */
   brw->batch.ring = ring;
}
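/* INTEL_DEBUG=bat support (debug builds only): decode the batch, and the
 * indirect state it points at, using the genxml-based gen_decoder.
 */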
#ifdef DEBUG
#define CSI "\e["
#define BLUE_HEADER  CSI "0;44m"
#define NORMAL       CSI "0m"
static void
decode_struct(struct brw_context *brw, struct gen_spec *spec,
              const char *struct_name, uint32_t *data,
              uint32_t gtt_offset, uint32_t offset, bool color)
{
   struct gen_group *group = gen_spec_find_struct(spec, struct_name);
   if (!group)
      return;

   fprintf(stderr, "%s\n", struct_name);
   gen_print_group(stderr, group, gtt_offset + offset,
                   &data[offset / 4], color);
}
static void
decode_structs(struct brw_context *brw, struct gen_spec *spec,
               const char *struct_name,
               uint32_t *data, uint32_t gtt_offset, uint32_t offset,
               int struct_size, bool color)
{
   struct gen_group *group = gen_spec_find_struct(spec, struct_name);
   if (!group)
      return;

   int entries = brw_state_batch_size(brw, offset) / struct_size;
   for (int i = 0; i < entries; i++) {
      fprintf(stderr, "%s %d\n", struct_name, i);
      gen_print_group(stderr, group, gtt_offset + offset,
                      &data[(offset + i * struct_size) / 4], color);
   }
}
static void
do_batch_dump(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct intel_batchbuffer *batch = &brw->batch;
   struct gen_spec *spec = gen_spec_load(&brw->screen->devinfo);

   if (batch->ring != RENDER_RING)
      return;

   void *map = brw_bo_map(brw, batch->bo, MAP_READ);
   if (map == NULL) {
      fprintf(stderr,
              "WARNING: failed to map batchbuffer, "
              "dumping uploaded data instead.\n");
   }

   uint32_t *data = map ? map : batch->map;
   uint32_t *end = data + USED_BATCH(*batch);
   uint32_t gtt_offset = map ? batch->bo->gtt_offset : 0;
   int length;

   bool color = INTEL_DEBUG & DEBUG_COLOR;
   const char *header_color = color ? BLUE_HEADER : "";
   const char *reset_color  = color ? NORMAL : "";

   for (uint32_t *p = data; p < end; p += length) {
      struct gen_group *inst = gen_spec_find_instruction(spec, p);
      length = gen_group_get_length(inst, p);
      assert(inst == NULL || length > 0);
      length = MAX2(1, length);
      if (inst == NULL) {
         fprintf(stderr, "unknown instruction %08x\n", p[0]);
         continue;
      }

      uint64_t offset = gtt_offset + 4 * (p - data);

      fprintf(stderr, "%s0x%08"PRIx64":  0x%08x:  %-80s%s\n", header_color,
              offset, p[0], gen_group_get_name(inst), reset_color);

      gen_print_group(stderr, inst, offset, p, color);

      switch (gen_group_get_opcode(inst) >> 16) {
      case _3DSTATE_PIPELINED_POINTERS:
         /* Note: these Gen4-5 pointers are full relocations rather than
          * offsets from the start of the batch.  So we need to subtract
          * gtt_offset (the start of the batch) to obtain an offset we
          * can add to the map and get at the data.
          */
         decode_struct(brw, spec, "VS_STATE", data, gtt_offset,
                       (p[1] & ~0x1fu) - gtt_offset, color);
         if (p[2] & 1) {
            decode_struct(brw, spec, "GS_STATE", data, gtt_offset,
                          (p[2] & ~0x1fu) - gtt_offset, color);
         }
         if (p[3] & 1) {
            decode_struct(brw, spec, "CLIP_STATE", data, gtt_offset,
                          (p[3] & ~0x1fu) - gtt_offset, color);
         }
         decode_struct(brw, spec, "SF_STATE", data, gtt_offset,
                       (p[4] & ~0x1fu) - gtt_offset, color);
         decode_struct(brw, spec, "WM_STATE", data, gtt_offset,
                       (p[5] & ~0x1fu) - gtt_offset, color);
         decode_struct(brw, spec, "COLOR_CALC_STATE", data, gtt_offset,
                       (p[6] & ~0x3fu) - gtt_offset, color);
         break;
      case _3DSTATE_BINDING_TABLE_POINTERS_VS:
      case _3DSTATE_BINDING_TABLE_POINTERS_HS:
      case _3DSTATE_BINDING_TABLE_POINTERS_DS:
      case _3DSTATE_BINDING_TABLE_POINTERS_GS:
      case _3DSTATE_BINDING_TABLE_POINTERS_PS: {
         struct gen_group *group =
            gen_spec_find_struct(spec, "RENDER_SURFACE_STATE");
         if (!group)
            break;

         uint32_t bt_offset = p[1] & ~0x1fu;
         int bt_entries = brw_state_batch_size(brw, bt_offset) / 4;
         uint32_t *bt_pointers = &data[bt_offset / 4];
         for (int i = 0; i < bt_entries; i++) {
            fprintf(stderr, "SURFACE_STATE - BTI = %d\n", i);
            gen_print_group(stderr, group, gtt_offset + bt_pointers[i],
                            &data[bt_pointers[i] / 4], color);
         }
         break;
      }
      case _3DSTATE_SAMPLER_STATE_POINTERS_VS:
      case _3DSTATE_SAMPLER_STATE_POINTERS_HS:
      case _3DSTATE_SAMPLER_STATE_POINTERS_DS:
      case _3DSTATE_SAMPLER_STATE_POINTERS_GS:
      case _3DSTATE_SAMPLER_STATE_POINTERS_PS:
         decode_structs(brw, spec, "SAMPLER_STATE", data,
                        gtt_offset, p[1] & ~0x1fu, 4 * 4, color);
         break;
      case _3DSTATE_VIEWPORT_STATE_POINTERS:
         /* DW1 = CLIP_VIEWPORT, DW2 = SF_VIEWPORT, DW3 = CC_VIEWPORT */
         decode_structs(brw, spec, "CLIP_VIEWPORT", data,
                        gtt_offset, p[1] & ~0x3fu, 4 * 4, color);
         decode_structs(brw, spec, "SF_VIEWPORT", data,
                        gtt_offset, p[2] & ~0x3fu, 8 * 4, color);
         decode_structs(brw, spec, "CC_VIEWPORT", data,
                        gtt_offset, p[3] & ~0x3fu, 2 * 4, color);
         break;
      case _3DSTATE_VIEWPORT_STATE_POINTERS_CC:
         decode_structs(brw, spec, "CC_VIEWPORT", data,
                        gtt_offset, p[1] & ~0x3fu, 2 * 4, color);
         break;
      case _3DSTATE_VIEWPORT_STATE_POINTERS_SF_CL:
         decode_structs(brw, spec, "SF_CLIP_VIEWPORT", data,
                        gtt_offset, p[1] & ~0x3fu, 16 * 4, color);
         break;
      case _3DSTATE_SCISSOR_STATE_POINTERS:
         decode_structs(brw, spec, "SCISSOR_RECT", data,
                        gtt_offset, p[1] & ~0x1fu, 2 * 4, color);
         break;
      case _3DSTATE_BLEND_STATE_POINTERS:
         /* TODO: handle Gen8+ extra dword at the beginning */
         decode_structs(brw, spec, "BLEND_STATE", data,
                        gtt_offset, p[1] & ~0x3fu, 8 * 4, color);
         break;
      case _3DSTATE_CC_STATE_POINTERS:
         if (devinfo->gen >= 7) {
            decode_struct(brw, spec, "COLOR_CALC_STATE", data,
                          gtt_offset, p[1] & ~0x3fu, color);
         } else if (devinfo->gen == 6) {
            decode_structs(brw, spec, "BLEND_STATE", data,
                           gtt_offset, p[1] & ~0x3fu, 2 * 4, color);
            decode_struct(brw, spec, "DEPTH_STENCIL_STATE", data,
                          gtt_offset, p[2] & ~0x3fu, color);
            decode_struct(brw, spec, "COLOR_CALC_STATE", data,
                          gtt_offset, p[3] & ~0x3fu, color);
         }
         break;
      case _3DSTATE_DEPTH_STENCIL_STATE_POINTERS:
         decode_struct(brw, spec, "DEPTH_STENCIL_STATE", data,
                       gtt_offset, p[1] & ~0x3fu, color);
         break;
      }
   }

   brw_bo_unmap(batch->bo);
}
#else
static void do_batch_dump(struct brw_context *brw) { }
#endif
/**
 * Called when starting a new batch buffer.
 */
static void
brw_new_batch(struct brw_context *brw)
{
   /* Unreference any BOs held by the previous batch, and reset counts. */
   for (int i = 0; i < brw->batch.exec_count; i++) {
      brw_bo_unreference(brw->batch.exec_bos[i]);
      brw->batch.exec_bos[i] = NULL;
   }
   brw->batch.reloc_count = 0;
   brw->batch.exec_count = 0;
   brw->batch.aperture_space = 0;

   /* Create a new batchbuffer and reset the associated state: */
   intel_batchbuffer_reset_and_clear_render_cache(brw);

   /* If the kernel supports hardware contexts, then most hardware state is
    * preserved between batches; we only need to re-emit state that is required
    * to be in every batch.  Otherwise we need to re-emit all the state that
    * would otherwise be stored in the context (which for all intents and
    * purposes means everything).
    */
   if (brw->hw_ctx == 0)
      brw->ctx.NewDriverState |= BRW_NEW_CONTEXT;

   brw->ctx.NewDriverState |= BRW_NEW_BATCH;

   brw->ib.index_size = -1;

   /* We need to periodically reap the shader time results, because rollover
    * happens every few seconds.  We also want to see results every once in a
    * while, because many programs won't cleanly destroy our context, so the
    * end-of-run printout may not happen.
    */
   if (INTEL_DEBUG & DEBUG_SHADER_TIME)
      brw_collect_and_report_shader_time(brw);
}
/**
 * Called from intel_batchbuffer_flush before emitting MI_BATCHBUFFER_END and
 * sending it off.
 *
 * This function can emit state (say, to preserve registers that aren't saved
 * between batches).  All of this state MUST fit in the reserved space at the
 * end of the batchbuffer.  If you add more GPU state, increase the reserved
 * space by updating the BATCH_RESERVED macro.
 */
static void
brw_finish_batch(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Capture the closing pipeline statistics register values necessary to
    * support query objects (in the non-hardware context world).
    */
   brw_emit_query_end(brw);

   if (brw->batch.ring == RENDER_RING) {
      /* Work around L3 state leaking into contexts created with
       * MI_RESTORE_INHIBIT, which assume the L3 cache is configured
       * according to the hardware defaults.
       */
      if (devinfo->gen >= 7)
         gen7_restore_default_l3_config(brw);

      if (devinfo->is_haswell) {
         /* From the Haswell PRM, Volume 2b, Command Reference: Instructions,
          * 3DSTATE_CC_STATE_POINTERS > "Note":
          *
          *    "SW must program 3DSTATE_CC_STATE_POINTERS command at the end
          *     of every 3D batch buffer followed by a PIPE_CONTROL with
          *     RC flush and CS stall."
          *
          * From the example in the docs, it seems to expect a regular pipe
          * control flush here as well.  We may have done it already, but meh.
          *
          * See also WaAvoidRCZCounterRollover.
          */
         brw_emit_mi_flush(brw);
         BEGIN_BATCH(2);
         OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (2 - 2));
         OUT_BATCH(brw->cc.state_offset | 1);
         ADVANCE_BATCH();
         brw_emit_pipe_control_flush(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH |
                                          PIPE_CONTROL_CS_STALL);
      }
   }
}
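/* Frame throttling: block on an earlier frame's batch so a GPU-bound app
 * queues at most a couple of frames of work ahead of the hardware.
 */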
static void
throttle(struct brw_context *brw)
{
   /* Wait for the swapbuffers before the one we just emitted, so we
    * don't get too many swaps outstanding for apps that are GPU-heavy
    * but not CPU-heavy.
    *
    * We're using intelDRI2Flush (called from the loader before
    * swapbuffer) and glFlush (for front buffer rendering) as the
    * indicator that a frame is done and then throttle when we get
    * here as we prepare to render the next frame.  At this point, the
    * round trips for swap/copy and getting new buffers are done and
    * we'll spend less time waiting on the GPU.
    *
    * Unfortunately, we don't have a handle to the batch containing
    * the swap, and getting our hands on that doesn't seem worth it,
    * so we just use the first batch we emitted after the last swap.
    */
   if (brw->need_swap_throttle && brw->throttle_batch[0]) {
      if (brw->throttle_batch[1]) {
         if (!brw->disable_throttling) {
            /* Pass NULL rather than brw so we avoid perf_debug warnings;
             * stalling is common and expected here...
             */
            brw_bo_wait_rendering(brw->throttle_batch[1]);
         }
         brw_bo_unreference(brw->throttle_batch[1]);
      }
      brw->throttle_batch[1] = brw->throttle_batch[0];
      brw->throttle_batch[0] = NULL;
      brw->need_swap_throttle = false;
      /* Throttling here is more precise than the throttle ioctl, so skip it */
      brw->need_flush_throttle = false;
   }

   if (brw->need_flush_throttle) {
      __DRIscreen *dri_screen = brw->screen->driScrnPriv;
      drmCommandNone(dri_screen->fd, DRM_I915_GEM_THROTTLE);
      brw->need_flush_throttle = false;
   }
}
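/* Thin wrapper around DRM_IOCTL_I915_GEM_EXECBUFFER2(_WR): submit the
 * validation list, optionally passing in / returning a sync fence fd, and
 * write any kernel-assigned object offsets back into the BOs.
 */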
static int
execbuffer(int fd,
           struct intel_batchbuffer *batch,
           uint32_t ctx_id,
           int used,
           int in_fence,
           int *out_fence,
           int flags)
{
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      .batch_len = used,
      .flags = flags,
      .rsvd1 = ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

   if (in_fence != -1) {
      execbuf.rsvd2 = in_fence;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      *out_fence = -1;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   int ret = drmIoctl(fd, cmd, &execbuf);
   if (ret != 0)
      ret = -errno;

   for (int i = 0; i < batch->exec_count; i++) {
      struct brw_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;

      /* Update brw_bo::gtt_offset */
      if (batch->validation_list[i].offset != bo->gtt_offset) {
         DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
             bo->gem_handle, bo->gtt_offset,
             batch->validation_list[i].offset);
         bo->gtt_offset = batch->validation_list[i].offset;
      }
   }

   if (ret == 0 && out_fence != NULL)
      *out_fence = execbuf.rsvd2 >> 32;

   return ret;
}
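/* Finish CPU-side batch construction (unmap on LLC, upload the malloc'd
 * shadow on non-LLC), choose execbuf flags for the target ring, and submit.
 */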
static int
do_flush_locked(struct brw_context *brw, int in_fence_fd, int *out_fence_fd)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   __DRIscreen *dri_screen = brw->screen->driScrnPriv;
   struct intel_batchbuffer *batch = &brw->batch;
   int ret = 0;

   if (devinfo->has_llc) {
      brw_bo_unmap(batch->bo);
   } else {
      ret = brw_bo_subdata(batch->bo, 0, 4 * USED_BATCH(*batch), batch->map);
      if (ret == 0 && batch->state_batch_offset != batch->bo->size) {
         ret = brw_bo_subdata(batch->bo,
                              batch->state_batch_offset,
                              batch->bo->size - batch->state_batch_offset,
                              (char *)batch->map + batch->state_batch_offset);
      }
   }

   if (!brw->screen->no_hw) {
      /* The requirements for using I915_EXEC_NO_RELOC are:
       *
       *   The addresses written in the objects must match the corresponding
       *   reloc.presumed_offset which in turn must match the corresponding
       *   execobject.offset.
       *
       *   Any render targets written to in the batch must be flagged with
       *   EXEC_OBJECT_WRITE.
       *
       *   To avoid stalling, execobject.offset should match the current
       *   address of that object within the active context.
       */
      int flags = I915_EXEC_NO_RELOC;

      if (devinfo->gen >= 6 && batch->ring == BLT_RING) {
         flags |= I915_EXEC_BLT;
      } else {
         flags |= I915_EXEC_RENDER;
      }
      if (batch->needs_sol_reset)
         flags |= I915_EXEC_GEN7_SOL_RESET;

      if (ret == 0) {
         uint32_t hw_ctx = batch->ring == RENDER_RING ? brw->hw_ctx : 0;

         struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
         assert(entry->handle == batch->bo->gem_handle);
         entry->relocation_count = batch->reloc_count;
         entry->relocs_ptr = (uintptr_t) batch->relocs;

         if (batch->use_batch_first) {
            flags |= I915_EXEC_BATCH_FIRST | I915_EXEC_HANDLE_LUT;

            /* Move the batch to the end of the validation list */
            struct drm_i915_gem_exec_object2 tmp;
            const unsigned index = batch->exec_count - 1;

            tmp = *entry;
            *entry = batch->validation_list[index];
            batch->validation_list[index] = tmp;
         }

         ret = execbuffer(dri_screen->fd, batch, hw_ctx,
                          4 * USED_BATCH(*batch),
                          in_fence_fd, out_fence_fd, flags);
      }

      throttle(brw);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      do_batch_dump(brw);

   if (brw->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
      brw_check_for_reset(brw);

   if (ret != 0) {
      fprintf(stderr, "intel_do_flush_locked failed: %s\n", strerror(-ret));
      exit(1);
   }

   return ret;
}
/**
 * The in_fence_fd is ignored if -1.  Otherwise this function takes ownership
 * of the fd.
 *
 * The out_fence_fd is ignored if NULL.  Otherwise, the caller takes ownership
 * of the returned fd.
 */
int
_intel_batchbuffer_flush_fence(struct brw_context *brw,
                               int in_fence_fd, int *out_fence_fd,
                               const char *file, int line)
{
   int ret;

   if (USED_BATCH(brw->batch) == 0)
      return 0;

   if (brw->throttle_batch[0] == NULL) {
      brw->throttle_batch[0] = brw->batch.bo;
      brw_bo_reference(brw->throttle_batch[0]);
   }

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = 4 * USED_BATCH(brw->batch);
      int bytes_for_state = brw->batch.bo->size - brw->batch.state_batch_offset;
      int total_bytes = bytes_for_commands + bytes_for_state;
      fprintf(stderr, "%s:%d: Batchbuffer flush with %4db (pkt) + "
              "%4db (state) = %4db (%0.1f%%)\n", file, line,
              bytes_for_commands, bytes_for_state,
              total_bytes,
              100.0f * total_bytes / BATCH_SZ);
   }

   brw->batch.reserved_space = 0;

   brw_finish_batch(brw);

   /* Mark the end of the buffer. */
   intel_batchbuffer_emit_dword(&brw->batch, MI_BATCH_BUFFER_END);
   if (USED_BATCH(brw->batch) & 1) {
      /* Round batchbuffer usage to 2 DWORDs. */
      intel_batchbuffer_emit_dword(&brw->batch, MI_NOOP);
   }

   intel_upload_finish(brw);

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!brw->no_batch_wrap);

   ret = do_flush_locked(brw, in_fence_fd, out_fence_fd);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      fprintf(stderr, "waiting for idle\n");
      brw_bo_wait_rendering(brw->batch.bo);
   }

   /* Start a new batch buffer. */
   brw_new_batch(brw);

   return ret;
}
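/* Conservative check that the BOs referenced by the batch so far, plus
 * extra_space, will fit in the GTT aperture; callers flush when this fails.
 */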
bool
brw_batch_has_aperture_space(struct brw_context *brw, unsigned extra_space)
{
   return brw->batch.aperture_space + extra_space <=
          brw->screen->aperture_threshold;
}
bool
brw_batch_references(struct intel_batchbuffer *batch, struct brw_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return true;

   for (int i = 0; i < batch->exec_count; i++) {
      if (batch->exec_bos[i] == bo)
         return true;
   }
   return false;
}
/* This is the only way buffers get added to the validation list.
 */
uint64_t
brw_emit_reloc(struct intel_batchbuffer *batch, uint32_t batch_offset,
               struct brw_bo *target, uint32_t target_offset,
               unsigned int reloc_flags)
{
   assert(target != NULL);

   if (batch->reloc_count == batch->reloc_array_size) {
      batch->reloc_array_size *= 2;
      batch->relocs = realloc(batch->relocs,
                              batch->reloc_array_size *
                              sizeof(struct drm_i915_gem_relocation_entry));
   }

   /* Check args */
   assert(batch_offset <= BATCH_SZ - sizeof(uint32_t));

   unsigned int index = add_exec_bo(batch, target);
   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];

   if (reloc_flags)
      entry->flags |= reloc_flags & batch->valid_reloc_flags;

   batch->relocs[batch->reloc_count++] =
      (struct drm_i915_gem_relocation_entry) {
         .offset = batch_offset,
         .delta = target_offset,
         .target_handle = batch->use_batch_first ? index : target->gem_handle,
         .presumed_offset = entry->offset,
      };

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel.
    */
   return entry->offset + target_offset;
}
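/* Callers normally reach brw_emit_reloc() via the OUT_RELOC/OUT_RELOC64
 * batch macros; see brw_store_register_mem32() below for a typical pattern.
 * The returned presumed address is written into the command stream, and the
 * recorded relocation lets the kernel patch it if the BO moves.
 */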
void
intel_batchbuffer_data(struct brw_context *brw,
                       const void *data, GLuint bytes, enum brw_gpu_ring ring)
{
   assert((bytes & 3) == 0);
   intel_batchbuffer_require_space(brw, bytes, ring);
   memcpy(brw->batch.map_next, data, bytes);
   brw->batch.map_next += bytes >> 2;
}
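/* Emit `size` consecutive MI_LOAD_REGISTER_MEM commands, loading registers
 * reg, reg+4, ... from successive DWords of the buffer at `offset`.
 */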
static void
load_sized_register_mem(struct brw_context *brw,
                        uint32_t reg,
                        struct brw_bo *bo,
                        uint32_t offset,
                        int size)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   int i;

   /* MI_LOAD_REGISTER_MEM only exists on Gen7+. */
   assert(devinfo->gen >= 7);

   if (devinfo->gen >= 8) {
      BEGIN_BATCH(4 * size);
      for (i = 0; i < size; i++) {
         OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (4 - 2));
         OUT_BATCH(reg + i * 4);
         OUT_RELOC64(bo, 0, offset + i * 4);
      }
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(3 * size);
      for (i = 0; i < size; i++) {
         OUT_BATCH(GEN7_MI_LOAD_REGISTER_MEM | (3 - 2));
         OUT_BATCH(reg + i * 4);
         OUT_RELOC(bo, 0, offset + i * 4);
      }
      ADVANCE_BATCH();
   }
}
void
brw_load_register_mem(struct brw_context *brw,
                      uint32_t reg,
                      struct brw_bo *bo,
                      uint32_t offset)
{
   load_sized_register_mem(brw, reg, bo, offset, 1);
}
void
brw_load_register_mem64(struct brw_context *brw,
                        uint32_t reg,
                        struct brw_bo *bo,
                        uint32_t offset)
{
   load_sized_register_mem(brw, reg, bo, offset, 2);
}
/*
 * Write an arbitrary 32-bit register to a buffer via MI_STORE_REGISTER_MEM.
 */
void
brw_store_register_mem32(struct brw_context *brw,
                         struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   if (devinfo->gen >= 8) {
      BEGIN_BATCH(4);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
      OUT_BATCH(reg);
      OUT_RELOC64(bo, RELOC_WRITE, offset);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(3);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
      OUT_BATCH(reg);
      OUT_RELOC(bo, RELOC_WRITE | RELOC_NEEDS_GGTT, offset);
      ADVANCE_BATCH();
   }
}
/*
 * Write an arbitrary 64-bit register to a buffer via MI_STORE_REGISTER_MEM.
 */
void
brw_store_register_mem64(struct brw_context *brw,
                         struct brw_bo *bo, uint32_t reg, uint32_t offset)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   /* MI_STORE_REGISTER_MEM only stores a single 32-bit value, so to
    * read a full 64-bit register, we need to do two of them.
    */
   if (devinfo->gen >= 8) {
      BEGIN_BATCH(8);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
      OUT_BATCH(reg);
      OUT_RELOC64(bo, RELOC_WRITE, offset);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (4 - 2));
      OUT_BATCH(reg + sizeof(uint32_t));
      OUT_RELOC64(bo, RELOC_WRITE, offset + sizeof(uint32_t));
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
      OUT_BATCH(reg);
      OUT_RELOC(bo, RELOC_WRITE | RELOC_NEEDS_GGTT, offset);
      OUT_BATCH(MI_STORE_REGISTER_MEM | (3 - 2));
      OUT_BATCH(reg + sizeof(uint32_t));
      OUT_RELOC(bo, RELOC_WRITE | RELOC_NEEDS_GGTT, offset + sizeof(uint32_t));
      ADVANCE_BATCH();
   }
}
/*
 * Write a 32-bit register using immediate data.
 */
void
brw_load_register_imm32(struct brw_context *brw, uint32_t reg, uint32_t imm)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   BEGIN_BATCH(3);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
   OUT_BATCH(reg);
   OUT_BATCH(imm);
   ADVANCE_BATCH();
}
/*
 * Write a 64-bit register using immediate data.
 */
void
brw_load_register_imm64(struct brw_context *brw, uint32_t reg, uint64_t imm)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   BEGIN_BATCH(5);
   OUT_BATCH(MI_LOAD_REGISTER_IMM | (5 - 2));
   OUT_BATCH(reg);
   OUT_BATCH(imm & 0xffffffff);
   OUT_BATCH(reg + 4);
   OUT_BATCH(imm >> 32);
   ADVANCE_BATCH();
}
/*
 * Copies a 32-bit register.
 */
void
brw_load_register_reg(struct brw_context *brw, uint32_t src, uint32_t dest)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 8 || devinfo->is_haswell);

   BEGIN_BATCH(3);
   OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
   OUT_BATCH(src);
   OUT_BATCH(dest);
   ADVANCE_BATCH();
}
/*
 * Copies a 64-bit register.
 */
void
brw_load_register_reg64(struct brw_context *brw, uint32_t src, uint32_t dest)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 8 || devinfo->is_haswell);

   BEGIN_BATCH(6);
   OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
   OUT_BATCH(src);
   OUT_BATCH(dest);
   OUT_BATCH(MI_LOAD_REGISTER_REG | (3 - 2));
   OUT_BATCH(src + sizeof(uint32_t));
   OUT_BATCH(dest + sizeof(uint32_t));
   ADVANCE_BATCH();
}
/*
 * Write 32-bits of immediate data to a GPU memory buffer.
 */
void
brw_store_data_imm32(struct brw_context *brw, struct brw_bo *bo,
                     uint32_t offset, uint32_t imm)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   BEGIN_BATCH(4);
   OUT_BATCH(MI_STORE_DATA_IMM | (4 - 2));
   if (devinfo->gen >= 8)
      OUT_RELOC64(bo, RELOC_WRITE, offset);
   else {
      OUT_BATCH(0); /* MBZ */
      OUT_RELOC(bo, RELOC_WRITE, offset);
   }
   OUT_BATCH(imm);
   ADVANCE_BATCH();
}
/*
 * Write 64-bits of immediate data to a GPU memory buffer.
 */
void
brw_store_data_imm64(struct brw_context *brw, struct brw_bo *bo,
                     uint32_t offset, uint64_t imm)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   BEGIN_BATCH(5);
   OUT_BATCH(MI_STORE_DATA_IMM | (5 - 2));
   if (devinfo->gen >= 8)
      OUT_RELOC64(bo, RELOC_WRITE, offset);
   else {
      OUT_BATCH(0); /* MBZ */
      OUT_RELOC(bo, RELOC_WRITE, offset);
   }
   OUT_BATCH(imm & 0xffffffffu);
   OUT_BATCH(imm >> 32);
   ADVANCE_BATCH();
}