/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "anv_private.h"

#include "vk_format_info.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};

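/* Copy the fields of src selected by copy_mask (a bitmask indexed by
 * VK_DYNAMIC_STATE_* values) into dest.  Fields outside the mask are left
 * untouched.
 */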
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}

static void
anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(state, 0, sizeof(*state));

   state->current_pipeline = UINT32_MAX;
   state->restart_index = UINT32_MAX;
   state->gfx.dynamic = default_dynamic_state;
}

static void
anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
                              struct anv_cmd_pipeline_state *pipe_state)
{
   for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++) {
      if (pipe_state->push_descriptors[i]) {
         anv_descriptor_set_layout_unref(cmd_buffer->device,
                                         pipe_state->push_descriptors[i]->set.layout);
         vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
      }
   }
}

static void
anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
   anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);

   for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
      vk_free(&cmd_buffer->pool->alloc, state->push_constants[i]);

   vk_free(&cmd_buffer->pool->alloc, state->attachments);
}

static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   anv_cmd_state_finish(cmd_buffer);
   anv_cmd_state_init(cmd_buffer);
}

/**
 * This function updates the size of the push constant buffer we need to emit.
 * This is called in various parts of the driver to ensure that different
 * pieces of push constant data get emitted as needed.  However, it is
 * important that we never shrink the size of the buffer.  For example, a
 * compute shader dispatch will always call this for the base group id, which
 * has an offset in the push constant buffer that is smaller than the offset
 * for storage image data.  If the compute shader has storage images, we will
 * call this again with a larger size during binding table emission.  However,
 * if we dispatch the compute shader again without dirtying our descriptors,
 * we would still call this function with a smaller size for the base group
 * id, and not for the images.  That would incorrectly shrink the size of the
 * push constant data we emit with that dispatch, making us drop the image
 * data.
 */
VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = vk_alloc(&cmd_buffer->pool->alloc, size, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      (*ptr)->size = size;
   } else if ((*ptr)->size < size) {
      *ptr = vk_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }
      (*ptr)->size = size;
   }

   return VK_SUCCESS;
}

static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->batch.status = VK_SUCCESS;

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_pool, 4096);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_pool, 16384);

   anv_cmd_state_init(cmd_buffer);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);
      for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
         pCommandBuffers[i] = VK_NULL_HANDLE;
   }

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_cmd_state_finish(cmd_buffer);

   vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      if (!cmd_buffer)
         continue;

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

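/* Reset the command buffer to its just-allocated state: drop all recorded
 * commands and state, and return the state streams to their pools.
 */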
VkResult
anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
   cmd_buffer->usage_flags = 0;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_pool, 4096);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_pool, 16384);

   return VK_SUCCESS;
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   return anv_cmd_buffer_reset(cmd_buffer);
}

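/* Invoke the hardware-generation-specific implementation of `func` for the
 * device described by `devinfo`, e.g. gen9_##func on a gen9 part.
 */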
#define anv_genX_call(devinfo, func, ...)          \
   switch ((devinfo)->gen) {                       \
   case 7:                                         \
      if ((devinfo)->is_haswell) {                 \
         gen75_##func(__VA_ARGS__);                \
      } else {                                     \
         gen7_##func(__VA_ARGS__);                 \
      }                                            \
      break;                                       \
   case 8:                                         \
      gen8_##func(__VA_ARGS__);                    \
      break;                                       \
   case 9:                                         \
      gen9_##func(__VA_ARGS__);                    \
      break;                                       \
   case 10:                                        \
      gen10_##func(__VA_ARGS__);                   \
      break;                                       \
   case 11:                                        \
      gen11_##func(__VA_ARGS__);                   \
      break;                                       \
   default:                                        \
      assert(!"Unknown hardware generation");      \
   }

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_emit_state_base_address,
                 cmd_buffer);
}

void
anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
                                  const struct anv_image *image,
                                  VkImageAspectFlagBits aspect,
                                  enum isl_aux_usage aux_usage,
                                  uint32_t level,
                                  uint32_t base_layer,
                                  uint32_t layer_count)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_buffer_mark_image_written,
                 cmd_buffer, image, aspect, aux_usage,
                 level, base_layer, layer_count);
}

void
anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
{
   anv_genX_call(&cmd_buffer->device->info,
                 cmd_emit_conditional_render_predicate,
                 cmd_buffer);
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute.base.pipeline = pipeline;
      cmd_buffer->state.compute.pipeline_dirty = true;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.gfx.base.pipeline = pipeline;
      cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.gfx.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
      cmd_buffer->state.gfx.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
      cmd_buffer->state.gfx.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

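/* Bind a single descriptor set at set_index for the given bind point.  If
 * dynamic_offsets is non-NULL, the set's dynamic offsets are consumed from
 * the front of the array and the caller's pointer and count are advanced
 * accordingly.
 */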
void
anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_pipeline_layout *layout,
                                   uint32_t set_index,
                                   struct anv_descriptor_set *set,
                                   uint32_t *dynamic_offset_count,
                                   const uint32_t **dynamic_offsets)
{
   struct anv_descriptor_set_layout *set_layout =
      layout->set[set_index].layout;

   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }
   pipe_state->descriptors[set_index] = set;

   if (dynamic_offsets) {
      if (set_layout->dynamic_offset_count > 0) {
         uint32_t dynamic_offset_start =
            layout->set[set_index].dynamic_offset_start;

         /* Assert that everything is in range */
         assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
         assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
                ARRAY_SIZE(pipe_state->dynamic_offsets));

         typed_memcpy(&pipe_state->dynamic_offsets[dynamic_offset_start],
                      *dynamic_offsets, set_layout->dynamic_offset_count);

         *dynamic_offsets += set_layout->dynamic_offset_count;
         *dynamic_offset_count -= set_layout->dynamic_offset_count;
      }
   }

   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      cmd_buffer->state.descriptors_dirty |=
         set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
   }

   /* Pipeline layout objects are required to live at least while any command
    * buffers that use them are in recording state.  We need to grab a
    * reference to the pipeline layout being bound here so we can compute
    * correct dynamic offsets for VK_DESCRIPTOR_TYPE_*_DYNAMIC in
    * dynamic_offset_for_binding() when we record draw commands that come
    * after this.
    */
   pipe_state->layout = layout;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         layout, firstSet + i, set,
                                         &dynamicOffsetCount,
                                         &pDynamicOffsets);
   }
}

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up the vertex buffers since we need the buffer
    * strides from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
   }
}

void anv_CmdBindTransformFeedbackBuffersEXT(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets,
    const VkDeviceSize*                         pSizes)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;

   /* We have to defer setting up the transform feedback buffers since we
    * need the buffer strides from the pipeline.
    */

   assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      if (pBuffers[i] == VK_NULL_HANDLE) {
         xfb[firstBinding + i].buffer = NULL;
      } else {
         ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
         xfb[firstBinding + i].buffer = buffer;
         xfb[firstBinding + i].offset = pOffsets[i];
         xfb[firstBinding + i].size =
            anv_buffer_get_range(buffer, pOffsets[i],
                                 pSizes ? pSizes[i] : VK_WHOLE_SIZE);
      }
   }
}

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}

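/* Allocate `size` bytes from the command buffer's dynamic state stream, copy
 * `data` into it, and return the resulting state.
 */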
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

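/* OR together the dwords of `a` and `b` (typically two partial GPU command
 * packets, one baked into the pipeline and one built from dynamic state)
 * into freshly allocated dynamic state.
 */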
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}

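/* Resolve one push constant param slot: either a BRW_PARAM_BUILTIN_* value
 * computed here, or a dword loaded from the push constant data at the offset
 * encoded in the param.
 */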
static uint32_t
anv_push_constant_value(struct anv_push_constants *data, uint32_t param)
{
   if (BRW_PARAM_IS_BUILTIN(param)) {
      switch (param) {
      case BRW_PARAM_BUILTIN_ZERO:
         return 0;
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X:
         return data->base_work_group_id[0];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y:
         return data->base_work_group_id[1];
      case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z:
         return data->base_work_group_id[2];
      default:
         unreachable("Invalid param builtin");
      }
   } else {
      uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
      assert(offset % sizeof(uint32_t) == 0);
      if (offset < data->size)
         return *(uint32_t *)((uint8_t *)data + offset);
      else
         return 0;
   }
}

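/* Build the push constant buffer for a graphics stage by resolving each
 * entry of prog_data->param into freshly allocated dynamic state.
 */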
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;

   /* If we don't have this stage, bail. */
   if (!anv_pipeline_has_stage(pipeline, stage))
      return (struct anv_state) { .offset = 0 };

   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      pipeline->shaders[stage]->prog_data;

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++)
      u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);

   return state;
}

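/* Compute push constants come in two parts: a cross-thread section shared by
 * all invocations, followed by a per-thread section replicated once per HW
 * thread (this is where BRW_PARAM_BUILTIN_SUBGROUP_ID gets its per-thread
 * value).
 */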
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   /* If we don't actually have any push constants, bail. */
   if (cs_prog_data->push.total.size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   if (cs_prog_data->push.cross_thread.size > 0) {
      for (unsigned i = 0;
           i < cs_prog_data->push.cross_thread.dwords;
           i++) {
         assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
         u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);
      }
   }

   if (cs_prog_data->push.per_thread.size > 0) {
      for (unsigned t = 0; t < cs_prog_data->threads; t++) {
         unsigned dst =
            8 * (cs_prog_data->push.per_thread.regs * t +
                 cs_prog_data->push.cross_thread.regs);
         unsigned src = cs_prog_data->push.cross_thread.dwords;
         for ( ; src < prog_data->nr_params; src++, dst++) {
            if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
               u32_map[dst] = t;
            } else {
               u32_map[dst] =
                  anv_push_constant_value(data, prog_data->param[src]);
            }
         }
      }
   }

   return state;
}

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
                                                   stage, client_data);
      if (result != VK_SUCCESS)
         continue;

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   if (!pool)
      return;

   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   vk_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
                       &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_reset(cmd_buffer);
   }

   return VK_SUCCESS;
}

void anv_TrimCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolTrimFlags                      flags)
{
   /* Nothing for us to do here.  Our pools stay pretty tidy. */
}

/**
 * Return NULL if the current subpass has no depth/stencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == NULL)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment->attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}

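/* Return the push descriptor set for the given bind point, allocating it on
 * first use.  Returns NULL (and flags a batch error) if allocation fails.
 */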
static struct anv_descriptor_set *
anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
                                   VkPipelineBindPoint bind_point,
                                   struct anv_descriptor_set_layout *layout,
                                   uint32_t _set)
{
   struct anv_cmd_pipeline_state *pipe_state;
   if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
      pipe_state = &cmd_buffer->state.compute.base;
   } else {
      assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
      pipe_state = &cmd_buffer->state.gfx.base;
   }

   struct anv_push_descriptor_set **push_set =
      &pipe_state->push_descriptors[_set];

   if (*push_set == NULL) {
      *push_set = vk_zalloc(&cmd_buffer->pool->alloc,
                            sizeof(struct anv_push_descriptor_set), 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*push_set == NULL) {
         anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
         return NULL;
      }
   }

   struct anv_descriptor_set *set = &(*push_set)->set;

   if (set->layout != layout) {
      if (set->layout)
         anv_descriptor_set_layout_unref(cmd_buffer->device, set->layout);
      anv_descriptor_set_layout_ref(layout);
      set->layout = layout;
   }
   set->size = anv_descriptor_set_layout_size(layout);
   set->buffer_view_count = layout->buffer_view_count;
   set->buffer_views = (*push_set)->buffer_views;

   if (layout->descriptor_buffer_size &&
       ((*push_set)->set_used_on_gpu ||
        set->desc_mem.alloc_size < layout->descriptor_buffer_size)) {
      /* The previous buffer is either actively used by some GPU command (so
       * we can't modify it) or is too small.  Allocate a new one.
       */
      struct anv_state desc_mem =
         anv_state_stream_alloc(&cmd_buffer->dynamic_state_stream,
                                layout->descriptor_buffer_size, 32);
      if (set->desc_mem.alloc_size) {
         /* TODO: Do we really need to copy all the time? */
         memcpy(desc_mem.map, set->desc_mem.map,
                MIN2(desc_mem.alloc_size, set->desc_mem.alloc_size));
      }
      set->desc_mem = desc_mem;

      struct anv_address addr = {
         .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
         .offset = set->desc_mem.offset,
      };

      const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
      set->desc_surface_state =
         anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
                                isl_dev->ss.size, isl_dev->ss.align);
      anv_fill_buffer_surface_state(cmd_buffer->device,
                                    set->desc_surface_state,
                                    ISL_FORMAT_R32G32B32A32_FLOAT,
                                    addr, layout->descriptor_buffer_size, 1);
   }

   return set;
}

void anv_CmdPushDescriptorSetKHR(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    uint32_t                                    descriptorWriteCount,
    const VkWriteDescriptorSet*                 pDescriptorWrites)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_SETS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
                                         set_layout, _set);
   if (!set)
      return;

   /* Go through the user supplied descriptors. */
   for (uint32_t i = 0; i < descriptorWriteCount; i++) {
      const VkWriteDescriptorSet *write = &pDescriptorWrites[i];

      switch (write->descriptorType) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            anv_descriptor_set_write_image_view(cmd_buffer->device, set,
                                                write->pImageInfo + j,
                                                write->descriptorType,
                                                write->dstBinding,
                                                write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            ANV_FROM_HANDLE(anv_buffer_view, bview,
                            write->pTexelBufferView[j]);

            anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
                                                 write->descriptorType,
                                                 bview,
                                                 write->dstBinding,
                                                 write->dstArrayElement + j);
         }
         break;

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
         for (uint32_t j = 0; j < write->descriptorCount; j++) {
            assert(write->pBufferInfo[j].buffer);
            ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);

            anv_descriptor_set_write_buffer(cmd_buffer->device, set,
                                            &cmd_buffer->surface_state_stream,
                                            write->descriptorType,
                                            buffer,
                                            write->dstBinding,
                                            write->dstArrayElement + j,
                                            write->pBufferInfo[j].offset,
                                            write->pBufferInfo[j].range);
         }
         break;

      default:
         break;
      }
   }

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdPushDescriptorSetWithTemplateKHR(
    VkCommandBuffer                             commandBuffer,
    VkDescriptorUpdateTemplate                  descriptorUpdateTemplate,
    VkPipelineLayout                            _layout,
    uint32_t                                    _set,
    const void*                                 pData)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_descriptor_update_template, template,
                   descriptorUpdateTemplate);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);

   assert(_set < MAX_PUSH_DESCRIPTORS);

   struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;

   struct anv_descriptor_set *set =
      anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
                                         set_layout, _set);
   if (!set)
      return;

   anv_descriptor_set_write_template(cmd_buffer->device, set,
                                     &cmd_buffer->surface_state_stream,
                                     template,
                                     pData);

   anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
                                      layout, _set, set, NULL, NULL);
}

void anv_CmdSetDeviceMask(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    deviceMask)
{
   /* No-op */
}