/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains all of the logic for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd* entrypoints.
 * It is concerned entirely with state emission and not with the command
 * buffer data structure itself.  As far as this file is concerned, most of
 * anv_cmd_buffer is magic.
 */
/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = { .count = 0 },
   .scissor = { .count = 0 },
   .line_width = 1.0f,
   .depth_bias = { .bias = 0.0f, .clamp = 0.0f, .slope = 0.0f },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = { .min = 0.0f, .max = 1.0f },
   .stencil_compare_mask = { .front = ~0u, .back = ~0u },
   .stencil_write_mask = { .front = ~0u, .back = ~0u },
   .stencil_reference = { .front = 0u, .back = 0u },
};
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
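
/* Illustrative sketch (ours, not from the original source): a caller that
 * wants to copy only the viewport and scissor arrays builds the mask from
 * the VkDynamicState enum values:
 *
 *    uint32_t mask = (1 << VK_DYNAMIC_STATE_VIEWPORT) |
 *                    (1 << VK_DYNAMIC_STATE_SCISSOR);
 *    anv_dynamic_state_copy(&dest, &src, mask);
 *
 * Fields whose bit is clear in the mask keep dest's previous values.
 */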
static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));
   memset(state->binding_tables, 0, sizeof(state->binding_tables));
   memset(state->samplers, 0, sizeof(state->samplers));

   /* 0 isn't a valid config.  This ensures that we always configure L3$. */
   cmd_buffer->state.current_l3_config = 0;

   state->dirty = 0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->restart_index = UINT32_MAX;
   state->dynamic = default_dynamic_state;
   state->need_query_wa = true;

   if (state->attachments != NULL) {
      anv_free(&cmd_buffer->pool->alloc, state->attachments);
      state->attachments = NULL;
   }

   state->gen7.index_buffer = NULL;
}
/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
void
anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                const VkRenderPassBeginInfo *info)
{
   struct anv_cmd_state *state = &cmd_buffer->state;
   ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);

   anv_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return;
   }

   state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
                                  pass->attachment_count *
                                     sizeof(state->attachments[0]),
                                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
      abort();
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags clear_aspects = 0;

      if (anv_format_is_color(att->format)) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
      } else {
         /* depthstencil attachment */
         if (att->format->has_depth &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
         if (att->format->has_stencil &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR)
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
      }

      state->attachments[i].pending_clear_aspects = clear_aspects;
      if (clear_aspects) {
         assert(info->clearValueCount > i);
         state->attachments[i].clear_value = info->pClearValues[i];
      }
   }
}
static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else if ((*ptr)->size < size) {
      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   (*ptr)->size = size;

   return VK_SUCCESS;
}

#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
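
/* Illustrative expansion (ours): ensuring space for the `dynamic` field
 * before writing dynamic buffer offsets, as the descriptor set binding code
 * below does, expands to
 *
 *    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage,
 *       (offsetof(struct anv_push_constants, dynamic) +
 *        sizeof(cmd_buffer->state.push_constants[0]->dynamic)));
 *
 * i.e. the allocation is grown just far enough to cover the requested field.
 */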
static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;
   cmd_buffer->state.attachments = NULL;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   if (pool != NULL) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}
VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS)
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);

   return result;
}
static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}
VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->usage_flags = 0;
   cmd_buffer->state.current_pipeline = UINT32_MAX;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   return VK_SUCCESS;
}
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell)
         return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
      else
         return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 9:
      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}
VkResult anv_BeginCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state.  Otherwise, we must *reset* its state.  In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);

   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

      anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
   }

   return VK_SUCCESS;
}
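
/* API-side sketch (ours, for illustration): because the begin path above
 * always resets, an application can simply re-record a buffer without an
 * explicit vkResetCommandBuffer call:
 *
 *    vkBeginCommandBuffer(cmd, &begin_info);   // implicit reset
 *    ... record commands ...
 *    vkEndCommandBuffer(cmd);
 */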
VkResult anv_EndCommandBuffer(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}
void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
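
/* Our reading of the graphics case above, as a sketch: binding a pipeline
 * copies the pipeline's baked (non-dynamic) state into the command buffer
 * and dirties it for re-emission; any state the pipeline declares dynamic is
 * excluded from dynamic_state_mask, so it keeps whatever the most recent
 * vkCmdSet* call recorded.
 */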
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.dynamic.viewport.count < total_count)
      cmd_buffer->state.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.dynamic.scissor.count < total_count)
      cmd_buffer->state.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + descriptorSetCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         anv_foreach_stage(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  uint32_t range = 0;
                  if (desc->buffer_view)
                     range = desc->buffer_view->range;
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = range;
                  desc++;
                  d++;
               }
            }
         }

         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;

         dynamic_slot += set_layout->dynamic_offset_count;
      }
   }
}
void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffers since we need the buffer
    * stride from the pipeline.
    */
   assert(firstBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
   }
}
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0.
    */
   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}
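
/* Worked example (ours, for illustration): on gen7 the relocation lands at
 * byte offset state.offset + 1 * 4 = state.offset + 4 (dword 1 of the
 * SURFACE_STATE); on gen9 it lands at state.offset + 8 * 4 =
 * state.offset + 32 (dword 8, the low half of the 64-bit address).
 */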
const struct anv_format *
anv_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);

   default:
      unreachable("Invalid descriptor type");
   }
}
VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  gl_shader_stage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_bind_map *map;
   uint32_t color_count, bias, state_offset;

   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      map = &cmd_buffer->state.pipeline->bindings[stage];
      bias = MAX_RTS;
      color_count = subpass->color_count;
      break;
   case MESA_SHADER_COMPUTE:
      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
      bias = 1;
      color_count = 0;
      break;
   default:
      map = &cmd_buffer->state.pipeline->bindings[stage];
      bias = 0;
      color_count = 0;
      break;
   }

   if (color_count + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t a = 0; a < color_count; a++) {
      const struct anv_image_view *iview =
         fb->attachments[subpass->color_attachments[a]];

      assert(iview->color_rt_surface_state.alloc_size);
      bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                              iview->bo, iview->offset);
   }

   if (stage == MESA_SHADER_COMPUTE &&
       cmd_buffer->state.compute_pipeline->cs_prog_data.uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const struct anv_format *format =
         anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format->isl_format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         anv_image_view_fill_image_param(cmd_buffer->device, desc->image_view,
                                         image_param);
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         anv_buffer_view_fill_image_param(cmd_buffer->device, desc->buffer_view,
                                          image_param);
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == map->image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}
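
/* Layout sketch (ours, for illustration): for a fragment shader with two
 * render targets and three descriptor surfaces, the table built above is
 *
 *    bt_map[0 .. 1]                  render-target surface states
 *    bt_map[bias + 0 .. bias + 2]    descriptor surface states
 *
 * with bias = MAX_RTS, so shader-visible surface indices are simply biased
 * by the maximum render-target count.
 */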
VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             gl_shader_stage stage, struct anv_state *state)
{
   struct anv_pipeline_bind_map *map;

   if (stage == MESA_SHADER_COMPUTE)
      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
   else
      map = &cmd_buffer->state.pipeline->bindings[stage];

   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
/**
 * @brief Setup the command buffer for recording commands inside the given
 * subpass.
 *
 * This does not record all commands needed for starting the subpass.
 * Starting the subpass may require additional commands.
 *
 * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
 * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT all set up the
 * command buffer for recording commands for some subpass.  But only the
 * first two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
 */
void
anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   case 8:
      gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   case 9:
      gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
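
/* Note (ours, for illustration): each prog_data->param[i] here encodes a byte
 * offset into struct anv_push_constants, so the loop above is a gather:
 *
 *    u32_map[0] = data bytes [param[0], param[0] + 4)
 *    u32_map[1] = data bytes [param[1], param[1] + 4)
 *    ...
 *
 * This lets the shader compiler reorder or deduplicate uniforms without the
 * command buffer code knowing what any individual slot means.
 */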
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   const unsigned push_constant_data_size =
      (local_id_dwords + prog_data->nr_params) * 4;
   const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   const unsigned param_aligned_count =
      reg_aligned_constant_size / sizeof(uint32_t);

   /* If we don't actually have any push constants, bail. */
   if (reg_aligned_constant_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned threads = pipeline->cs_thread_width_max;
   const unsigned total_push_constants_size =
      reg_aligned_constant_size * threads;
   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
                                reg_aligned_constant_size);

   /* Setup uniform data for the first thread */
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   /* Copy uniform data from the first thread to every other thread */
   const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
   for (unsigned t = 1; t < threads; t++) {
      memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
             &u32_map[local_id_dwords],
             uniform_data_size);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
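
/* Worked example (ours, with made-up numbers): with one local-invocation-ID
 * register (8 dwords) and 6 uniform params, push_constant_data_size is
 * (8 + 6) * 4 = 56 bytes, which rounds up to reg_aligned_constant_size = 64.
 * With 16 threads the total is 1024 bytes, already a multiple of the 64-byte
 * gen8+ alignment, so each thread t reads its block at byte offset t * 64.
 */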
void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
void anv_CmdExecuteCommands(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}
VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   anv_ResetCommandPool(_device, commandPool, 0);

   anv_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   /* FIXME: vkResetCommandPool must not destroy its command buffers.  The
    * Vulkan 1.0 spec requires that it only reset them:
    *
    *    Resetting a command pool recycles all of the resources from all of
    *    the command buffers allocated from the command pool back to the
    *    command pool.  All command buffers that have been allocated from the
    *    command pool are put in the initial state.
    */
   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   return VK_SUCCESS;
}
/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}