/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>

#include "anv_private.h"
/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd* entrypoints.
 * This file is concerned entirely with state emission and not with the
 * command buffer data structure itself.  As far as this file is concerned,
 * most of anv_cmd_buffer is magic.
 */
/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};
void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));
   memset(state->binding_tables, 0, sizeof(state->binding_tables));
   memset(state->samplers, 0, sizeof(state->samplers));

   /* 0 isn't a valid config.  This ensures that we always configure L3$. */
   cmd_buffer->state.current_l3_config = 0;

   state->dirty = 0;
   state->vb_dirty = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->restart_index = UINT32_MAX;
   state->dynamic = default_dynamic_state;
   state->need_query_wa = true;

   if (state->attachments != NULL) {
      anv_free(&cmd_buffer->pool->alloc, state->attachments);
      state->attachments = NULL;
   }

   state->gen7.index_buffer = NULL;
}
/**
 * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
static void
anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                const VkRenderPassBeginInfo *info)
{
   struct anv_cmd_state *state = &cmd_buffer->state;
   ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);

   anv_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return;
   }

   state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
                                  pass->attachment_count *
                                     sizeof(state->attachments[0]),
                                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
      abort();
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags clear_aspects = 0;

      if (anv_format_is_color(att->format)) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depth/stencil attachment */
         if (att->format->has_depth &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
         }
         if (att->format->has_stencil &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }

      state->attachments[i].pending_clear_aspects = clear_aspects;
      if (clear_aspects) {
         assert(info->clearValueCount > i);
         state->attachments[i].clear_value = info->pClearValues[i];
      }
   }
}
static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else if ((*ptr)->size < size) {
      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   (*ptr)->size = size;

   return VK_SUCCESS;
}
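
/* A convenience wrapper over the function above, as used throughout this
 * file: it grows the stage's push constant allocation so that it covers
 * everything up to and including `field`.  For example, the descriptor set
 * code below calls
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);
 *
 * which resolves to offsetof(struct anv_push_constants, dynamic) plus the
 * size of the dynamic member.
 */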
#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;
   cmd_buffer->state.attachments = NULL;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   if (pool != NULL) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer.
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}
VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   /* On failure, free whatever we managed to create so far. */
   if (result != VK_SUCCESS)
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);

   return result;
}
static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}
VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->usage_flags = 0;
   cmd_buffer->state.current_pipeline = UINT32_MAX;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_block_pool);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_block_pool);

   return VK_SUCCESS;
}
void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      /* Haswell is gen 7.5, so it takes the gen75 path. */
      if (cmd_buffer->device->info.is_haswell)
         return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
      else
         return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 9:
      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}
VkResult anv_BeginCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state.  Otherwise, we must *reset* its state.  In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set.  It then puts
    *    the command buffer in the recording state.
    */
   anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);

   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

      anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
   }

   return VK_SUCCESS;
}
VkResult anv_EndCommandBuffer(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}
void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}
void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.dynamic.viewport.count < total_count)
      cmd_buffer->state.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.dynamic.scissor.count < total_count)
      cmd_buffer->state.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + descriptorSetCount < MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }
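
      /* Dynamic buffer descriptors don't get a binding-table slot of their
       * own; instead each bound set's dynamic offsets are copied into the
       * per-stage push constants (push->dynamic[] below), where the shaders
       * pick them up.  The entries in pDynamicOffsets are consumed in set
       * order, one per array element of each binding that has a dynamic
       * offset index.
       */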
      if (set_layout->dynamic_offset_count > 0) {
         anv_foreach_stage(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  uint32_t range = 0;
                  if (desc->buffer_view)
                     range = desc->buffer_view->range;
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = range;
                  desc++;
                  d++;
               }
            }
         }
         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;

         dynamic_slot += set_layout->dynamic_offset_count;
      }
   }
}
void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffers since we need the buffer
    * stride from the pipeline.
    */
   assert(firstBinding + bindingCount < MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
   }
}
static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0.
    */
   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}
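
/* Worked through, the relocation target byte offset within the surface
 * state block is state.offset + 4 on pre-gen8 (dword 1) and
 * state.offset + 32 on gen8+ (dword 8, the low half of the 64-bit address).
 */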
const struct anv_format *
anv_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);

   default:
      unreachable("Invalid descriptor type");
   }
}
static struct anv_state
anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                        struct anv_framebuffer *fb)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell) {
         return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
      } else {
         return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
      }
   case 8:
      return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
   case 9:
      return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
   default:
      unreachable("Invalid hardware generation");
   }
}
VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  gl_shader_stage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_bind_map *map;
   uint32_t bias, state_offset;
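
   /* The first `bias` binding table slots are reserved: for fragment shaders
    * they hold the render-target surfaces, and for compute a single slot
    * holds the num_workgroups buffer when the shader needs it.
    * Descriptor-backed surfaces start at bt_map[bias].
    */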
   switch (stage) {
   case MESA_SHADER_FRAGMENT:
      map = &cmd_buffer->state.pipeline->bindings[stage];
      bias = MAX_RTS;
      break;
   case MESA_SHADER_COMPUTE:
      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
      bias = 1;
      break;
   default:
      map = &cmd_buffer->state.pipeline->bindings[stage];
      bias = 0;
      break;
   }

   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   if (stage == MESA_SHADER_FRAGMENT) {
      if (subpass->color_count == 0) {
         struct anv_state null_surface =
            anv_cmd_buffer_alloc_null_surface_state(cmd_buffer,
                                                    cmd_buffer->state.framebuffer);
         bt_map[0] = null_surface.offset + state_offset;
      } else {
         for (uint32_t a = 0; a < subpass->color_count; a++) {
            const struct anv_image_view *iview =
               fb->attachments[subpass->color_attachments[a]];

            assert(iview->color_rt_surface_state.alloc_size);
            bt_map[a] = iview->color_rt_surface_state.offset + state_offset;
            add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                                    iview->bo, iview->offset);
         }
      }
   }

   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state = anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const struct anv_format *format =
         anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format->isl_format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->image_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->buffer_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == map->image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}
VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             gl_shader_stage stage, struct anv_state *state)
{
   struct anv_pipeline_bind_map *map;

   if (stage == MESA_SHADER_COMPUTE)
      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
   else
      map = &cmd_buffer->state.pipeline->bindings[stage];

   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}
struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
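
/* A note on the merge above: the state-emission helpers in this driver build
 * hardware packets as arrays of pre-packed dwords, with fields that aren't
 * known yet left as zero.  ORing two half-filled packets (e.g. the
 * pipeline's baked-in half with the dynamic-state half) therefore yields the
 * complete packet.
 */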
/**
 * @brief Setup the command buffer for recording commands inside the given
 * subpass.
 *
 * This does not record all commands needed for starting the subpass.
 * Starting the subpass may require additional commands.
 *
 * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
 * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT all setup the
 * command buffer for recording commands for some subpass.  But only the
 * first two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
 */
void
anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell) {
         gen75_cmd_buffer_set_subpass(cmd_buffer, subpass);
      } else {
         gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
      }
      break;
   case 8:
      gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   case 9:
      gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}
struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);
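
   /* In this driver each prog_data->param[] entry encodes a byte offset into
    * the stage's anv_push_constants block (stored in the pointer value
    * itself), so filling the buffer is just a gather of 32-bit words from
    * `data` in param order.
    */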
   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   const unsigned push_constant_data_size =
      (local_id_dwords + prog_data->nr_params) * 4;
   const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   const unsigned param_aligned_count =
      reg_aligned_constant_size / sizeof(uint32_t);
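
   /* The resulting buffer layout is one register-aligned block per HW
    * thread, each holding that thread's local invocation IDs followed by a
    * copy of the shared uniform data:
    *
    *    [ids(t0) | uniforms] [ids(t1) | uniforms] ... [ids(tN-1) | uniforms]
    *
    * local_id_dwords counts 8 dwords per local-ID register, since a register
    * holds 32 bytes on this hardware.
    */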
   /* If we don't actually have any push constants, bail. */
   if (reg_aligned_constant_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned threads = pipeline->cs_thread_width_max;
   const unsigned total_push_constants_size =
      reg_aligned_constant_size * threads;
   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
                                reg_aligned_constant_size);

   /* Setup uniform data for the first thread */
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   /* Copy uniform data from the first thread to every other thread */
   const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
   for (unsigned t = 1; t < threads; t++) {
      memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
             &u32_map[local_id_dwords],
             uniform_data_size);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
void anv_CmdExecuteCommands(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}
VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}
void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   anv_ResetCommandPool(_device, commandPool, 0);

   anv_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   /* FIXME: vkResetCommandPool must not destroy its command buffers.  The
    * Vulkan 1.0 spec requires that it only reset them:
    *
    *    Resetting a command pool recycles all of the resources from all of
    *    the command buffers allocated from the command pool back to the
    *    command pool.  All command buffers that have been allocated from the
    *    command pool are put in the initial state.
    */
   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   return VK_SUCCESS;
}
/**
 * Return NULL if the current subpass has no depth/stencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}