#include "anv_private.h"
+#include "vk_format_info.h"
+
/** \file anv_cmd_buffer.c
*
* This file contains all of the stuff for emitting commands into a command
}
static void
-anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
+anv_cmd_state_init(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_cmd_state *state = &cmd_buffer->state;
- memset(&state->descriptors, 0, sizeof(state->descriptors));
- memset(&state->push_constants, 0, sizeof(state->push_constants));
- memset(state->binding_tables, 0, sizeof(state->binding_tables));
- memset(state->samplers, 0, sizeof(state->samplers));
-
- /* 0 isn't a valid config. This ensures that we always configure L3$. */
- cmd_buffer->state.current_l3_config = 0;
+ memset(state, 0, sizeof(*state));
- state->dirty = 0;
- state->vb_dirty = 0;
- state->descriptors_dirty = 0;
- state->push_constants_dirty = 0;
- state->pipeline = NULL;
+ state->current_pipeline = UINT32_MAX;
state->restart_index = UINT32_MAX;
- state->dynamic = default_dynamic_state;
- state->need_query_wa = true;
-
- if (state->attachments != NULL) {
- anv_free(&cmd_buffer->pool->alloc, state->attachments);
- state->attachments = NULL;
- }
+ state->gfx.dynamic = default_dynamic_state;
+}
- state->gen7.index_buffer = NULL;
+static void
+anv_cmd_pipeline_state_finish(struct anv_cmd_buffer *cmd_buffer,
+ struct anv_cmd_pipeline_state *pipe_state)
+{
+ for (uint32_t i = 0; i < ARRAY_SIZE(pipe_state->push_descriptors); i++)
+ vk_free(&cmd_buffer->pool->alloc, pipe_state->push_descriptors[i]);
}
-/**
- * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
- */
-void
-anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
- const VkRenderPassBeginInfo *info)
+static void
+anv_cmd_state_finish(struct anv_cmd_buffer *cmd_buffer)
{
struct anv_cmd_state *state = &cmd_buffer->state;
- ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);
-
- anv_free(&cmd_buffer->pool->alloc, state->attachments);
-
- if (pass->attachment_count == 0) {
- state->attachments = NULL;
- return;
- }
- state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
- pass->attachment_count *
- sizeof(state->attachments[0]),
- 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (state->attachments == NULL) {
- /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
- abort();
- }
+ anv_cmd_pipeline_state_finish(cmd_buffer, &state->gfx.base);
+ anv_cmd_pipeline_state_finish(cmd_buffer, &state->compute.base);
- for (uint32_t i = 0; i < pass->attachment_count; ++i) {
- struct anv_render_pass_attachment *att = &pass->attachments[i];
- VkImageAspectFlags clear_aspects = 0;
+ for (uint32_t i = 0; i < MESA_SHADER_STAGES; i++)
+ vk_free(&cmd_buffer->pool->alloc, state->push_constants[i]);
- if (anv_format_is_color(att->format)) {
- /* color attachment */
- if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
- clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
- }
- } else {
- /* depthstencil attachment */
- if (att->format->has_depth &&
- att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
- clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
- }
- if (att->format->has_stencil &&
- att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
- clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
- }
- }
+ vk_free(&cmd_buffer->pool->alloc, state->attachments);
+}
- state->attachments[i].pending_clear_aspects = clear_aspects;
- if (clear_aspects) {
- assert(info->clearValueCount > i);
- state->attachments[i].clear_value = info->pClearValues[i];
- }
- }
+static void
+anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
+{
+ anv_cmd_state_finish(cmd_buffer);
+ anv_cmd_state_init(cmd_buffer);
}
-static VkResult
+/**
+ * This function updates the size of the push constant buffer we need to
+ * emit. It is called from various parts of the driver to ensure that
+ * different pieces of push constant data get emitted as needed. However,
+ * it is important that we never shrink the size of the buffer. For example,
+ * a compute shader dispatch will always call this for the base group id,
+ * which has an offset in the push constant buffer that is smaller than the
+ * offset for storage image data. If the compute shader has storage images,
+ * we will call this again with a larger size during binding table emission.
+ * If we then dispatch the compute shader again without dirtying our
+ * descriptors, we would still call this function only with the smaller
+ * base-group-id size; shrinking the buffer to that size would drop the
+ * image data from the push constants we emit for that dispatch.
+ */
+VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage, uint32_t size)
{
struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
if (*ptr == NULL) {
- *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
+ *ptr = vk_alloc(&cmd_buffer->pool->alloc, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (*ptr == NULL)
+ if (*ptr == NULL) {
+ anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ (*ptr)->size = size;
} else if ((*ptr)->size < size) {
- *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
+ *ptr = vk_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (*ptr == NULL)
+ if (*ptr == NULL) {
+ anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ (*ptr)->size = size;
}
- (*ptr)->size = size;
return VK_SUCCESS;
}
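+
+/* For example, a compute dispatch always needs the base workgroup id and,
+ * through the anv_cmd_buffer_ensure_push_constant_field() wrapper (moved
+ * out of this file), effectively calls
+ *
+ *    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer,
+ *       MESA_SHADER_COMPUTE,
+ *       offsetof(struct anv_push_constants, base_work_group_id) +
+ *       sizeof(cmd_buffer->state.push_constants[0]->base_work_group_id));
+ *
+ * while binding table emission may later pass a larger size for storage
+ * image parameters. The (*ptr)->size < size check above then only ever
+ * grows the allocation.
+ */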
-#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
- anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
- (offsetof(struct anv_push_constants, field) + \
- sizeof(cmd_buffer->state.push_constants[0]->field)))
-
static VkResult anv_create_cmd_buffer(
struct anv_device * device,
struct anv_cmd_pool * pool,
struct anv_cmd_buffer *cmd_buffer;
VkResult result;
- cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
+ cmd_buffer = vk_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (cmd_buffer == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
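+
+   /* anv_batch_set_error() latches the first failure seen while recording
+    * into batch.status; start it out as VK_SUCCESS. */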
+ cmd_buffer->batch.status = VK_SUCCESS;
+
cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
cmd_buffer->device = device;
cmd_buffer->pool = pool;
cmd_buffer->level = level;
- cmd_buffer->state.attachments = NULL;
result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
if (result != VK_SUCCESS)
goto fail;
anv_state_stream_init(&cmd_buffer->surface_state_stream,
- &device->surface_state_block_pool);
+ &device->surface_state_pool, 4096);
anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
- &device->dynamic_state_block_pool);
+ &device->dynamic_state_pool, 16384);
+
+ anv_cmd_state_init(cmd_buffer);
if (pool) {
list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
return VK_SUCCESS;
fail:
- anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
+ vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
return result;
}
break;
}
- if (result != VK_SUCCESS)
+ if (result != VK_SUCCESS) {
anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
i, pCommandBuffers);
+ for (i = 0; i < pAllocateInfo->commandBufferCount; i++)
+ pCommandBuffers[i] = VK_NULL_HANDLE;
+ }
return result;
}
anv_state_stream_finish(&cmd_buffer->surface_state_stream);
anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
- anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
- anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
+ anv_cmd_state_finish(cmd_buffer);
+
+ vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
void anv_FreeCommandBuffers(
for (uint32_t i = 0; i < commandBufferCount; i++) {
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
+ if (!cmd_buffer)
+ continue;
+
anv_cmd_buffer_destroy(cmd_buffer);
}
}
-VkResult anv_ResetCommandBuffer(
- VkCommandBuffer commandBuffer,
- VkCommandBufferResetFlags flags)
+VkResult
+anv_cmd_buffer_reset(struct anv_cmd_buffer *cmd_buffer)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
-
cmd_buffer->usage_flags = 0;
- cmd_buffer->state.current_pipeline = UINT32_MAX;
anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
anv_cmd_state_reset(cmd_buffer);
anv_state_stream_finish(&cmd_buffer->surface_state_stream);
anv_state_stream_init(&cmd_buffer->surface_state_stream,
- &cmd_buffer->device->surface_state_block_pool);
+ &cmd_buffer->device->surface_state_pool, 4096);
anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
- &cmd_buffer->device->dynamic_state_block_pool);
-
+ &cmd_buffer->device->dynamic_state_pool, 16384);
return VK_SUCCESS;
}
-void
-anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
-{
- switch (cmd_buffer->device->info.gen) {
- case 7:
- if (cmd_buffer->device->info.is_haswell)
- return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
- else
- return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
- case 8:
- return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
- case 9:
- return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
- default:
- unreachable("unsupported gen\n");
- }
-}
-
-VkResult anv_BeginCommandBuffer(
+VkResult anv_ResetCommandBuffer(
VkCommandBuffer commandBuffer,
- const VkCommandBufferBeginInfo* pBeginInfo)
+ VkCommandBufferResetFlags flags)
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ return anv_cmd_buffer_reset(cmd_buffer);
+}
- /* If this is the first vkBeginCommandBuffer, we must *initialize* the
- * command buffer's state. Otherwise, we must *reset* its state. In both
- * cases we reset it.
- *
- * From the Vulkan 1.0 spec:
- *
- * If a command buffer is in the executable state and the command buffer
- * was allocated from a command pool with the
- * VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
- * vkBeginCommandBuffer implicitly resets the command buffer, behaving
- * as if vkResetCommandBuffer had been called with
- * VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
- * the command buffer in the recording state.
- */
- anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);
-
- cmd_buffer->usage_flags = pBeginInfo->flags;
-
- assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
- !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));
-
- anv_cmd_buffer_emit_state_base_address(cmd_buffer);
-
- if (cmd_buffer->usage_flags &
- VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
- cmd_buffer->state.framebuffer =
- anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
- cmd_buffer->state.pass =
- anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
-
- struct anv_subpass *subpass =
- &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
-
- anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
+#define anv_genX_call(devinfo, func, ...) \
+ switch ((devinfo)->gen) { \
+ case 7: \
+ if ((devinfo)->is_haswell) { \
+ gen75_##func(__VA_ARGS__); \
+ } else { \
+ gen7_##func(__VA_ARGS__); \
+ } \
+ break; \
+ case 8: \
+ gen8_##func(__VA_ARGS__); \
+ break; \
+ case 9: \
+ gen9_##func(__VA_ARGS__); \
+ break; \
+ case 10: \
+ gen10_##func(__VA_ARGS__); \
+ break; \
+ case 11: \
+ gen11_##func(__VA_ARGS__); \
+ break; \
+ default: \
+ assert(!"Unknown hardware generation"); \
}
- return VK_SUCCESS;
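+/* For example, on a gen9 device,
+ *
+ *    anv_genX_call(&cmd_buffer->device->info,
+ *                  cmd_buffer_emit_state_base_address, cmd_buffer);
+ *
+ * expands to a call to gen9_cmd_buffer_emit_state_base_address(cmd_buffer),
+ * as in the wrappers below.
+ */
+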
+void
+anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
+{
+ anv_genX_call(&cmd_buffer->device->info,
+ cmd_buffer_emit_state_base_address,
+ cmd_buffer);
}
-VkResult anv_EndCommandBuffer(
- VkCommandBuffer commandBuffer)
+void
+anv_cmd_buffer_mark_image_written(struct anv_cmd_buffer *cmd_buffer,
+ const struct anv_image *image,
+ VkImageAspectFlagBits aspect,
+ enum isl_aux_usage aux_usage,
+ uint32_t level,
+ uint32_t base_layer,
+ uint32_t layer_count)
{
- ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- struct anv_device *device = cmd_buffer->device;
-
- anv_cmd_buffer_end_batch_buffer(cmd_buffer);
-
- if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
- /* The algorithm used to compute the validate list is not threadsafe as
- * it uses the bo->index field. We have to lock the device around it.
- * Fortunately, the chances for contention here are probably very low.
- */
- pthread_mutex_lock(&device->mutex);
- anv_cmd_buffer_prepare_execbuf(cmd_buffer);
- pthread_mutex_unlock(&device->mutex);
- }
+ anv_genX_call(&cmd_buffer->device->info,
+ cmd_buffer_mark_image_written,
+ cmd_buffer, image, aspect, aux_usage,
+ level, base_layer, layer_count);
+}
- return VK_SUCCESS;
+void
+anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
+{
+ anv_genX_call(&cmd_buffer->device->info,
+ cmd_emit_conditional_render_predicate,
+ cmd_buffer);
}
void anv_CmdBindPipeline(
switch (pipelineBindPoint) {
case VK_PIPELINE_BIND_POINT_COMPUTE:
- cmd_buffer->state.compute_pipeline = pipeline;
- cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
+ cmd_buffer->state.compute.base.pipeline = pipeline;
+ cmd_buffer->state.compute.pipeline_dirty = true;
cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
break;
case VK_PIPELINE_BIND_POINT_GRAPHICS:
- cmd_buffer->state.pipeline = pipeline;
- cmd_buffer->state.vb_dirty |= pipeline->vb_used;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
+ cmd_buffer->state.gfx.base.pipeline = pipeline;
+ cmd_buffer->state.gfx.vb_dirty |= pipeline->vb_used;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
/* Apply the dynamic state from the pipeline */
- cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
- anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
+ cmd_buffer->state.gfx.dirty |= pipeline->dynamic_state_mask;
+ anv_dynamic_state_copy(&cmd_buffer->state.gfx.dynamic,
&pipeline->dynamic_state,
pipeline->dynamic_state_mask);
break;
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
const uint32_t total_count = firstViewport + viewportCount;
- if (cmd_buffer->state.dynamic.viewport.count < total_count)
- cmd_buffer->state.dynamic.viewport.count = total_count;
+ if (cmd_buffer->state.gfx.dynamic.viewport.count < total_count)
+ cmd_buffer->state.gfx.dynamic.viewport.count = total_count;
- memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
+ memcpy(cmd_buffer->state.gfx.dynamic.viewport.viewports + firstViewport,
pViewports, viewportCount * sizeof(*pViewports));
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void anv_CmdSetScissor(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
const uint32_t total_count = firstScissor + scissorCount;
- if (cmd_buffer->state.dynamic.scissor.count < total_count)
- cmd_buffer->state.dynamic.scissor.count = total_count;
+ if (cmd_buffer->state.gfx.dynamic.scissor.count < total_count)
+ cmd_buffer->state.gfx.dynamic.scissor.count = total_count;
- memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
+ memcpy(cmd_buffer->state.gfx.dynamic.scissor.scissors + firstScissor,
pScissors, scissorCount * sizeof(*pScissors));
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void anv_CmdSetLineWidth(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer->state.dynamic.line_width = lineWidth;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
+ cmd_buffer->state.gfx.dynamic.line_width = lineWidth;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void anv_CmdSetDepthBias(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
- cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
- cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
+ cmd_buffer->state.gfx.dynamic.depth_bias.bias = depthBiasConstantFactor;
+ cmd_buffer->state.gfx.dynamic.depth_bias.clamp = depthBiasClamp;
+ cmd_buffer->state.gfx.dynamic.depth_bias.slope = depthBiasSlopeFactor;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void anv_CmdSetBlendConstants(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- memcpy(cmd_buffer->state.dynamic.blend_constants,
+ memcpy(cmd_buffer->state.gfx.dynamic.blend_constants,
blendConstants, sizeof(float) * 4);
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void anv_CmdSetDepthBounds(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
- cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
- cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
+ cmd_buffer->state.gfx.dynamic.depth_bounds.min = minDepthBounds;
+ cmd_buffer->state.gfx.dynamic.depth_bounds.max = maxDepthBounds;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void anv_CmdSetStencilCompareMask(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
- cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
+ cmd_buffer->state.gfx.dynamic.stencil_compare_mask.front = compareMask;
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
- cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
+ cmd_buffer->state.gfx.dynamic.stencil_compare_mask.back = compareMask;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void anv_CmdSetStencilWriteMask(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
- cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
+ cmd_buffer->state.gfx.dynamic.stencil_write_mask.front = writeMask;
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
- cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
+ cmd_buffer->state.gfx.dynamic.stencil_write_mask.back = writeMask;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void anv_CmdSetStencilReference(
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
- cmd_buffer->state.dynamic.stencil_reference.front = reference;
+ cmd_buffer->state.gfx.dynamic.stencil_reference.front = reference;
if (faceMask & VK_STENCIL_FACE_BACK_BIT)
- cmd_buffer->state.dynamic.stencil_reference.back = reference;
+ cmd_buffer->state.gfx.dynamic.stencil_reference.back = reference;
- cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+ cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
+}
+
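+/* Binds one descriptor set. When dynamic_offsets is non-NULL, the set
+ * consumes its share of the caller's dynamic offsets: *dynamic_offsets and
+ * *dynamic_offset_count are advanced past the entries this set used, so
+ * anv_CmdBindDescriptorSets() can walk pDynamicOffsets once across all of
+ * the sets it binds.
+ */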
+static void
+anv_cmd_buffer_bind_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point,
+ struct anv_pipeline_layout *layout,
+ uint32_t set_index,
+ struct anv_descriptor_set *set,
+ uint32_t *dynamic_offset_count,
+ const uint32_t **dynamic_offsets)
+{
+ struct anv_descriptor_set_layout *set_layout =
+ layout->set[set_index].layout;
+
+ struct anv_cmd_pipeline_state *pipe_state;
+ if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
+ pipe_state = &cmd_buffer->state.compute.base;
+ } else {
+ assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+ pipe_state = &cmd_buffer->state.gfx.base;
+ }
+ pipe_state->descriptors[set_index] = set;
+
+ if (dynamic_offsets) {
+ if (set_layout->dynamic_offset_count > 0) {
+ uint32_t dynamic_offset_start =
+ layout->set[set_index].dynamic_offset_start;
+
+ /* Assert that everything is in range */
+ assert(set_layout->dynamic_offset_count <= *dynamic_offset_count);
+ assert(dynamic_offset_start + set_layout->dynamic_offset_count <=
+ ARRAY_SIZE(pipe_state->dynamic_offsets));
+
+ typed_memcpy(&pipe_state->dynamic_offsets[dynamic_offset_start],
+ *dynamic_offsets, set_layout->dynamic_offset_count);
+
+ *dynamic_offsets += set_layout->dynamic_offset_count;
+ *dynamic_offset_count -= set_layout->dynamic_offset_count;
+ }
+ }
+
+ if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
+ cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
+ } else {
+ assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+ cmd_buffer->state.descriptors_dirty |=
+ set_layout->shader_stages & VK_SHADER_STAGE_ALL_GRAPHICS;
+ }
+
+ /* Pipeline layout objects are required to live at least while any command
+ * buffers that use them are in recording state. We need to grab a reference
+ * to the pipeline layout being bound here so we can compute correct dynamic
+ * offsets for VK_DESCRIPTOR_TYPE_*_DYNAMIC in dynamic_offset_for_binding()
+ * when we record draw commands that come after this.
+ */
+ pipe_state->layout = layout;
}
void anv_CmdBindDescriptorSets(
{
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
- struct anv_descriptor_set_layout *set_layout;
- assert(firstSet + descriptorSetCount < MAX_SETS);
+ assert(firstSet + descriptorSetCount <= MAX_SETS);
- uint32_t dynamic_slot = 0;
for (uint32_t i = 0; i < descriptorSetCount; i++) {
ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
- set_layout = layout->set[firstSet + i].layout;
-
- if (cmd_buffer->state.descriptors[firstSet + i] != set) {
- cmd_buffer->state.descriptors[firstSet + i] = set;
- cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
- }
-
- if (set_layout->dynamic_offset_count > 0) {
- anv_foreach_stage(s, set_layout->shader_stages) {
- anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);
-
- struct anv_push_constants *push =
- cmd_buffer->state.push_constants[s];
-
- unsigned d = layout->set[firstSet + i].dynamic_offset_start;
- const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
- struct anv_descriptor *desc = set->descriptors;
-
- for (unsigned b = 0; b < set_layout->binding_count; b++) {
- if (set_layout->binding[b].dynamic_offset_index < 0)
- continue;
-
- unsigned array_size = set_layout->binding[b].array_size;
- for (unsigned j = 0; j < array_size; j++) {
- uint32_t range = 0;
- if (desc->buffer_view)
- range = desc->buffer_view->range;
- push->dynamic[d].offset = *(offsets++);
- push->dynamic[d].range = range;
- desc++;
- d++;
- }
- }
- }
- cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
- }
+ anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
+ layout, firstSet + i, set,
+ &dynamicOffsetCount,
+ &pDynamicOffsets);
}
}
   /* We have to defer setting up the vertex buffers since we need the buffer
    * stride from the pipeline. */
- assert(firstBinding + bindingCount < MAX_VBS);
+ assert(firstBinding + bindingCount <= MAX_VBS);
for (uint32_t i = 0; i < bindingCount; i++) {
vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
vb[firstBinding + i].offset = pOffsets[i];
- cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
+ cmd_buffer->state.gfx.vb_dirty |= 1 << (firstBinding + i);
}
}
-static void
-add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
- struct anv_state state, struct anv_bo *bo, uint32_t offset)
+void anv_CmdBindTransformFeedbackBuffersEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets,
+ const VkDeviceSize* pSizes)
{
- /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
- * 9 for gen8+. We only write the first dword for gen8+ here and rely on
- * the initial state to set the high bits to 0. */
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;
- const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;
+   /* Setting up the actual transform feedback buffers is deferred; here we
+    * just record the bindings. */
- anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
- state.offset + dword * 4, bo, offset);
+ assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
+ for (uint32_t i = 0; i < bindingCount; i++) {
+ if (pBuffers[i] == VK_NULL_HANDLE) {
+ xfb[firstBinding + i].buffer = NULL;
+ } else {
+ ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
+ xfb[firstBinding + i].buffer = buffer;
+ xfb[firstBinding + i].offset = pOffsets[i];
+ xfb[firstBinding + i].size =
+ anv_buffer_get_range(buffer, pOffsets[i],
+ pSizes ? pSizes[i] : VK_WHOLE_SIZE);
+ }
+ }
}
-const struct anv_format *
-anv_format_for_descriptor_type(VkDescriptorType type)
+enum isl_format
+anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
switch (type) {
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- return anv_format_for_vk_format(VK_FORMAT_R32G32B32A32_SFLOAT);
+ return ISL_FORMAT_R32G32B32A32_FLOAT;
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- return anv_format_for_vk_format(VK_FORMAT_UNDEFINED);
+ return ISL_FORMAT_RAW;
default:
unreachable("Invalid descriptor type");
}
}
-static struct anv_state
-anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
- struct anv_framebuffer *fb)
-{
- switch (cmd_buffer->device->info.gen) {
- case 7:
- if (cmd_buffer->device->info.is_haswell) {
- return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
- } else {
- return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
- }
- case 8:
- return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
- case 9:
- return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
- default:
- unreachable("Invalid hardware generation");
- }
-}
-
-VkResult
-anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
- gl_shader_stage stage,
- struct anv_state *bt_state)
-{
- struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
- struct anv_subpass *subpass = cmd_buffer->state.subpass;
- struct anv_pipeline_bind_map *map;
- uint32_t bias, state_offset;
-
- switch (stage) {
- case MESA_SHADER_COMPUTE:
- map = &cmd_buffer->state.compute_pipeline->bindings[stage];
- bias = 1;
- break;
- default:
- map = &cmd_buffer->state.pipeline->bindings[stage];
- bias = 0;
- break;
- }
-
- if (bias + map->surface_count == 0) {
- *bt_state = (struct anv_state) { 0, };
- return VK_SUCCESS;
- }
-
- *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
- bias + map->surface_count,
- &state_offset);
- uint32_t *bt_map = bt_state->map;
-
- if (bt_state->map == NULL)
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
- if (stage == MESA_SHADER_COMPUTE &&
- get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
- struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
- uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
-
- struct anv_state surface_state;
- surface_state =
- anv_cmd_buffer_alloc_surface_state(cmd_buffer);
-
- const struct anv_format *format =
- anv_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
- anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
- format->isl_format, bo_offset, 12, 1);
-
- bt_map[0] = surface_state.offset + state_offset;
- add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
- }
-
- if (map->surface_count == 0)
- goto out;
-
- if (map->image_count > 0) {
- VkResult result =
- anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
- if (result != VK_SUCCESS)
- return result;
-
- cmd_buffer->state.push_constants_dirty |= 1 << stage;
- }
-
- uint32_t image = 0;
- for (uint32_t s = 0; s < map->surface_count; s++) {
- struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
-
- struct anv_state surface_state;
- struct anv_bo *bo;
- uint32_t bo_offset;
-
- if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
- /* Color attachment binding */
- assert(stage == MESA_SHADER_FRAGMENT);
- if (binding->offset < subpass->color_count) {
- const struct anv_image_view *iview =
- fb->attachments[subpass->color_attachments[binding->offset]];
-
- assert(iview->color_rt_surface_state.alloc_size);
- surface_state = iview->color_rt_surface_state;
- add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
- iview->bo, iview->offset);
- } else {
- /* Null render target */
- struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
- surface_state =
- anv_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
- }
-
- bt_map[bias + s] = surface_state.offset + state_offset;
- continue;
- }
-
- struct anv_descriptor_set *set =
- cmd_buffer->state.descriptors[binding->set];
- struct anv_descriptor *desc = &set->descriptors[binding->offset];
-
- switch (desc->type) {
- case VK_DESCRIPTOR_TYPE_SAMPLER:
- /* Nothing for us to do here */
- continue;
-
- case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
- case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
- case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
- surface_state = desc->image_view->sampler_surface_state;
- assert(surface_state.alloc_size);
- bo = desc->image_view->bo;
- bo_offset = desc->image_view->offset;
- break;
-
- case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
- surface_state = desc->image_view->storage_surface_state;
- assert(surface_state.alloc_size);
- bo = desc->image_view->bo;
- bo_offset = desc->image_view->offset;
-
- struct brw_image_param *image_param =
- &cmd_buffer->state.push_constants[stage]->images[image++];
-
- *image_param = desc->image_view->storage_image_param;
- image_param->surface_idx = bias + s;
- break;
- }
-
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
- case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
- case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
- surface_state = desc->buffer_view->surface_state;
- assert(surface_state.alloc_size);
- bo = desc->buffer_view->bo;
- bo_offset = desc->buffer_view->offset;
- break;
-
- case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
- surface_state = desc->buffer_view->storage_surface_state;
- assert(surface_state.alloc_size);
- bo = desc->buffer_view->bo;
- bo_offset = desc->buffer_view->offset;
-
- struct brw_image_param *image_param =
- &cmd_buffer->state.push_constants[stage]->images[image++];
-
- *image_param = desc->buffer_view->storage_image_param;
- image_param->surface_idx = bias + s;
- break;
-
- default:
- assert(!"Invalid descriptor type");
- continue;
- }
-
- bt_map[bias + s] = surface_state.offset + state_offset;
- add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
- }
- assert(image == map->image_count);
-
- out:
- if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(*bt_state);
-
- return VK_SUCCESS;
-}
-
-VkResult
-anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
- gl_shader_stage stage, struct anv_state *state)
-{
- struct anv_pipeline_bind_map *map;
-
- if (stage == MESA_SHADER_COMPUTE)
- map = &cmd_buffer->state.compute_pipeline->bindings[stage];
- else
- map = &cmd_buffer->state.pipeline->bindings[stage];
-
- if (map->sampler_count == 0) {
- *state = (struct anv_state) { 0, };
- return VK_SUCCESS;
- }
-
- uint32_t size = map->sampler_count * 16;
- *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
-
- if (state->map == NULL)
- return VK_ERROR_OUT_OF_DEVICE_MEMORY;
-
- for (uint32_t s = 0; s < map->sampler_count; s++) {
- struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
- struct anv_descriptor_set *set =
- cmd_buffer->state.descriptors[binding->set];
- struct anv_descriptor *desc = &set->descriptors[binding->offset];
-
- if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
- desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
- continue;
-
- struct anv_sampler *sampler = desc->sampler;
-
- /* This can happen if we have an unfilled slot since TYPE_SAMPLER
- * happens to be zero.
- */
- if (sampler == NULL)
- continue;
-
- memcpy(state->map + (s * 16),
- sampler->state, sizeof(sampler->state));
- }
-
- if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(*state);
-
- return VK_SUCCESS;
-}
-
struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
const void *data, uint32_t size, uint32_t alignment)
state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
memcpy(state.map, data, size);
- if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(state);
-
VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
return state;
for (uint32_t i = 0; i < dwords; i++)
p[i] = a[i] | b[i];
- if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(state);
-
VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
return state;
}
-/**
- * @brief Setup the command buffer for recording commands inside the given
- * subpass.
- *
- * This does not record all commands needed for starting the subpass.
- * Starting the subpass may require additional commands.
- *
- * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
- * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, all setup the
- * command buffer for recording commands for some subpass. But only the first
- * two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
- */
-void
-anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
- struct anv_subpass *subpass)
+static uint32_t
+anv_push_constant_value(struct anv_push_constants *data, uint32_t param)
{
- switch (cmd_buffer->device->info.gen) {
- case 7:
- if (cmd_buffer->device->info.is_haswell) {
- gen75_cmd_buffer_set_subpass(cmd_buffer, subpass);
- } else {
- gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
+ if (BRW_PARAM_IS_BUILTIN(param)) {
+ switch (param) {
+ case BRW_PARAM_BUILTIN_ZERO:
+ return 0;
+ case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_X:
+ return data->base_work_group_id[0];
+ case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Y:
+ return data->base_work_group_id[1];
+ case BRW_PARAM_BUILTIN_BASE_WORK_GROUP_ID_Z:
+ return data->base_work_group_id[2];
+ default:
+ unreachable("Invalid param builtin");
}
- break;
- case 8:
- gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
- break;
- case 9:
- gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
- break;
- default:
- unreachable("unsupported gen\n");
+ } else {
+ uint32_t offset = ANV_PARAM_PUSH_OFFSET(param);
+ assert(offset % sizeof(uint32_t) == 0);
+ if (offset < data->size)
+ return *(uint32_t *)((uint8_t *)data + offset);
+ else
+ return 0;
}
}
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage)
{
+ struct anv_pipeline *pipeline = cmd_buffer->state.gfx.base.pipeline;
+
+ /* If we don't have this stage, bail. */
+ if (!anv_pipeline_has_stage(pipeline, stage))
+ return (struct anv_state) { .offset = 0 };
+
struct anv_push_constants *data =
cmd_buffer->state.push_constants[stage];
const struct brw_stage_prog_data *prog_data =
- cmd_buffer->state.pipeline->prog_data[stage];
+ pipeline->shaders[stage]->prog_data;
/* If we don't actually have any push constants, bail. */
- if (data == NULL || prog_data->nr_params == 0)
+ if (data == NULL || prog_data == NULL || prog_data->nr_params == 0)
return (struct anv_state) { .offset = 0 };
struct anv_state state =
/* Walk through the param array and fill the buffer with data */
uint32_t *u32_map = state.map;
- for (unsigned i = 0; i < prog_data->nr_params; i++) {
- uint32_t offset = (uintptr_t)prog_data->param[i];
- u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
- }
-
- if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(state);
+ for (unsigned i = 0; i < prog_data->nr_params; i++)
+ u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);
return state;
}
{
struct anv_push_constants *data =
cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
- struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
+ struct anv_pipeline *pipeline = cmd_buffer->state.compute.base.pipeline;
const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
- const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
- const unsigned push_constant_data_size =
- (local_id_dwords + prog_data->nr_params) * 4;
- const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
- const unsigned param_aligned_count =
- reg_aligned_constant_size / sizeof(uint32_t);
-
/* If we don't actually have any push constants, bail. */
- if (reg_aligned_constant_size == 0)
+ if (cs_prog_data->push.total.size == 0)
return (struct anv_state) { .offset = 0 };
- const unsigned threads = pipeline->cs_thread_width_max;
- const unsigned total_push_constants_size =
- reg_aligned_constant_size * threads;
const unsigned push_constant_alignment =
cmd_buffer->device->info.gen < 8 ? 32 : 64;
const unsigned aligned_total_push_constants_size =
- ALIGN(total_push_constants_size, push_constant_alignment);
+ ALIGN(cs_prog_data->push.total.size, push_constant_alignment);
struct anv_state state =
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
aligned_total_push_constants_size,
/* Walk through the param array and fill the buffer with data */
uint32_t *u32_map = state.map;
- brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
- reg_aligned_constant_size);
-
- /* Setup uniform data for the first thread */
- for (unsigned i = 0; i < prog_data->nr_params; i++) {
- uint32_t offset = (uintptr_t)prog_data->param[i];
- u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
+ if (cs_prog_data->push.cross_thread.size > 0) {
+ for (unsigned i = 0;
+ i < cs_prog_data->push.cross_thread.dwords;
+ i++) {
+ assert(prog_data->param[i] != BRW_PARAM_BUILTIN_SUBGROUP_ID);
+ u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);
+ }
}
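+
+   /* The per-thread block follows the cross-thread block, one
+    * per_thread.regs-sized chunk per thread. A GRF register holds eight
+    * dwords, hence the "8 *" register-to-dword conversion in dst below.
+    */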
- /* Copy uniform data from the first thread to every other thread */
- const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
- for (unsigned t = 1; t < threads; t++) {
- memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
- &u32_map[local_id_dwords],
- uniform_data_size);
+ if (cs_prog_data->push.per_thread.size > 0) {
+ for (unsigned t = 0; t < cs_prog_data->threads; t++) {
+ unsigned dst =
+ 8 * (cs_prog_data->push.per_thread.regs * t +
+ cs_prog_data->push.cross_thread.regs);
+ unsigned src = cs_prog_data->push.cross_thread.dwords;
+ for ( ; src < prog_data->nr_params; src++, dst++) {
+ if (prog_data->param[src] == BRW_PARAM_BUILTIN_SUBGROUP_ID) {
+ u32_map[dst] = t;
+ } else {
+ u32_map[dst] =
+ anv_push_constant_value(data, prog_data->param[src]);
+ }
+ }
+ }
}
- if (!cmd_buffer->device->info.has_llc)
- anv_state_clflush(state);
-
return state;
}
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
anv_foreach_stage(stage, stageFlags) {
- anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);
+ VkResult result =
+ anv_cmd_buffer_ensure_push_constant_field(cmd_buffer,
+ stage, client_data);
+ if (result != VK_SUCCESS)
+ return;
memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
pValues, size);
cmd_buffer->state.push_constants_dirty |= stageFlags;
}
-void anv_CmdExecuteCommands(
- VkCommandBuffer commandBuffer,
- uint32_t commandBufferCount,
- const VkCommandBuffer* pCmdBuffers)
-{
- ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
-
- assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
-
- for (uint32_t i = 0; i < commandBufferCount; i++) {
- ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
-
- assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
-
- anv_cmd_buffer_add_secondary(primary, secondary);
- }
-}
-
VkResult anv_CreateCommandPool(
VkDevice _device,
const VkCommandPoolCreateInfo* pCreateInfo,
ANV_FROM_HANDLE(anv_device, device, _device);
struct anv_cmd_pool *pool;
- pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
+ pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (pool == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
- anv_ResetCommandPool(_device, commandPool, 0);
+ if (!pool)
+ return;
- anv_free2(&device->alloc, pAllocator, pool);
+ list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
+ &pool->cmd_buffers, pool_link) {
+ anv_cmd_buffer_destroy(cmd_buffer);
+ }
+
+ vk_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetCommandPool(
{
ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
- /* FIXME: vkResetCommandPool must not destroy its command buffers. The
- * Vulkan 1.0 spec requires that it only reset them:
- *
- * Resetting a command pool recycles all of the resources from all of
- * the command buffers allocated from the command pool back to the
- * command pool. All command buffers that have been allocated from the
- * command pool are put in the initial state.
- */
- list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
- &pool->cmd_buffers, pool_link) {
- anv_cmd_buffer_destroy(cmd_buffer);
+ list_for_each_entry(struct anv_cmd_buffer, cmd_buffer,
+ &pool->cmd_buffers, pool_link) {
+ anv_cmd_buffer_reset(cmd_buffer);
}
return VK_SUCCESS;
}
+void anv_TrimCommandPool(
+ VkDevice device,
+ VkCommandPool commandPool,
+ VkCommandPoolTrimFlags flags)
+{
+ /* Nothing for us to do here. Our pools stay pretty tidy. */
+}
+
/**
* Return NULL if the current subpass has no depthstencil attachment.
*/
const struct anv_subpass *subpass = cmd_buffer->state.subpass;
const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
- if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
+ if (subpass->depth_stencil_attachment == NULL)
return NULL;
const struct anv_image_view *iview =
- fb->attachments[subpass->depth_stencil_attachment];
+ fb->attachments[subpass->depth_stencil_attachment->attachment];
assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT));
return iview;
}
+
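+/* Returns the push descriptor set for the given set index, allocating it on
+ * first use. The allocation is reused across vkCmdPushDescriptorSet*KHR()
+ * calls and freed in anv_cmd_pipeline_state_finish().
+ */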
+static struct anv_descriptor_set *
+anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point,
+ struct anv_descriptor_set_layout *layout,
+ uint32_t _set)
+{
+ struct anv_cmd_pipeline_state *pipe_state;
+ if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
+ pipe_state = &cmd_buffer->state.compute.base;
+ } else {
+ assert(bind_point == VK_PIPELINE_BIND_POINT_GRAPHICS);
+ pipe_state = &cmd_buffer->state.gfx.base;
+ }
+
+ struct anv_push_descriptor_set **push_set =
+ &pipe_state->push_descriptors[_set];
+
+ if (*push_set == NULL) {
+ *push_set = vk_alloc(&cmd_buffer->pool->alloc,
+ sizeof(struct anv_push_descriptor_set), 8,
+ VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+ if (*push_set == NULL) {
+ anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
+ return NULL;
+ }
+ }
+
+ struct anv_descriptor_set *set = &(*push_set)->set;
+
+ set->layout = layout;
+ set->size = anv_descriptor_set_layout_size(layout);
+ set->buffer_view_count = layout->buffer_view_count;
+ set->buffer_views = (*push_set)->buffer_views;
+
+ return set;
+}
+
+void anv_CmdPushDescriptorSetKHR(
+ VkCommandBuffer commandBuffer,
+ VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout _layout,
+ uint32_t _set,
+ uint32_t descriptorWriteCount,
+ const VkWriteDescriptorSet* pDescriptorWrites)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
+
+ assert(_set < MAX_SETS);
+
+ struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
+
+ struct anv_descriptor_set *set =
+ anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
+ set_layout, _set);
+ if (!set)
+ return;
+
+   /* Go through the user-supplied descriptors. */
+ for (uint32_t i = 0; i < descriptorWriteCount; i++) {
+ const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
+
+ switch (write->descriptorType) {
+ case VK_DESCRIPTOR_TYPE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
+ case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
+ case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
+ case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
+ for (uint32_t j = 0; j < write->descriptorCount; j++) {
+ anv_descriptor_set_write_image_view(cmd_buffer->device, set,
+ write->pImageInfo + j,
+ write->descriptorType,
+ write->dstBinding,
+ write->dstArrayElement + j);
+ }
+ break;
+
+ case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
+ for (uint32_t j = 0; j < write->descriptorCount; j++) {
+ ANV_FROM_HANDLE(anv_buffer_view, bview,
+ write->pTexelBufferView[j]);
+
+ anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
+ write->descriptorType,
+ bview,
+ write->dstBinding,
+ write->dstArrayElement + j);
+ }
+ break;
+
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ for (uint32_t j = 0; j < write->descriptorCount; j++) {
+ assert(write->pBufferInfo[j].buffer);
+ ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
+ assert(buffer);
+
+ anv_descriptor_set_write_buffer(cmd_buffer->device, set,
+ &cmd_buffer->surface_state_stream,
+ write->descriptorType,
+ buffer,
+ write->dstBinding,
+ write->dstArrayElement + j,
+ write->pBufferInfo[j].offset,
+ write->pBufferInfo[j].range);
+ }
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ anv_cmd_buffer_bind_descriptor_set(cmd_buffer, pipelineBindPoint,
+ layout, _set, set, NULL, NULL);
+}
+
+void anv_CmdPushDescriptorSetWithTemplateKHR(
+ VkCommandBuffer commandBuffer,
+ VkDescriptorUpdateTemplate descriptorUpdateTemplate,
+ VkPipelineLayout _layout,
+ uint32_t _set,
+ const void* pData)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ ANV_FROM_HANDLE(anv_descriptor_update_template, template,
+ descriptorUpdateTemplate);
+ ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
+
+ assert(_set < MAX_PUSH_DESCRIPTORS);
+
+ struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
+
+ struct anv_descriptor_set *set =
+ anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
+ set_layout, _set);
+ if (!set)
+ return;
+
+ anv_descriptor_set_write_template(cmd_buffer->device, set,
+ &cmd_buffer->surface_state_stream,
+ template,
+ pData);
+
+ anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
+ layout, _set, set, NULL, NULL);
+}
+
+void anv_CmdSetDeviceMask(
+ VkCommandBuffer commandBuffer,
+ uint32_t deviceMask)
+{
+ /* No-op */
+}