anv_cmd_state_init(cmd_buffer);
}
+/**
+ * This function updates the size of the push constant buffer we need to emit.
+ * It is called from various parts of the driver so that different pieces of
+ * push constant data get emitted as needed. However, it is important that we
+ * never shrink the size of the buffer. For example, a compute shader dispatch
+ * will always call this for the base group id, which has an offset in the
+ * push constant buffer that is smaller than the offset for storage image
+ * data. If the compute shader has storage images, we will call this again
+ * with a larger size during binding table emission. If we then dispatch the
+ * compute shader again without dirtying our descriptors, we would still call
+ * this function with the smaller size for the base group id, but not for the
+ * images. Shrinking the size of the push constant data we emit with that
+ * dispatch would make us drop the image data.
+ */
VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
gl_shader_stage stage, uint32_t size)
anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
+ (*ptr)->size = size;
} else if ((*ptr)->size < size) {
*ptr = vk_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
anv_batch_set_error(&cmd_buffer->batch, VK_ERROR_OUT_OF_HOST_MEMORY);
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
}
+ (*ptr)->size = size;
}
- (*ptr)->size = size;
return VK_SUCCESS;
}
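
The doc comment above walks through why this helper must only grow the recorded size. Below is a minimal, standalone sketch of that grow-only rule (hypothetical names, plain libc allocation rather than the driver's allocators), replaying the dispatch / binding-table / re-dispatch sequence the comment describes:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Grow-only size tracking: a request smaller than the current size is a
 * no-op, so a later, smaller request cannot drop data that an earlier,
 * larger request made room for. */
struct push_buf {
   uint32_t size;
   uint8_t data[];
};

static int
ensure_size(struct push_buf **ptr, uint32_t size)
{
   if (*ptr == NULL) {
      *ptr = calloc(1, sizeof(**ptr) + size);
      if (*ptr == NULL)
         return -1;
      (*ptr)->size = size;
   } else if ((*ptr)->size < size) {
      struct push_buf *grown = realloc(*ptr, sizeof(**ptr) + size);
      if (grown == NULL)
         return -1;
      /* Zero only the newly added tail; realloc preserved the old data. */
      memset(grown->data + grown->size, 0, size - grown->size);
      grown->size = size;
      *ptr = grown;
   }
   return 0;
}

int
main(void)
{
   struct push_buf *pc = NULL;

   ensure_size(&pc, 16);    /* dispatch: base group id only           */
   ensure_size(&pc, 256);   /* binding table emission: image params   */
   ensure_size(&pc, 16);    /* re-dispatch, descriptors not dirtied   */

   assert(pc->size == 256); /* buffer still covers the image params   */
   free(pc);
   return 0;
}

If the third call were allowed to shrink the size back to 16, the final assert would fire, which is exactly the dropped-image-data case the comment warns about.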
case 10: \
gen10_##func(__VA_ARGS__); \
break; \
+ case 11: \
+ gen11_##func(__VA_ARGS__); \
+ break; \
default: \
assert(!"Unknown hardware generation"); \
}
level, base_layer, layer_count);
}
+void
+anv_cmd_emit_conditional_render_predicate(struct anv_cmd_buffer *cmd_buffer)
+{
+ anv_genX_call(&cmd_buffer->device->info,
+ cmd_emit_conditional_render_predicate,
+ cmd_buffer);
+}
+
void anv_CmdBindPipeline(
VkCommandBuffer commandBuffer,
VkPipelineBindPoint pipelineBindPoint,
ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
- assert(firstSet + descriptorSetCount < MAX_SETS);
+ assert(firstSet + descriptorSetCount <= MAX_SETS);
for (uint32_t i = 0; i < descriptorSetCount; i++) {
ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
}
}
+void anv_CmdBindTransformFeedbackBuffersEXT(
+ VkCommandBuffer commandBuffer,
+ uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets,
+ const VkDeviceSize* pSizes)
+{
+ ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct anv_xfb_binding *xfb = cmd_buffer->state.xfb_bindings;
+
+ /* We have to defer setting up the transform feedback buffers since we
+ * need the buffer strides from the pipeline. */
+
+ assert(firstBinding + bindingCount <= MAX_XFB_BUFFERS);
+ for (uint32_t i = 0; i < bindingCount; i++) {
+ if (pBuffers[i] == VK_NULL_HANDLE) {
+ xfb[firstBinding + i].buffer = NULL;
+ } else {
+ ANV_FROM_HANDLE(anv_buffer, buffer, pBuffers[i]);
+ xfb[firstBinding + i].buffer = buffer;
+ xfb[firstBinding + i].offset = pOffsets[i];
+ xfb[firstBinding + i].size =
+ anv_buffer_get_range(buffer, pOffsets[i],
+ pSizes ? pSizes[i] : VK_WHOLE_SIZE);
+ }
+ }
+}
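
For reference, when pSizes is NULL or an element is VK_WHOLE_SIZE, the binding is required to run from the given offset to the end of the buffer; the anv_buffer_get_range() call above is assumed to do that clamping. A minimal, standalone sketch of the rule (hypothetical names, not the actual driver helper):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define WHOLE_SIZE (~0ull) /* stand-in for VK_WHOLE_SIZE */

struct buffer {
   uint64_t size; /* size of the buffer in bytes */
};

/* Resolve an (offset, range) pair against a buffer: WHOLE_SIZE means
 * "from offset to the end of the buffer", anything else is used as-is
 * after a bounds check. */
static uint64_t
buffer_get_range(const struct buffer *buffer, uint64_t offset, uint64_t range)
{
   assert(offset <= buffer->size);
   if (range == WHOLE_SIZE)
      return buffer->size - offset;
   assert(offset + range <= buffer->size);
   return range;
}

int
main(void)
{
   struct buffer buf = { .size = 1024 };

   /* pSizes == NULL (or VK_WHOLE_SIZE): capture to the end of the buffer. */
   printf("%llu\n", (unsigned long long)buffer_get_range(&buf, 256, WHOLE_SIZE));
   /* Explicit size: used directly. */
   printf("%llu\n", (unsigned long long)buffer_get_range(&buf, 256, 128));
   return 0;
}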
+
enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
memcpy(state.map, data, size);
- anv_state_flush(cmd_buffer->device, state);
-
VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
return state;
for (uint32_t i = 0; i < dwords; i++)
p[i] = a[i] | b[i];
- anv_state_flush(cmd_buffer->device, state);
-
VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
return state;
for (unsigned i = 0; i < prog_data->nr_params; i++)
u32_map[i] = anv_push_constant_value(data, prog_data->param[i]);
- anv_state_flush(cmd_buffer->device, state);
-
return state;
}
}
}
- anv_state_flush(cmd_buffer->device, state);
-
return state;
}
const struct anv_subpass *subpass = cmd_buffer->state.subpass;
const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
- if (subpass->depth_stencil_attachment.attachment == VK_ATTACHMENT_UNUSED)
+ if (subpass->depth_stencil_attachment == NULL)
return NULL;
const struct anv_image_view *iview =
- fb->attachments[subpass->depth_stencil_attachment.attachment];
+ fb->attachments[subpass->depth_stencil_attachment->attachment];
assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
VK_IMAGE_ASPECT_STENCIL_BIT));
return iview;
}
-static struct anv_push_descriptor_set *
-anv_cmd_buffer_get_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
- VkPipelineBindPoint bind_point,
- uint32_t set)
+static struct anv_descriptor_set *
+anv_cmd_buffer_push_descriptor_set(struct anv_cmd_buffer *cmd_buffer,
+ VkPipelineBindPoint bind_point,
+ struct anv_descriptor_set_layout *layout,
+ uint32_t _set)
{
struct anv_cmd_pipeline_state *pipe_state;
if (bind_point == VK_PIPELINE_BIND_POINT_COMPUTE) {
}
struct anv_push_descriptor_set **push_set =
- &pipe_state->push_descriptors[set];
+ &pipe_state->push_descriptors[_set];
if (*push_set == NULL) {
*push_set = vk_alloc(&cmd_buffer->pool->alloc,
}
}
- return *push_set;
+ struct anv_descriptor_set *set = &(*push_set)->set;
+
+ set->layout = layout;
+ set->size = anv_descriptor_set_layout_size(layout);
+ set->buffer_view_count = layout->buffer_view_count;
+ set->buffer_views = (*push_set)->buffer_views;
+
+ return set;
}
void anv_CmdPushDescriptorSetKHR(
struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
- struct anv_push_descriptor_set *push_set =
- anv_cmd_buffer_get_push_descriptor_set(cmd_buffer,
- pipelineBindPoint, _set);
- if (!push_set)
+ struct anv_descriptor_set *set =
+ anv_cmd_buffer_push_descriptor_set(cmd_buffer, pipelineBindPoint,
+ set_layout, _set);
+ if (!set)
return;
- struct anv_descriptor_set *set = &push_set->set;
-
- set->layout = set_layout;
- set->size = anv_descriptor_set_layout_size(set_layout);
- set->buffer_count = set_layout->buffer_count;
- set->buffer_views = push_set->buffer_views;
-
/* Go through the user supplied descriptors. */
for (uint32_t i = 0; i < descriptorWriteCount; i++) {
const VkWriteDescriptorSet *write = &pDescriptorWrites[i];
case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
- anv_descriptor_set_write_image_view(set, &cmd_buffer->device->info,
+ anv_descriptor_set_write_image_view(cmd_buffer->device, set,
write->pImageInfo + j,
write->descriptorType,
write->dstBinding,
ANV_FROM_HANDLE(anv_buffer_view, bview,
write->pTexelBufferView[j]);
- anv_descriptor_set_write_buffer_view(set,
+ anv_descriptor_set_write_buffer_view(cmd_buffer->device, set,
write->descriptorType,
bview,
write->dstBinding,
ANV_FROM_HANDLE(anv_buffer, buffer, write->pBufferInfo[j].buffer);
assert(buffer);
- anv_descriptor_set_write_buffer(set,
- cmd_buffer->device,
+ anv_descriptor_set_write_buffer(cmd_buffer->device, set,
&cmd_buffer->surface_state_stream,
write->descriptorType,
buffer,
struct anv_descriptor_set_layout *set_layout = layout->set[_set].layout;
- struct anv_push_descriptor_set *push_set =
- anv_cmd_buffer_get_push_descriptor_set(cmd_buffer,
- template->bind_point, _set);
- if (!push_set)
+ struct anv_descriptor_set *set =
+ anv_cmd_buffer_push_descriptor_set(cmd_buffer, template->bind_point,
+ set_layout, _set);
+ if (!set)
return;
- struct anv_descriptor_set *set = &push_set->set;
-
- set->layout = set_layout;
- set->size = anv_descriptor_set_layout_size(set_layout);
- set->buffer_count = set_layout->buffer_count;
- set->buffer_views = push_set->buffer_views;
-
- anv_descriptor_set_write_template(set,
- cmd_buffer->device,
+ anv_descriptor_set_write_template(cmd_buffer->device, set,
&cmd_buffer->surface_state_stream,
template,
pData);
anv_cmd_buffer_bind_descriptor_set(cmd_buffer, template->bind_point,
layout, _set, set, NULL, NULL);
}
+
+void anv_CmdSetDeviceMask(
+ VkCommandBuffer commandBuffer,
+ uint32_t deviceMask)
+{
+ /* No-op: we only advertise device groups containing a single physical
+ * device, so there is nothing to do for a device mask update. */
+}