#include <unistd.h>
#include <fcntl.h>
+#include "util/mesa-sha1.h"
+
#include "anv_private.h"
/*
(max_binding + 1) * sizeof(set_layout->binding[0]) +
immutable_sampler_count * sizeof(struct anv_sampler *);
- set_layout = anv_alloc2(&device->alloc, pAllocator, size, 8,
+ set_layout = vk_alloc2(&device->alloc, pAllocator, size, 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (!set_layout)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
struct anv_sampler **samplers =
(struct anv_sampler **)&set_layout->binding[max_binding + 1];
+ memset(set_layout, 0, sizeof(*set_layout));
set_layout->binding_count = max_binding + 1;
- set_layout->shader_stages = 0;
- set_layout->size = 0;
for (uint32_t b = 0; b <= max_binding; b++) {
/* Initialize all binding_layout entries to -1 */
memset(&set_layout->binding[b], -1, sizeof(set_layout->binding[b]));
+ set_layout->binding[b].array_size = 0;
set_layout->binding[b].immutable_samplers = NULL;
}
for (uint32_t j = 0; j < pCreateInfo->bindingCount; j++) {
const VkDescriptorSetLayoutBinding *binding = &pCreateInfo->pBindings[j];
uint32_t b = binding->binding;
+ /* We temporarily store the pointer to the binding in the
+ * immutable_samplers pointer. This provides us with a quick-and-dirty
+ * way to sort the bindings by binding number.
+ */
+ set_layout->binding[b].immutable_samplers = (void *)binding;
+ }
+
+ for (uint32_t b = 0; b <= max_binding; b++) {
+ const VkDescriptorSetLayoutBinding *binding =
+ (void *)set_layout->binding[b].immutable_samplers;
+
+ if (binding == NULL)
+ continue;
assert(binding->descriptorCount > 0);
+#ifndef NDEBUG
+ set_layout->binding[b].type = binding->descriptorType;
+#endif
set_layout->binding[b].array_size = binding->descriptorCount;
set_layout->binding[b].descriptor_index = set_layout->size;
set_layout->size += binding->descriptorCount;
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_descriptor_set_layout, set_layout, _set_layout);
- anv_free2(&device->alloc, pAllocator, set_layout);
+ vk_free2(&device->alloc, pAllocator, set_layout);
+}
+
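+/* Hash each set layout as raw bytes, trailing binding array included, so
+ * the SHA-1 stored in the pipeline layout below covers every binding.
+ * Presumably this serves as a cache key for pipelines compiled against
+ * the layout.
+ */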
+static void
+sha1_update_descriptor_set_layout(struct mesa_sha1 *ctx,
+ const struct anv_descriptor_set_layout *layout)
+{
+ size_t size = sizeof(*layout) +
+ sizeof(layout->binding[0]) * layout->binding_count;
+ _mesa_sha1_update(ctx, layout, size);
}
/*
assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO);
- layout = anv_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
+ layout = vk_alloc2(&device->alloc, pAllocator, sizeof(*layout), 8,
VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
if (layout == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
layout->set[set].dynamic_offset_start = dynamic_offset_count;
for (uint32_t b = 0; b < set_layout->binding_count; b++) {
- if (set_layout->binding[b].dynamic_offset_index >= 0)
- dynamic_offset_count += set_layout->binding[b].array_size;
+ if (set_layout->binding[b].dynamic_offset_index < 0)
+ continue;
+
+ dynamic_offset_count += set_layout->binding[b].array_size;
for (gl_shader_stage s = 0; s < MESA_SHADER_STAGES; s++) {
if (set_layout->binding[b].stage[s].surface_index >= 0)
layout->stage[s].has_dynamic_offsets = true;
}
}
+ struct mesa_sha1 *ctx = _mesa_sha1_init();
+ for (unsigned s = 0; s < layout->num_sets; s++) {
+ sha1_update_descriptor_set_layout(ctx, layout->set[s].layout);
+ _mesa_sha1_update(ctx, &layout->set[s].dynamic_offset_start,
+ sizeof(layout->set[s].dynamic_offset_start));
+ }
+ _mesa_sha1_update(ctx, &layout->num_sets, sizeof(layout->num_sets));
+ for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
+ _mesa_sha1_update(ctx, &layout->stage[s].has_dynamic_offsets,
+ sizeof(layout->stage[s].has_dynamic_offsets));
+ }
+ _mesa_sha1_final(ctx, layout->sha1);
+
*pPipelineLayout = anv_pipeline_layout_to_handle(layout);
return VK_SUCCESS;
ANV_FROM_HANDLE(anv_device, device, _device);
ANV_FROM_HANDLE(anv_pipeline_layout, pipeline_layout, _pipelineLayout);
- anv_free2(&device->alloc, pAllocator, pipeline_layout);
+ vk_free2(&device->alloc, pAllocator, pipeline_layout);
}
/*
- * Descriptor pools. These are a no-op for now.
+ * Descriptor pools.
+ *
+ * These are implemented using a big pool of memory with a free list for the
+ * host memory allocations, plus a state stream and a free list for the
+ * buffer view surface state. The spec allows us to fail to allocate due to
+ * fragmentation in all cases but two: 1) after pool reset, allocating up
+ * until the pool size with no freeing must succeed, and 2) allocating and
+ * freeing only descriptor sets with the same layout. Case 1) is easy enough,
+ * and the free list lets us recycle blocks for case 2).
*/
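+
+/* Sentinel for an empty free list. Offset 1 can never be the start of a
+ * real allocation: the first set lands at offset 0 and every set is far
+ * larger than one byte.
+ */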
+#define EMPTY 1
+
VkResult anv_CreateDescriptorPool(
- VkDevice device,
+ VkDevice _device,
const VkDescriptorPoolCreateInfo* pCreateInfo,
const VkAllocationCallbacks* pAllocator,
VkDescriptorPool* pDescriptorPool)
{
- anv_finishme("VkDescriptorPool is a stub");
- *pDescriptorPool = (VkDescriptorPool)1;
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ struct anv_descriptor_pool *pool;
+
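+   /* Count the total number of descriptors the pool must hold and,
+    * separately, the buffer descriptors, which each consume an
+    * anv_buffer_view on top of their anv_descriptor.
+    */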
+ uint32_t descriptor_count = 0;
+ uint32_t buffer_count = 0;
+ for (uint32_t i = 0; i < pCreateInfo->poolSizeCount; i++) {
+ switch (pCreateInfo->pPoolSizes[i].type) {
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
+ case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
+ case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
+ buffer_count += pCreateInfo->pPoolSizes[i].descriptorCount;
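+         /* fall through: buffer descriptors also consume a generic
+          * descriptor slot, counted in the default case below.
+          */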
+ default:
+ descriptor_count += pCreateInfo->pPoolSizes[i].descriptorCount;
+ break;
+ }
+ }
+
+   const size_t pool_size =
+      pCreateInfo->maxSets * sizeof(struct anv_descriptor_set) +
+      descriptor_count * sizeof(struct anv_descriptor) +
+      buffer_count * sizeof(struct anv_buffer_view);
+   const size_t total_size = sizeof(*pool) + pool_size;
+
+   pool = vk_alloc2(&device->alloc, pAllocator, total_size, 8,
+                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
+   if (!pool)
+      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+
+   /* pool->size tracks only the allocatable data area, not the header;
+    * otherwise the bump allocator could run sizeof(*pool) bytes past the
+    * end of the allocation.
+    */
+   pool->size = pool_size;
+ pool->next = 0;
+ pool->free_list = EMPTY;
+
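+   /* Buffer view surface states are sub-allocated from the device's surface
+    * state block pool through this stream, with a free list layered on top
+    * for recycling.
+    */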
+ anv_state_stream_init(&pool->surface_state_stream,
+ &device->surface_state_block_pool);
+ pool->surface_state_free_list = NULL;
+
+ *pDescriptorPool = anv_descriptor_pool_to_handle(pool);
+
return VK_SUCCESS;
}
VkDescriptorPool _pool,
const VkAllocationCallbacks* pAllocator)
{
- anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_descriptor_pool, pool, _pool);
+
+ anv_state_stream_finish(&pool->surface_state_stream);
+ vk_free2(&device->alloc, pAllocator, pool);
}
VkResult anv_ResetDescriptorPool(
- VkDevice device,
+ VkDevice _device,
VkDescriptorPool descriptorPool,
VkDescriptorPoolResetFlags flags)
{
- anv_finishme("VkDescriptorPool is a stub: free the pool's descriptor sets");
+ ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);
+
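+   /* Resetting a pool implicitly frees every set allocated from it, so
+    * rewind the bump allocator and drop both free lists.
+    */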
+ pool->next = 0;
+ pool->free_list = EMPTY;
+ anv_state_stream_finish(&pool->surface_state_stream);
+ anv_state_stream_init(&pool->surface_state_stream,
+ &device->surface_state_block_pool);
+ pool->surface_state_free_list = NULL;
+
return VK_SUCCESS;
}
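+
+/* Free-list entries are written in place over freed set memory; next is the
+ * pool->data offset of the following entry, or EMPTY at the end of the list.
+ */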
+struct pool_free_list_entry {
+ uint32_t next;
+ uint32_t size;
+};
+
+static size_t
+layout_size(const struct anv_descriptor_set_layout *layout)
+{
+ return
+ sizeof(struct anv_descriptor_set) +
+ layout->size * sizeof(struct anv_descriptor) +
+ layout->buffer_count * sizeof(struct anv_buffer_view);
+}
+
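+/* A freed buffer view surface state is recycled by writing this entry
+ * directly into the state's CPU mapping; the 64-byte surface state leaves
+ * plenty of room for it.
+ */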
+struct surface_state_free_list_entry {
+ void *next;
+ uint32_t offset;
+};
+
VkResult
anv_descriptor_set_create(struct anv_device *device,
+ struct anv_descriptor_pool *pool,
const struct anv_descriptor_set_layout *layout,
struct anv_descriptor_set **out_set)
{
struct anv_descriptor_set *set;
- size_t size = sizeof(*set) + layout->size * sizeof(set->descriptors[0]);
+ const size_t size = layout_size(layout);
+
+ set = NULL;
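+   /* First try to bump-allocate from the pool's remaining linear space. */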
+ if (size <= pool->size - pool->next) {
+ set = (struct anv_descriptor_set *) (pool->data + pool->next);
+ pool->next += size;
+ } else {
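+      /* The linear space is exhausted; do a first-fit walk of the free
+       * list. A matching entry is consumed whole, even if it is larger
+       * than requested.
+       */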
+ struct pool_free_list_entry *entry;
+ uint32_t *link = &pool->free_list;
+ for (uint32_t f = pool->free_list; f != EMPTY; f = entry->next) {
+ entry = (struct pool_free_list_entry *) (pool->data + f);
+ if (size <= entry->size) {
+ *link = entry->next;
+ set = (struct anv_descriptor_set *) entry;
+ break;
+ }
+ link = &entry->next;
+ }
+ }
- set = anv_alloc(&device->alloc /* XXX: Use the pool */, size, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!set)
+ if (set == NULL)
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- /* A descriptor set may not be 100% filled. Clear the set so we can can
- * later detect holes in it.
- */
- memset(set, 0, size);
-
+ set->size = size;
set->layout = layout;
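+   /* The buffer views live in the same block as the set itself, right
+    * after the descriptor array (see layout_size() above).
+    */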
+ set->buffer_views =
+ (struct anv_buffer_view *) &set->descriptors[layout->size];
+ set->buffer_count = layout->buffer_count;
+
+ /* By defining the descriptors to be zero now, we can later verify that
+ * a descriptor has not been populated with user data.
+ */
+ memset(set->descriptors, 0, sizeof(struct anv_descriptor) * layout->size);
/* Go through and fill out immutable samplers if we have any */
struct anv_descriptor *desc = set->descriptors;
for (uint32_t b = 0; b < layout->binding_count; b++) {
if (layout->binding[b].immutable_samplers) {
- for (uint32_t i = 0; i < layout->binding[b].array_size; i++)
- desc[i].sampler = layout->binding[b].immutable_samplers[i];
+ for (uint32_t i = 0; i < layout->binding[b].array_size; i++) {
+ /* The type will get changed to COMBINED_IMAGE_SAMPLER in
+ * UpdateDescriptorSets if needed. However, if the descriptor
+ * set has an immutable sampler, UpdateDescriptorSets may never
+ * touch it, so we need to make sure it's 100% valid now.
+ */
+ desc[i] = (struct anv_descriptor) {
+ .type = VK_DESCRIPTOR_TYPE_SAMPLER,
+ .sampler = layout->binding[b].immutable_samplers[i],
+ };
+ }
}
desc += layout->binding[b].array_size;
}
- /* XXX: Use the pool */
- set->buffer_views =
- anv_alloc(&device->alloc,
- sizeof(set->buffer_views[0]) * layout->buffer_count, 8,
- VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
- if (!set->buffer_views) {
- anv_free(&device->alloc, set);
- return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
- }
-
+ /* Allocate surface state for the buffer views. */
for (uint32_t b = 0; b < layout->buffer_count; b++) {
- set->buffer_views[b].surface_state =
- anv_state_pool_alloc(&device->surface_state_pool, 64, 64);
+ struct surface_state_free_list_entry *entry =
+ pool->surface_state_free_list;
+ struct anv_state state;
+
+ if (entry) {
+ state.map = entry;
+ state.offset = entry->offset;
+ state.alloc_size = 64;
+ pool->surface_state_free_list = entry->next;
+ } else {
+ state = anv_state_stream_alloc(&pool->surface_state_stream, 64, 64);
+ }
+
+ set->buffer_views[b].surface_state = state;
}
- set->buffer_count = layout->buffer_count;
+
*out_set = set;
return VK_SUCCESS;
void
anv_descriptor_set_destroy(struct anv_device *device,
+ struct anv_descriptor_pool *pool,
struct anv_descriptor_set *set)
{
- /* XXX: Use the pool */
- for (uint32_t b = 0; b < set->buffer_count; b++)
- anv_state_pool_free(&device->surface_state_pool,
- set->buffer_views[b].surface_state);
+ /* Put the buffer view surface state back on the free list. */
+ for (uint32_t b = 0; b < set->buffer_count; b++) {
+ struct surface_state_free_list_entry *entry =
+ set->buffer_views[b].surface_state.map;
+ entry->next = pool->surface_state_free_list;
+ entry->offset = set->buffer_views[b].surface_state.offset;
+ pool->surface_state_free_list = entry;
+ }
- anv_free(&device->alloc, set->buffer_views);
- anv_free(&device->alloc, set);
+   /* Return the set's memory to the pool: rewind the bump pointer if this
+    * was the most recent allocation, otherwise push the block onto the
+    * free list.
+    */
+ const uint32_t index = (char *) set - pool->data;
+ if (index + set->size == pool->next) {
+ pool->next = index;
+ } else {
+ struct pool_free_list_entry *entry = (struct pool_free_list_entry *) set;
+ entry->next = pool->free_list;
+ entry->size = set->size;
+ pool->free_list = (char *) entry - pool->data;
+ }
}
VkResult anv_AllocateDescriptorSets(
VkDescriptorSet* pDescriptorSets)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_descriptor_pool, pool, pAllocateInfo->descriptorPool);
VkResult result = VK_SUCCESS;
struct anv_descriptor_set *set;
ANV_FROM_HANDLE(anv_descriptor_set_layout, layout,
pAllocateInfo->pSetLayouts[i]);
- result = anv_descriptor_set_create(device, layout, &set);
+ result = anv_descriptor_set_create(device, pool, layout, &set);
if (result != VK_SUCCESS)
break;
const VkDescriptorSet* pDescriptorSets)
{
ANV_FROM_HANDLE(anv_device, device, _device);
+ ANV_FROM_HANDLE(anv_descriptor_pool, pool, descriptorPool);
for (uint32_t i = 0; i < count; i++) {
ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
- anv_descriptor_set_destroy(device, set);
+ anv_descriptor_set_destroy(device, pool, set);
}
return VK_SUCCESS;
&set->descriptors[bind_layout->descriptor_index];
desc += write->dstArrayElement;
+ assert(write->descriptorType == bind_layout->type);
+
switch (write->descriptorType) {
case VK_DESCRIPTOR_TYPE_SAMPLER:
for (uint32_t j = 0; j < write->descriptorCount; j++) {
&set->buffer_views[bind_layout->buffer_index];
view += write->dstArrayElement + j;
- const struct anv_format *format =
- anv_format_for_descriptor_type(write->descriptorType);
-
- view->format = format->isl_format;
+ view->format =
+ anv_isl_format_for_descriptor_type(write->descriptorType);
view->bo = buffer->bo;
view->offset = buffer->offset + write->pBufferInfo[j].offset;