anv: Change render_pass_attachment.format to a VkFormat
[android-x86/external-mesa.git] / src / intel / vulkan / anv_cmd_buffer.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "vk_format_info.h"
33
34 /** \file anv_cmd_buffer.c
35  *
36  * This file contains all of the stuff for emitting commands into a command
37  * buffer.  This includes implementations of most of the vkCmd*
38  * entrypoints.  This file is concerned entirely with state emission and
39  * not with the command buffer data structure itself.  As far as this file
40  * is concerned, most of anv_cmd_buffer is magic.
41  */
42
43 /* TODO: These are taken from GLES.  We should check the Vulkan spec */
44 const struct anv_dynamic_state default_dynamic_state = {
45    .viewport = {
46       .count = 0,
47    },
48    .scissor = {
49       .count = 0,
50    },
51    .line_width = 1.0f,
52    .depth_bias = {
53       .bias = 0.0f,
54       .clamp = 0.0f,
55       .slope = 0.0f,
56    },
57    .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
58    .depth_bounds = {
59       .min = 0.0f,
60       .max = 1.0f,
61    },
62    .stencil_compare_mask = {
63       .front = ~0u,
64       .back = ~0u,
65    },
66    .stencil_write_mask = {
67       .front = ~0u,
68       .back = ~0u,
69    },
70    .stencil_reference = {
71       .front = 0u,
72       .back = 0u,
73    },
74 };
75
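/* Copy the dynamic state groups selected by copy_mask from src to dest.  Each
 * bit in copy_mask corresponds to a VkDynamicState enum value, matching the
 * (1 << VK_DYNAMIC_STATE_*) tests below.
 */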
76 void
77 anv_dynamic_state_copy(struct anv_dynamic_state *dest,
78                        const struct anv_dynamic_state *src,
79                        uint32_t copy_mask)
80 {
81    if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
82       dest->viewport.count = src->viewport.count;
83       typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
84                    src->viewport.count);
85    }
86
87    if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
88       dest->scissor.count = src->scissor.count;
89       typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
90                    src->scissor.count);
91    }
92
93    if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
94       dest->line_width = src->line_width;
95
96    if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
97       dest->depth_bias = src->depth_bias;
98
99    if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
100       typed_memcpy(dest->blend_constants, src->blend_constants, 4);
101
102    if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
103       dest->depth_bounds = src->depth_bounds;
104
105    if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
106       dest->stencil_compare_mask = src->stencil_compare_mask;
107
108    if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
109       dest->stencil_write_mask = src->stencil_write_mask;
110
111    if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
112       dest->stencil_reference = src->stencil_reference;
113 }
114
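/* Reset all per-recording state tracked in anv_cmd_state: descriptor and
 * push constant bindings, dirty bits, the bound pipeline, dynamic state, and
 * any attachment state allocated by anv_cmd_state_setup_attachments().
 */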
115 static void
116 anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
117 {
118    struct anv_cmd_state *state = &cmd_buffer->state;
119
120    memset(&state->descriptors, 0, sizeof(state->descriptors));
121    memset(&state->push_constants, 0, sizeof(state->push_constants));
122    memset(state->binding_tables, 0, sizeof(state->binding_tables));
123    memset(state->samplers, 0, sizeof(state->samplers));
124
125    /* 0 isn't a valid config.  This ensures that we always configure L3$. */
126    cmd_buffer->state.current_l3_config = 0;
127
128    state->dirty = 0;
129    state->vb_dirty = 0;
130    state->descriptors_dirty = 0;
131    state->push_constants_dirty = 0;
132    state->pipeline = NULL;
133    state->restart_index = UINT32_MAX;
134    state->dynamic = default_dynamic_state;
135    state->need_query_wa = true;
136
137    if (state->attachments != NULL) {
138       anv_free(&cmd_buffer->pool->alloc, state->attachments);
139       state->attachments = NULL;
140    }
141
142    state->gen7.index_buffer = NULL;
143 }
144
145 /**
146  * Setup anv_cmd_state::attachments for vkCmdBeginRenderPass.
147  */
148 void
149 anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
150                                 const VkRenderPassBeginInfo *info)
151 {
152    struct anv_cmd_state *state = &cmd_buffer->state;
153    ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);
154
155    anv_free(&cmd_buffer->pool->alloc, state->attachments);
156
157    if (pass->attachment_count == 0) {
158       state->attachments = NULL;
159       return;
160    }
161
162    state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
163                                   pass->attachment_count *
164                                        sizeof(state->attachments[0]),
165                                   8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
166    if (state->attachments == NULL) {
167       /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
168       abort();
169    }
170
171    for (uint32_t i = 0; i < pass->attachment_count; ++i) {
172       struct anv_render_pass_attachment *att = &pass->attachments[i];
173       VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
174       VkImageAspectFlags clear_aspects = 0;
175
176       if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
177          /* color attachment */
178          if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
179             clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
180          }
181       } else {
182          /* depthstencil attachment */
183          if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
184              att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
185             clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
186          }
187          if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
188              att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
189             clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
190          }
191       }
192
193       state->attachments[i].pending_clear_aspects = clear_aspects;
194       if (clear_aspects) {
195          assert(info->clearValueCount > i);
196          state->attachments[i].clear_value = info->pClearValues[i];
197       }
198    }
199 }
200
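/* Ensure the push constant block allocated for the given stage is at least
 * `size` bytes, (re)allocating it from the command pool's allocator as needed.
 */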
201 static VkResult
202 anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
203                                           gl_shader_stage stage, uint32_t size)
204 {
205    struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];
206
207    if (*ptr == NULL) {
208       *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
209                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
210       if (*ptr == NULL)
211          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
212    } else if ((*ptr)->size < size) {
213       *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
214                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
215       if (*ptr == NULL)
216          return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
217    }
218    (*ptr)->size = size;
219
220    return VK_SUCCESS;
221 }
222
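/* Convenience wrapper: grow the push constant allocation for `stage` so it is
 * large enough to hold everything up to and including `field`, e.g.
 *
 *    anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, dynamic);
 *
 * as used below when binding descriptor sets with dynamic offsets.
 */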
223 #define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
224    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
225       (offsetof(struct anv_push_constants, field) + \
226        sizeof(cmd_buffer->state.push_constants[0]->field)))
227
228 static VkResult anv_create_cmd_buffer(
229     struct anv_device *                         device,
230     struct anv_cmd_pool *                       pool,
231     VkCommandBufferLevel                        level,
232     VkCommandBuffer*                            pCommandBuffer)
233 {
234    struct anv_cmd_buffer *cmd_buffer;
235    VkResult result;
236
237    cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
238                           VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
239    if (cmd_buffer == NULL)
240       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
241
242    cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
243    cmd_buffer->device = device;
244    cmd_buffer->pool = pool;
245    cmd_buffer->level = level;
246    cmd_buffer->state.attachments = NULL;
247
248    result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
249    if (result != VK_SUCCESS)
250       goto fail;
251
252    anv_state_stream_init(&cmd_buffer->surface_state_stream,
253                          &device->surface_state_block_pool);
254    anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
255                          &device->dynamic_state_block_pool);
256
257    if (pool) {
258       list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
259    } else {
260       /* Init the pool_link so we can safely call list_del when we destroy
261        * the command buffer
262        */
263       list_inithead(&cmd_buffer->pool_link);
264    }
265
266    *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);
267
268    return VK_SUCCESS;
269
270  fail:
271    anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
272
273    return result;
274 }
275
276 VkResult anv_AllocateCommandBuffers(
277     VkDevice                                    _device,
278     const VkCommandBufferAllocateInfo*          pAllocateInfo,
279     VkCommandBuffer*                            pCommandBuffers)
280 {
281    ANV_FROM_HANDLE(anv_device, device, _device);
282    ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);
283
284    VkResult result = VK_SUCCESS;
285    uint32_t i;
286
287    for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
288       result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
289                                      &pCommandBuffers[i]);
290       if (result != VK_SUCCESS)
291          break;
292    }
293
294    if (result != VK_SUCCESS)
295       anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
296                              i, pCommandBuffers);
297
298    return result;
299 }
300
301 static void
302 anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
303 {
304    list_del(&cmd_buffer->pool_link);
305
306    anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);
307
308    anv_state_stream_finish(&cmd_buffer->surface_state_stream);
309    anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
310
311    anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
312    anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
313 }
314
315 void anv_FreeCommandBuffers(
316     VkDevice                                    device,
317     VkCommandPool                               commandPool,
318     uint32_t                                    commandBufferCount,
319     const VkCommandBuffer*                      pCommandBuffers)
320 {
321    for (uint32_t i = 0; i < commandBufferCount; i++) {
322       ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
323
324       anv_cmd_buffer_destroy(cmd_buffer);
325    }
326 }
327
328 VkResult anv_ResetCommandBuffer(
329     VkCommandBuffer                             commandBuffer,
330     VkCommandBufferResetFlags                   flags)
331 {
332    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
333
334    cmd_buffer->usage_flags = 0;
335    cmd_buffer->state.current_pipeline = UINT32_MAX;
336    anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
337    anv_cmd_state_reset(cmd_buffer);
338
339    anv_state_stream_finish(&cmd_buffer->surface_state_stream);
340    anv_state_stream_init(&cmd_buffer->surface_state_stream,
341                          &cmd_buffer->device->surface_state_block_pool);
342
343    anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
344    anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
345                          &cmd_buffer->device->dynamic_state_block_pool);
346
347    return VK_SUCCESS;
348 }
349
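/* Dispatch STATE_BASE_ADDRESS emission to the per-gen implementation for the
 * device this command buffer was created on.
 */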
350 void
351 anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
352 {
353    switch (cmd_buffer->device->info.gen) {
354    case 7:
355       if (cmd_buffer->device->info.is_haswell)
356          return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
357       else
358          return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
359    case 8:
360       return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
361    case 9:
362       return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
363    default:
364       unreachable("unsupported gen\n");
365    }
366 }
367
368 VkResult anv_BeginCommandBuffer(
369     VkCommandBuffer                             commandBuffer,
370     const VkCommandBufferBeginInfo*             pBeginInfo)
371 {
372    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
373
374    /* If this is the first vkBeginCommandBuffer, we must *initialize* the
375     * command buffer's state. Otherwise, we must *reset* its state. In both
376     * cases we reset it.
377     *
378     * From the Vulkan 1.0 spec:
379     *
380     *    If a command buffer is in the executable state and the command buffer
381     *    was allocated from a command pool with the
382     *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
383     *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
384     *    as if vkResetCommandBuffer had been called with
385     *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
386     *    the command buffer in the recording state.
387     */
388    anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);
389
390    cmd_buffer->usage_flags = pBeginInfo->flags;
391
392    assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
393           !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));
394
395    anv_cmd_buffer_emit_state_base_address(cmd_buffer);
396
397    if (cmd_buffer->usage_flags &
398        VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
399       cmd_buffer->state.framebuffer =
400          anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
401       cmd_buffer->state.pass =
402          anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
403
404       struct anv_subpass *subpass =
405          &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
406
407       anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
408    }
409
410    return VK_SUCCESS;
411 }
412
413 VkResult anv_EndCommandBuffer(
414     VkCommandBuffer                             commandBuffer)
415 {
416    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
417    struct anv_device *device = cmd_buffer->device;
418
419    anv_cmd_buffer_end_batch_buffer(cmd_buffer);
420
421    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
422       /* The algorithm used to compute the validate list is not threadsafe as
423        * it uses the bo->index field.  We have to lock the device around it.
424        * Fortunately, the chances for contention here are probably very low.
425        */
426       pthread_mutex_lock(&device->mutex);
427       anv_cmd_buffer_prepare_execbuf(cmd_buffer);
428       pthread_mutex_unlock(&device->mutex);
429    }
430
431    return VK_SUCCESS;
432 }
433
434 void anv_CmdBindPipeline(
435     VkCommandBuffer                             commandBuffer,
436     VkPipelineBindPoint                         pipelineBindPoint,
437     VkPipeline                                  _pipeline)
438 {
439    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
440    ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
441
442    switch (pipelineBindPoint) {
443    case VK_PIPELINE_BIND_POINT_COMPUTE:
444       cmd_buffer->state.compute_pipeline = pipeline;
445       cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
446       cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
447       cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
448       break;
449
450    case VK_PIPELINE_BIND_POINT_GRAPHICS:
451       cmd_buffer->state.pipeline = pipeline;
452       cmd_buffer->state.vb_dirty |= pipeline->vb_used;
453       cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
454       cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
455       cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;
456
457       /* Apply the dynamic state from the pipeline */
458       cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
459       anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
460                              &pipeline->dynamic_state,
461                              pipeline->dynamic_state_mask);
462       break;
463
464    default:
465       assert(!"invalid bind point");
466       break;
467    }
468 }
469
470 void anv_CmdSetViewport(
471     VkCommandBuffer                             commandBuffer,
472     uint32_t                                    firstViewport,
473     uint32_t                                    viewportCount,
474     const VkViewport*                           pViewports)
475 {
476    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
477
478    const uint32_t total_count = firstViewport + viewportCount;
479    if (cmd_buffer->state.dynamic.viewport.count < total_count)
480       cmd_buffer->state.dynamic.viewport.count = total_count;
481
482    memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
483           pViewports, viewportCount * sizeof(*pViewports));
484
485    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
486 }
487
488 void anv_CmdSetScissor(
489     VkCommandBuffer                             commandBuffer,
490     uint32_t                                    firstScissor,
491     uint32_t                                    scissorCount,
492     const VkRect2D*                             pScissors)
493 {
494    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
495
496    const uint32_t total_count = firstScissor + scissorCount;
497    if (cmd_buffer->state.dynamic.scissor.count < total_count)
498       cmd_buffer->state.dynamic.scissor.count = total_count;
499
500    memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
501           pScissors, scissorCount * sizeof(*pScissors));
502
503    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
504 }
505
506 void anv_CmdSetLineWidth(
507     VkCommandBuffer                             commandBuffer,
508     float                                       lineWidth)
509 {
510    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
511
512    cmd_buffer->state.dynamic.line_width = lineWidth;
513    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
514 }
515
516 void anv_CmdSetDepthBias(
517     VkCommandBuffer                             commandBuffer,
518     float                                       depthBiasConstantFactor,
519     float                                       depthBiasClamp,
520     float                                       depthBiasSlopeFactor)
521 {
522    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
523
524    cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
525    cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
526    cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
527
528    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
529 }
530
531 void anv_CmdSetBlendConstants(
532     VkCommandBuffer                             commandBuffer,
533     const float                                 blendConstants[4])
534 {
535    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
536
537    memcpy(cmd_buffer->state.dynamic.blend_constants,
538           blendConstants, sizeof(float) * 4);
539
540    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
541 }
542
543 void anv_CmdSetDepthBounds(
544     VkCommandBuffer                             commandBuffer,
545     float                                       minDepthBounds,
546     float                                       maxDepthBounds)
547 {
548    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
549
550    cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
551    cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
552
553    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
554 }
555
556 void anv_CmdSetStencilCompareMask(
557     VkCommandBuffer                             commandBuffer,
558     VkStencilFaceFlags                          faceMask,
559     uint32_t                                    compareMask)
560 {
561    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
562
563    if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
564       cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
565    if (faceMask & VK_STENCIL_FACE_BACK_BIT)
566       cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
567
568    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
569 }
570
571 void anv_CmdSetStencilWriteMask(
572     VkCommandBuffer                             commandBuffer,
573     VkStencilFaceFlags                          faceMask,
574     uint32_t                                    writeMask)
575 {
576    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
577
578    if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
579       cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
580    if (faceMask & VK_STENCIL_FACE_BACK_BIT)
581       cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
582
583    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
584 }
585
586 void anv_CmdSetStencilReference(
587     VkCommandBuffer                             commandBuffer,
588     VkStencilFaceFlags                          faceMask,
589     uint32_t                                    reference)
590 {
591    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
592
593    if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
594       cmd_buffer->state.dynamic.stencil_reference.front = reference;
595    if (faceMask & VK_STENCIL_FACE_BACK_BIT)
596       cmd_buffer->state.dynamic.stencil_reference.back = reference;
597
598    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
599 }
600
601 void anv_CmdBindDescriptorSets(
602     VkCommandBuffer                             commandBuffer,
603     VkPipelineBindPoint                         pipelineBindPoint,
604     VkPipelineLayout                            _layout,
605     uint32_t                                    firstSet,
606     uint32_t                                    descriptorSetCount,
607     const VkDescriptorSet*                      pDescriptorSets,
608     uint32_t                                    dynamicOffsetCount,
609     const uint32_t*                             pDynamicOffsets)
610 {
611    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
612    ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
613    struct anv_descriptor_set_layout *set_layout;
614
615    assert(firstSet + descriptorSetCount <= MAX_SETS);
616
617    uint32_t dynamic_slot = 0;
618    for (uint32_t i = 0; i < descriptorSetCount; i++) {
619       ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
620       set_layout = layout->set[firstSet + i].layout;
621
622       if (cmd_buffer->state.descriptors[firstSet + i] != set) {
623          cmd_buffer->state.descriptors[firstSet + i] = set;
624          cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
625       }
626
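      /* Dynamic buffer offsets are consumed in binding/array-element order
       * and written into the push constants of every stage that uses this
       * set, together with the range of the bound buffer view.
       */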
627       if (set_layout->dynamic_offset_count > 0) {
628          anv_foreach_stage(s, set_layout->shader_stages) {
629             anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);
630
631             struct anv_push_constants *push =
632                cmd_buffer->state.push_constants[s];
633
634             unsigned d = layout->set[firstSet + i].dynamic_offset_start;
635             const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
636             struct anv_descriptor *desc = set->descriptors;
637
638             for (unsigned b = 0; b < set_layout->binding_count; b++) {
639                if (set_layout->binding[b].dynamic_offset_index < 0)
640                   continue;
641
642                unsigned array_size = set_layout->binding[b].array_size;
643                for (unsigned j = 0; j < array_size; j++) {
644                   uint32_t range = 0;
645                   if (desc->buffer_view)
646                      range = desc->buffer_view->range;
647                   push->dynamic[d].offset = *(offsets++);
648                   push->dynamic[d].range = range;
649                   desc++;
650                   d++;
651                }
652             }
653          }
654          cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
655       }
656    }
657 }
658
659 void anv_CmdBindVertexBuffers(
660     VkCommandBuffer                             commandBuffer,
661     uint32_t                                    firstBinding,
662     uint32_t                                    bindingCount,
663     const VkBuffer*                             pBuffers,
664     const VkDeviceSize*                         pOffsets)
665 {
666    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
667    struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;
668
669    /* We have to defer setting up vertex buffers since we need the buffer
670     * stride from the pipeline. */
671
672    assert(firstBinding + bindingCount <= MAX_VBS);
673    for (uint32_t i = 0; i < bindingCount; i++) {
674       vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
675       vb[firstBinding + i].offset = pOffsets[i];
676       cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
677    }
678 }
679
680 static void
681 add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
682                         struct anv_state state, struct anv_bo *bo, uint32_t offset)
683 {
684    /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
685     * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
686     * the initial state to set the high bits to 0. */
687
688    const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;
689
690    anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
691                       state.offset + dword * 4, bo, offset);
692 }
693
694 enum isl_format
695 anv_isl_format_for_descriptor_type(VkDescriptorType type)
696 {
697    switch (type) {
698    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
699    case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
700       return ISL_FORMAT_R32G32B32A32_FLOAT;
701
702    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
703    case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
704       return ISL_FORMAT_RAW;
705
706    default:
707       unreachable("Invalid descriptor type");
708    }
709 }
710
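/* Allocate a "null" surface state through the per-gen implementation; it is
 * used to back render target slots that have no attachment bound.
 */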
711 static struct anv_state
712 anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
713                                         struct anv_framebuffer *fb)
714 {
715    switch (cmd_buffer->device->info.gen) {
716    case 7:
717       if (cmd_buffer->device->info.is_haswell) {
718          return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
719       } else {
720          return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
721       }
722    case 8:
723       return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
724    case 9:
725       return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
726    default:
727       unreachable("Invalid hardware generation");
728    }
729 }
730
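/* Build the binding table for a shader stage: allocate a table from the
 * command buffer's binding table block and fill one entry per surface in the
 * pipeline's bind map (color attachments, buffer views, image views), adding
 * a surface state relocation for each.  For compute, `bias` reserves slot 0
 * for the num_workgroups buffer when the shader reads the workgroup count.
 */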
731 VkResult
732 anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
733                                   gl_shader_stage stage,
734                                   struct anv_state *bt_state)
735 {
736    struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
737    struct anv_subpass *subpass = cmd_buffer->state.subpass;
738    struct anv_pipeline_bind_map *map;
739    uint32_t bias, state_offset;
740
741    switch (stage) {
742    case MESA_SHADER_COMPUTE:
743       map = &cmd_buffer->state.compute_pipeline->bindings[stage];
744       bias = 1;
745       break;
746    default:
747       map = &cmd_buffer->state.pipeline->bindings[stage];
748       bias = 0;
749       break;
750    }
751
752    if (bias + map->surface_count == 0) {
753       *bt_state = (struct anv_state) { 0, };
754       return VK_SUCCESS;
755    }
756
757    *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
758                                                   bias + map->surface_count,
759                                                   &state_offset);
760    uint32_t *bt_map = bt_state->map;
761
762    if (bt_state->map == NULL)
763       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
764
765    if (stage == MESA_SHADER_COMPUTE &&
766        get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
767       struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
768       uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
769
770       struct anv_state surface_state;
771       surface_state =
772          anv_cmd_buffer_alloc_surface_state(cmd_buffer);
773
774       const enum isl_format format =
775          anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
776       anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
777                                     format, bo_offset, 12, 1);
778
779       bt_map[0] = surface_state.offset + state_offset;
780       add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
781    }
782
783    if (map->surface_count == 0)
784       goto out;
785
786    if (map->image_count > 0) {
787       VkResult result =
788          anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
789       if (result != VK_SUCCESS)
790          return result;
791
792       cmd_buffer->state.push_constants_dirty |= 1 << stage;
793    }
794
795    uint32_t image = 0;
796    for (uint32_t s = 0; s < map->surface_count; s++) {
797       struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
798
799       struct anv_state surface_state;
800       struct anv_bo *bo;
801       uint32_t bo_offset;
802
803       if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
804          /* Color attachment binding */
805          assert(stage == MESA_SHADER_FRAGMENT);
806          if (binding->offset < subpass->color_count) {
807             const struct anv_image_view *iview =
808                fb->attachments[subpass->color_attachments[binding->offset]];
809
810             assert(iview->color_rt_surface_state.alloc_size);
811             surface_state = iview->color_rt_surface_state;
812             add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
813                                     iview->bo, iview->offset);
814          } else {
815             /* Null render target */
816             struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
817             surface_state =
818                anv_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
819          }
820
821          bt_map[bias + s] = surface_state.offset + state_offset;
822          continue;
823       }
824
825       struct anv_descriptor_set *set =
826          cmd_buffer->state.descriptors[binding->set];
827       struct anv_descriptor *desc = &set->descriptors[binding->offset];
828
829       switch (desc->type) {
830       case VK_DESCRIPTOR_TYPE_SAMPLER:
831          /* Nothing for us to do here */
832          continue;
833
834       case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
835       case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
836       case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
837          surface_state = desc->image_view->sampler_surface_state;
838          assert(surface_state.alloc_size);
839          bo = desc->image_view->bo;
840          bo_offset = desc->image_view->offset;
841          break;
842
843       case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
844          surface_state = desc->image_view->storage_surface_state;
845          assert(surface_state.alloc_size);
846          bo = desc->image_view->bo;
847          bo_offset = desc->image_view->offset;
848
849          struct brw_image_param *image_param =
850             &cmd_buffer->state.push_constants[stage]->images[image++];
851
852          *image_param = desc->image_view->storage_image_param;
853          image_param->surface_idx = bias + s;
854          break;
855       }
856
857       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
858       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
859       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
860       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
861       case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
862          surface_state = desc->buffer_view->surface_state;
863          assert(surface_state.alloc_size);
864          bo = desc->buffer_view->bo;
865          bo_offset = desc->buffer_view->offset;
866          break;
867
868       case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
869          surface_state = desc->buffer_view->storage_surface_state;
870          assert(surface_state.alloc_size);
871          bo = desc->buffer_view->bo;
872          bo_offset = desc->buffer_view->offset;
873
874          struct brw_image_param *image_param =
875             &cmd_buffer->state.push_constants[stage]->images[image++];
876
877          *image_param = desc->buffer_view->storage_image_param;
878          image_param->surface_idx = bias + s;
879          break;
880
881       default:
882          assert(!"Invalid descriptor type");
883          continue;
884       }
885
886       bt_map[bias + s] = surface_state.offset + state_offset;
887       add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
888    }
889    assert(image == map->image_count);
890
891  out:
892    if (!cmd_buffer->device->info.has_llc)
893       anv_state_clflush(*bt_state);
894
895    return VK_SUCCESS;
896 }
897
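/* Emit SAMPLER_STATE for every sampler in the stage's bind map into a single
 * dynamic state allocation, 16 bytes (4 dwords) per sampler.
 */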
898 VkResult
899 anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
900                              gl_shader_stage stage, struct anv_state *state)
901 {
902    struct anv_pipeline_bind_map *map;
903
904    if (stage == MESA_SHADER_COMPUTE)
905       map = &cmd_buffer->state.compute_pipeline->bindings[stage];
906    else
907       map = &cmd_buffer->state.pipeline->bindings[stage];
908
909    if (map->sampler_count == 0) {
910       *state = (struct anv_state) { 0, };
911       return VK_SUCCESS;
912    }
913
914    uint32_t size = map->sampler_count * 16;
915    *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
916
917    if (state->map == NULL)
918       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
919
920    for (uint32_t s = 0; s < map->sampler_count; s++) {
921       struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
922       struct anv_descriptor_set *set =
923          cmd_buffer->state.descriptors[binding->set];
924       struct anv_descriptor *desc = &set->descriptors[binding->offset];
925
926       if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
927           desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
928          continue;
929
930       struct anv_sampler *sampler = desc->sampler;
931
932       /* This can happen if we have an unfilled slot since TYPE_SAMPLER
933        * happens to be zero.
934        */
935       if (sampler == NULL)
936          continue;
937
938       memcpy(state->map + (s * 16),
939              sampler->state, sizeof(sampler->state));
940    }
941
942    if (!cmd_buffer->device->info.has_llc)
943       anv_state_clflush(*state);
944
945    return VK_SUCCESS;
946 }
947
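/* Copy `size` bytes of `data` into a fresh dynamic state allocation, flushing
 * the CPU cache on non-LLC platforms so the GPU sees the data.
 */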
948 struct anv_state
949 anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
950                             const void *data, uint32_t size, uint32_t alignment)
951 {
952    struct anv_state state;
953
954    state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
955    memcpy(state.map, data, size);
956
957    if (!cmd_buffer->device->info.has_llc)
958       anv_state_clflush(state);
959
960    VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));
961
962    return state;
963 }
964
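/* Allocate dynamic state holding the bitwise OR of the two dword arrays `a`
 * and `b`; used to merge two partially packed pieces of state into the final
 * dwords the hardware consumes.
 */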
965 struct anv_state
966 anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
967                              uint32_t *a, uint32_t *b,
968                              uint32_t dwords, uint32_t alignment)
969 {
970    struct anv_state state;
971    uint32_t *p;
972
973    state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
974                                               dwords * 4, alignment);
975    p = state.map;
976    for (uint32_t i = 0; i < dwords; i++)
977       p[i] = a[i] | b[i];
978
979    if (!cmd_buffer->device->info.has_llc)
980       anv_state_clflush(state);
981
982    VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));
983
984    return state;
985 }
986
987 /**
988  * @brief Setup the command buffer for recording commands inside the given
989  * subpass.
990  *
991  * This does not record all commands needed for starting the subpass.
992  * Starting the subpass may require additional commands.
993  *
994  * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
995  * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT, all setup the
996  * command buffer for recording commands for some subpass.  But only the first
997  * two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
998  */
999 void
1000 anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
1001                            struct anv_subpass *subpass)
1002 {
1003    switch (cmd_buffer->device->info.gen) {
1004    case 7:
1005       if (cmd_buffer->device->info.is_haswell) {
1006          gen75_cmd_buffer_set_subpass(cmd_buffer, subpass);
1007       } else {
1008          gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
1009       }
1010       break;
1011    case 8:
1012       gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
1013       break;
1014    case 9:
1015       gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
1016       break;
1017    default:
1018       unreachable("unsupported gen\n");
1019    }
1020 }
1021
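/* Gather the push constant data for a graphics stage: each prog_data->param
 * entry holds a byte offset into the stage's anv_push_constants block, and
 * the corresponding 32-bit value is copied into a dynamic state allocation.
 */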
1022 struct anv_state
1023 anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
1024                               gl_shader_stage stage)
1025 {
1026    struct anv_push_constants *data =
1027       cmd_buffer->state.push_constants[stage];
1028    const struct brw_stage_prog_data *prog_data =
1029       cmd_buffer->state.pipeline->prog_data[stage];
1030
1031    /* If we don't actually have any push constants, bail. */
1032    if (data == NULL || prog_data->nr_params == 0)
1033       return (struct anv_state) { .offset = 0 };
1034
1035    struct anv_state state =
1036       anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
1037                                          prog_data->nr_params * sizeof(float),
1038                                          32 /* bottom 5 bits MBZ */);
1039
1040    /* Walk through the param array and fill the buffer with data */
1041    uint32_t *u32_map = state.map;
1042    for (unsigned i = 0; i < prog_data->nr_params; i++) {
1043       uint32_t offset = (uintptr_t)prog_data->param[i];
1044       u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
1045    }
1046
1047    if (!cmd_buffer->device->info.has_llc)
1048       anv_state_clflush(state);
1049
1050    return state;
1051 }
1052
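/* Build the compute push constant buffer.  Each thread gets a register-
 * aligned record consisting of its local invocation IDs followed by the
 * uniform params; the uniforms are written for the first thread and then
 * copied into every other thread's record.
 */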
1053 struct anv_state
1054 anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
1055 {
1056    struct anv_push_constants *data =
1057       cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
1058    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
1059    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1060    const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1061
1062    const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
1063    const unsigned push_constant_data_size =
1064       (local_id_dwords + prog_data->nr_params) * 4;
1065    const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
1066    const unsigned param_aligned_count =
1067       reg_aligned_constant_size / sizeof(uint32_t);
1068
1069    /* If we don't actually have any push constants, bail. */
1070    if (reg_aligned_constant_size == 0)
1071       return (struct anv_state) { .offset = 0 };
1072
1073    const unsigned threads = pipeline->cs_thread_width_max;
1074    const unsigned total_push_constants_size =
1075       reg_aligned_constant_size * threads;
1076    const unsigned push_constant_alignment =
1077       cmd_buffer->device->info.gen < 8 ? 32 : 64;
1078    const unsigned aligned_total_push_constants_size =
1079       ALIGN(total_push_constants_size, push_constant_alignment);
1080    struct anv_state state =
1081       anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
1082                                          aligned_total_push_constants_size,
1083                                          push_constant_alignment);
1084
1085    /* Walk through the param array and fill the buffer with data */
1086    uint32_t *u32_map = state.map;
1087
1088    brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
1089                                 reg_aligned_constant_size);
1090
1091    /* Setup uniform data for the first thread */
1092    for (unsigned i = 0; i < prog_data->nr_params; i++) {
1093       uint32_t offset = (uintptr_t)prog_data->param[i];
1094       u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
1095    }
1096
1097    /* Copy uniform data from the first thread to every other thread */
1098    const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
1099    for (unsigned t = 1; t < threads; t++) {
1100       memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
1101              &u32_map[local_id_dwords],
1102              uniform_data_size);
1103    }
1104
1105    if (!cmd_buffer->device->info.has_llc)
1106       anv_state_clflush(state);
1107
1108    return state;
1109 }
1110
1111 void anv_CmdPushConstants(
1112     VkCommandBuffer                             commandBuffer,
1113     VkPipelineLayout                            layout,
1114     VkShaderStageFlags                          stageFlags,
1115     uint32_t                                    offset,
1116     uint32_t                                    size,
1117     const void*                                 pValues)
1118 {
1119    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1120
1121    anv_foreach_stage(stage, stageFlags) {
1122       anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);
1123
1124       memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
1125              pValues, size);
1126    }
1127
1128    cmd_buffer->state.push_constants_dirty |= stageFlags;
1129 }
1130
1131 void anv_CmdExecuteCommands(
1132     VkCommandBuffer                             commandBuffer,
1133     uint32_t                                    commandBufferCount,
1134     const VkCommandBuffer*                      pCmdBuffers)
1135 {
1136    ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
1137
1138    assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1139
1140    for (uint32_t i = 0; i < commandBufferCount; i++) {
1141       ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
1142
1143       assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1144
1145       anv_cmd_buffer_add_secondary(primary, secondary);
1146    }
1147 }
1148
1149 VkResult anv_CreateCommandPool(
1150     VkDevice                                    _device,
1151     const VkCommandPoolCreateInfo*              pCreateInfo,
1152     const VkAllocationCallbacks*                pAllocator,
1153     VkCommandPool*                              pCmdPool)
1154 {
1155    ANV_FROM_HANDLE(anv_device, device, _device);
1156    struct anv_cmd_pool *pool;
1157
1158    pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
1159                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1160    if (pool == NULL)
1161       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1162
1163    if (pAllocator)
1164       pool->alloc = *pAllocator;
1165    else
1166       pool->alloc = device->alloc;
1167
1168    list_inithead(&pool->cmd_buffers);
1169
1170    *pCmdPool = anv_cmd_pool_to_handle(pool);
1171
1172    return VK_SUCCESS;
1173 }
1174
1175 void anv_DestroyCommandPool(
1176     VkDevice                                    _device,
1177     VkCommandPool                               commandPool,
1178     const VkAllocationCallbacks*                pAllocator)
1179 {
1180    ANV_FROM_HANDLE(anv_device, device, _device);
1181    ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
1182
1183    anv_ResetCommandPool(_device, commandPool, 0);
1184
1185    anv_free2(&device->alloc, pAllocator, pool);
1186 }
1187
1188 VkResult anv_ResetCommandPool(
1189     VkDevice                                    device,
1190     VkCommandPool                               commandPool,
1191     VkCommandPoolResetFlags                     flags)
1192 {
1193    ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);
1194
1195    /* FIXME: vkResetCommandPool must not destroy its command buffers. The
1196     * Vulkan 1.0 spec requires that it only reset them:
1197     *
1198     *    Resetting a command pool recycles all of the resources from all of
1199     *    the command buffers allocated from the command pool back to the
1200     *    command pool. All command buffers that have been allocated from the
1201     *    command pool are put in the initial state.
1202     */
1203    list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
1204                             &pool->cmd_buffers, pool_link) {
1205       anv_cmd_buffer_destroy(cmd_buffer);
1206    }
1207
1208    return VK_SUCCESS;
1209 }
1210
1211 /**
1212  * Return NULL if the current subpass has no depthstencil attachment.
1213  */
1214 const struct anv_image_view *
1215 anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
1216 {
1217    const struct anv_subpass *subpass = cmd_buffer->state.subpass;
1218    const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1219
1220    if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
1221       return NULL;
1222
1223    const struct anv_image_view *iview =
1224       fb->attachments[subpass->depth_stencil_attachment];
1225
1226    assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
1227                                 VK_IMAGE_ASPECT_STENCIL_BIT));
1228
1229    return iview;
1230 }