anv/cmd_buffer: Only emit PIPE_CONTROL on-demand
[android-x86/external-mesa.git] src/intel/vulkan/anv_cmd_buffer.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "vk_format_info.h"

/** \file anv_cmd_buffer.c
 *
 * This file contains all of the stuff for emitting commands into a command
 * buffer.  This includes implementations of most of the vkCmd*
 * entrypoints.  This file is concerned entirely with state emission and
 * not with the command buffer data structure itself.  As far as this file
 * is concerned, most of anv_cmd_buffer is magic.
 */

/* TODO: These are taken from GLES.  We should check the Vulkan spec */
const struct anv_dynamic_state default_dynamic_state = {
   .viewport = {
      .count = 0,
   },
   .scissor = {
      .count = 0,
   },
   .line_width = 1.0f,
   .depth_bias = {
      .bias = 0.0f,
      .clamp = 0.0f,
      .slope = 0.0f,
   },
   .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
   .depth_bounds = {
      .min = 0.0f,
      .max = 1.0f,
   },
   .stencil_compare_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_write_mask = {
      .front = ~0u,
      .back = ~0u,
   },
   .stencil_reference = {
      .front = 0u,
      .back = 0u,
   },
};

void
anv_dynamic_state_copy(struct anv_dynamic_state *dest,
                       const struct anv_dynamic_state *src,
                       uint32_t copy_mask)
{
   if (copy_mask & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      dest->viewport.count = src->viewport.count;
      typed_memcpy(dest->viewport.viewports, src->viewport.viewports,
                   src->viewport.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      dest->scissor.count = src->scissor.count;
      typed_memcpy(dest->scissor.scissors, src->scissor.scissors,
                   src->scissor.count);
   }

   if (copy_mask & (1 << VK_DYNAMIC_STATE_LINE_WIDTH))
      dest->line_width = src->line_width;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS))
      dest->depth_bias = src->depth_bias;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
      typed_memcpy(dest->blend_constants, src->blend_constants, 4);

   if (copy_mask & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS))
      dest->depth_bounds = src->depth_bounds;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK))
      dest->stencil_compare_mask = src->stencil_compare_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK))
      dest->stencil_write_mask = src->stencil_write_mask;

   if (copy_mask & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE))
      dest->stencil_reference = src->stencil_reference;
}
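
/* Example (illustrative, with hypothetical `dest`/`src` locals): a caller
 * that only wants the viewport and line-width state copied would build the
 * mask from VK_DYNAMIC_STATE_* values, the same way anv_CmdBindPipeline
 * below uses pipeline->dynamic_state_mask:
 *
 *    uint32_t mask = (1 << VK_DYNAMIC_STATE_VIEWPORT) |
 *                    (1 << VK_DYNAMIC_STATE_LINE_WIDTH);
 *    anv_dynamic_state_copy(&dest, &src, mask);
 */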

static void
anv_cmd_state_reset(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_cmd_state *state = &cmd_buffer->state;

   memset(&state->descriptors, 0, sizeof(state->descriptors));
   memset(&state->push_constants, 0, sizeof(state->push_constants));
   memset(state->binding_tables, 0, sizeof(state->binding_tables));
   memset(state->samplers, 0, sizeof(state->samplers));

   /* 0 isn't a valid config.  This ensures that we always configure L3$. */
   cmd_buffer->state.current_l3_config = 0;

   state->dirty = 0;
   state->vb_dirty = 0;
   state->pending_pipe_bits = 0;
   state->descriptors_dirty = 0;
   state->push_constants_dirty = 0;
   state->pipeline = NULL;
   state->push_constant_stages = 0;
   state->restart_index = UINT32_MAX;
   state->dynamic = default_dynamic_state;
   state->need_query_wa = true;

   if (state->attachments != NULL) {
      anv_free(&cmd_buffer->pool->alloc, state->attachments);
      state->attachments = NULL;
   }

   state->gen7.index_buffer = NULL;
}

/**
 * Set up anv_cmd_state::attachments for vkCmdBeginRenderPass.
 */
void
anv_cmd_state_setup_attachments(struct anv_cmd_buffer *cmd_buffer,
                                const VkRenderPassBeginInfo *info)
{
   struct anv_cmd_state *state = &cmd_buffer->state;
   ANV_FROM_HANDLE(anv_render_pass, pass, info->renderPass);

   anv_free(&cmd_buffer->pool->alloc, state->attachments);

   if (pass->attachment_count == 0) {
      state->attachments = NULL;
      return;
   }

   state->attachments = anv_alloc(&cmd_buffer->pool->alloc,
                                  pass->attachment_count *
                                       sizeof(state->attachments[0]),
                                  8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (state->attachments == NULL) {
      /* FIXME: Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
      abort();
   }

   for (uint32_t i = 0; i < pass->attachment_count; ++i) {
      struct anv_render_pass_attachment *att = &pass->attachments[i];
      VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
      VkImageAspectFlags clear_aspects = 0;

      if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
         /* color attachment */
         if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
         }
      } else {
         /* depthstencil attachment */
         if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
             att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
         }
         if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
             att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
            clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
         }
      }

      state->attachments[i].pending_clear_aspects = clear_aspects;
      if (clear_aspects) {
         assert(info->clearValueCount > i);
         state->attachments[i].clear_value = info->pClearValues[i];
      }
   }
}

static VkResult
anv_cmd_buffer_ensure_push_constants_size(struct anv_cmd_buffer *cmd_buffer,
                                          gl_shader_stage stage, uint32_t size)
{
   struct anv_push_constants **ptr = &cmd_buffer->state.push_constants[stage];

   if (*ptr == NULL) {
      *ptr = anv_alloc(&cmd_buffer->pool->alloc, size, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   } else if ((*ptr)->size < size) {
      *ptr = anv_realloc(&cmd_buffer->pool->alloc, *ptr, size, 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
      if (*ptr == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
   }
   (*ptr)->size = size;

   return VK_SUCCESS;
}

#define anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, field) \
   anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage, \
      (offsetof(struct anv_push_constants, field) + \
       sizeof(cmd_buffer->state.push_constants[0]->field)))
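
/* Illustrative expansion: ensuring the `dynamic` field, for instance,
 * is a request sized to reach the end of that field:
 *
 *    anv_cmd_buffer_ensure_push_constants_size(cmd_buffer, stage,
 *       offsetof(struct anv_push_constants, dynamic) +
 *       sizeof(cmd_buffer->state.push_constants[0]->dynamic));
 *
 * so the per-stage buffer only ever grows far enough to cover the
 * fields that have actually been touched.
 */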

static VkResult anv_create_cmd_buffer(
    struct anv_device *                         device,
    struct anv_cmd_pool *                       pool,
    VkCommandBufferLevel                        level,
    VkCommandBuffer*                            pCommandBuffer)
{
   struct anv_cmd_buffer *cmd_buffer;
   VkResult result;

   cmd_buffer = anv_alloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (cmd_buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   cmd_buffer->device = device;
   cmd_buffer->pool = pool;
   cmd_buffer->level = level;
   cmd_buffer->state.attachments = NULL;

   result = anv_cmd_buffer_init_batch_bo_chain(cmd_buffer);
   if (result != VK_SUCCESS)
      goto fail;

   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &device->surface_state_block_pool);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &device->dynamic_state_block_pool);

   if (pool) {
      list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
   } else {
      /* Init the pool_link so we can safely call list_del when we destroy
       * the command buffer.
       */
      list_inithead(&cmd_buffer->pool_link);
   }

   *pCommandBuffer = anv_cmd_buffer_to_handle(cmd_buffer);

   return VK_SUCCESS;

 fail:
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);

   return result;
}

VkResult anv_AllocateCommandBuffers(
    VkDevice                                    _device,
    const VkCommandBufferAllocateInfo*          pAllocateInfo,
    VkCommandBuffer*                            pCommandBuffers)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, pAllocateInfo->commandPool);

   VkResult result = VK_SUCCESS;
   uint32_t i;

   for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
      result = anv_create_cmd_buffer(device, pool, pAllocateInfo->level,
                                     &pCommandBuffers[i]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS)
      anv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
                             i, pCommandBuffers);

   return result;
}

static void
anv_cmd_buffer_destroy(struct anv_cmd_buffer *cmd_buffer)
{
   list_del(&cmd_buffer->pool_link);

   anv_cmd_buffer_fini_batch_bo_chain(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);

   anv_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
   anv_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

void anv_FreeCommandBuffers(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCommandBuffers)
{
   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

      anv_cmd_buffer_destroy(cmd_buffer);
   }
}

VkResult anv_ResetCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    VkCommandBufferResetFlags                   flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->usage_flags = 0;
   cmd_buffer->state.current_pipeline = UINT32_MAX;
   anv_cmd_buffer_reset_batch_bo_chain(cmd_buffer);
   anv_cmd_state_reset(cmd_buffer);

   anv_state_stream_finish(&cmd_buffer->surface_state_stream);
   anv_state_stream_init(&cmd_buffer->surface_state_stream,
                         &cmd_buffer->device->surface_state_block_pool);

   anv_state_stream_finish(&cmd_buffer->dynamic_state_stream);
   anv_state_stream_init(&cmd_buffer->dynamic_state_stream,
                         &cmd_buffer->device->dynamic_state_block_pool);

   return VK_SUCCESS;
}

void
anv_cmd_buffer_emit_state_base_address(struct anv_cmd_buffer *cmd_buffer)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell)
         return gen75_cmd_buffer_emit_state_base_address(cmd_buffer);
      else
         return gen7_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 8:
      return gen8_cmd_buffer_emit_state_base_address(cmd_buffer);
   case 9:
      return gen9_cmd_buffer_emit_state_base_address(cmd_buffer);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_BeginCommandBuffer(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state. Otherwise, we must *reset* its state. In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_ResetCommandBuffer(commandBuffer, /*flags*/ 0);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   anv_cmd_buffer_emit_state_base_address(cmd_buffer);

   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

      struct anv_subpass *subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

      anv_cmd_buffer_set_subpass(cmd_buffer, subpass);
   }

   return VK_SUCCESS;
}

VkResult anv_EndCommandBuffer(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_device *device = cmd_buffer->device;

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
      /* The algorithm used to compute the validate list is not threadsafe as
       * it uses the bo->index field.  We have to lock the device around it.
       * Fortunately, the chances for contention here are probably very low.
       */
      pthread_mutex_lock(&device->mutex);
      anv_cmd_buffer_prepare_execbuf(cmd_buffer);
      pthread_mutex_unlock(&device->mutex);
   }

   return VK_SUCCESS;
}

void anv_CmdBindPipeline(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipeline                                  _pipeline)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   switch (pipelineBindPoint) {
   case VK_PIPELINE_BIND_POINT_COMPUTE:
      cmd_buffer->state.compute_pipeline = pipeline;
      cmd_buffer->state.compute_dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
      break;

   case VK_PIPELINE_BIND_POINT_GRAPHICS:
      cmd_buffer->state.pipeline = pipeline;
      cmd_buffer->state.vb_dirty |= pipeline->vb_used;
      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_PIPELINE;
      cmd_buffer->state.push_constants_dirty |= pipeline->active_stages;
      cmd_buffer->state.descriptors_dirty |= pipeline->active_stages;

      /* Apply the dynamic state from the pipeline */
      cmd_buffer->state.dirty |= pipeline->dynamic_state_mask;
      anv_dynamic_state_copy(&cmd_buffer->state.dynamic,
                             &pipeline->dynamic_state,
                             pipeline->dynamic_state_mask);
      break;

   default:
      assert(!"invalid bind point");
      break;
   }
}

void anv_CmdSetViewport(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstViewport,
    uint32_t                                    viewportCount,
    const VkViewport*                           pViewports)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstViewport + viewportCount;
   if (cmd_buffer->state.dynamic.viewport.count < total_count)
      cmd_buffer->state.dynamic.viewport.count = total_count;

   memcpy(cmd_buffer->state.dynamic.viewport.viewports + firstViewport,
          pViewports, viewportCount * sizeof(*pViewports));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
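
/* Illustrative client-side usage (hypothetical command buffer `cmd`):
 * this state only takes effect if the bound pipeline listed
 * VK_DYNAMIC_STATE_VIEWPORT among its dynamic states:
 *
 *    VkViewport vp = { 0.0f, 0.0f, 1920.0f, 1080.0f, 0.0f, 1.0f };
 *    vkCmdSetViewport(cmd, 0, 1, &vp);
 */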

void anv_CmdSetScissor(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstScissor,
    uint32_t                                    scissorCount,
    const VkRect2D*                             pScissors)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   const uint32_t total_count = firstScissor + scissorCount;
   if (cmd_buffer->state.dynamic.scissor.count < total_count)
      cmd_buffer->state.dynamic.scissor.count = total_count;

   memcpy(cmd_buffer->state.dynamic.scissor.scissors + firstScissor,
          pScissors, scissorCount * sizeof(*pScissors));

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_SCISSOR;
}

void anv_CmdSetLineWidth(
    VkCommandBuffer                             commandBuffer,
    float                                       lineWidth)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.line_width = lineWidth;
   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}

void anv_CmdSetDepthBias(
    VkCommandBuffer                             commandBuffer,
    float                                       depthBiasConstantFactor,
    float                                       depthBiasClamp,
    float                                       depthBiasSlopeFactor)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
   cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
   cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}

void anv_CmdSetBlendConstants(
    VkCommandBuffer                             commandBuffer,
    const float                                 blendConstants[4])
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   memcpy(cmd_buffer->state.dynamic.blend_constants,
          blendConstants, sizeof(float) * 4);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}

void anv_CmdSetDepthBounds(
    VkCommandBuffer                             commandBuffer,
    float                                       minDepthBounds,
    float                                       maxDepthBounds)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
   cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}

void anv_CmdSetStencilCompareMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    compareMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}

void anv_CmdSetStencilWriteMask(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    writeMask)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}

void anv_CmdSetStencilReference(
    VkCommandBuffer                             commandBuffer,
    VkStencilFaceFlags                          faceMask,
    uint32_t                                    reference)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
      cmd_buffer->state.dynamic.stencil_reference.front = reference;
   if (faceMask & VK_STENCIL_FACE_BACK_BIT)
      cmd_buffer->state.dynamic.stencil_reference.back = reference;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}

void anv_CmdBindDescriptorSets(
    VkCommandBuffer                             commandBuffer,
    VkPipelineBindPoint                         pipelineBindPoint,
    VkPipelineLayout                            _layout,
    uint32_t                                    firstSet,
    uint32_t                                    descriptorSetCount,
    const VkDescriptorSet*                      pDescriptorSets,
    uint32_t                                    dynamicOffsetCount,
    const uint32_t*                             pDynamicOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_pipeline_layout, layout, _layout);
   struct anv_descriptor_set_layout *set_layout;

   assert(firstSet + descriptorSetCount <= MAX_SETS);

   uint32_t dynamic_slot = 0;
   for (uint32_t i = 0; i < descriptorSetCount; i++) {
      ANV_FROM_HANDLE(anv_descriptor_set, set, pDescriptorSets[i]);
      set_layout = layout->set[firstSet + i].layout;

      if (cmd_buffer->state.descriptors[firstSet + i] != set) {
         cmd_buffer->state.descriptors[firstSet + i] = set;
         cmd_buffer->state.descriptors_dirty |= set_layout->shader_stages;
      }

      if (set_layout->dynamic_offset_count > 0) {
         anv_foreach_stage(s, set_layout->shader_stages) {
            anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, s, dynamic);

            struct anv_push_constants *push =
               cmd_buffer->state.push_constants[s];

            unsigned d = layout->set[firstSet + i].dynamic_offset_start;
            const uint32_t *offsets = pDynamicOffsets + dynamic_slot;
            struct anv_descriptor *desc = set->descriptors;

            for (unsigned b = 0; b < set_layout->binding_count; b++) {
               if (set_layout->binding[b].dynamic_offset_index < 0)
                  continue;

               unsigned array_size = set_layout->binding[b].array_size;
               for (unsigned j = 0; j < array_size; j++) {
                  uint32_t range = 0;
                  if (desc->buffer_view)
                     range = desc->buffer_view->range;
                  push->dynamic[d].offset = *(offsets++);
                  push->dynamic[d].range = range;
                  desc++;
                  d++;
               }
            }
         }

         /* Advance past the dynamic offsets this set consumed so the next
          * set with dynamic descriptors reads its own offsets.
          */
         dynamic_slot += set_layout->dynamic_offset_count;

         cmd_buffer->state.push_constants_dirty |= set_layout->shader_stages;
      }
   }
}
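
/* Illustrative example of the dynamic-offset walk above: offsets in
 * pDynamicOffsets are consumed in binding order across the bound sets.
 * With two sets where set 0 holds one UNIFORM_BUFFER_DYNAMIC descriptor
 * and set 1 holds two, the client passes three offsets (hypothetical
 * `cmd`, `layout`, `sets`):
 *
 *    uint32_t offsets[3] = { 0, 256, 512 };
 *    vkCmdBindDescriptorSets(cmd, VK_PIPELINE_BIND_POINT_GRAPHICS,
 *                            layout, 0, 2, sets, 3, offsets);
 */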

void anv_CmdBindVertexBuffers(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    firstBinding,
    uint32_t                                    bindingCount,
    const VkBuffer*                             pBuffers,
    const VkDeviceSize*                         pOffsets)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_vertex_binding *vb = cmd_buffer->state.vertex_bindings;

   /* We have to defer setting up vertex buffers since we need the buffer
    * stride from the pipeline. */

   assert(firstBinding + bindingCount <= MAX_VBS);
   for (uint32_t i = 0; i < bindingCount; i++) {
      vb[firstBinding + i].buffer = anv_buffer_from_handle(pBuffers[i]);
      vb[firstBinding + i].offset = pOffsets[i];
      cmd_buffer->state.vb_dirty |= 1 << (firstBinding + i);
   }
}

static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo, uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0. */

   const uint32_t dword = cmd_buffer->device->info.gen < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}
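
/* For example: on gen7 the relocation lands at state.offset + 4 (dword 1
 * of the SURFACE_STATE), while on gen8+ it lands at state.offset + 32
 * (dword 8, the low half of the 64-bit address).
 */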

enum isl_format
anv_isl_format_for_descriptor_type(VkDescriptorType type)
{
   switch (type) {
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
   case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      return ISL_FORMAT_R32G32B32A32_FLOAT;

   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
   case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      return ISL_FORMAT_RAW;

   default:
      unreachable("Invalid descriptor type");
   }
}

static struct anv_state
anv_cmd_buffer_alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
                                        struct anv_framebuffer *fb)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell) {
         return gen75_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
      } else {
         return gen7_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
      }
   case 8:
      return gen8_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
   case 9:
      return gen9_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
   default:
      unreachable("Invalid hardware generation");
   }
}

VkResult
anv_cmd_buffer_emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                                  gl_shader_stage stage,
                                  struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline_bind_map *map;
   uint32_t bias, state_offset;

   switch (stage) {
   case MESA_SHADER_COMPUTE:
      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
      bias = 1;
      break;
   default:
      map = &cmd_buffer->state.pipeline->bindings[stage];
      bias = 0;
      break;
   }

   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const enum isl_format format =
         anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format, bo_offset, 12, 1);

      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
         /* Color attachment binding */
         assert(stage == MESA_SHADER_FRAGMENT);
         if (binding->offset < subpass->color_count) {
            const struct anv_image_view *iview =
               fb->attachments[subpass->color_attachments[binding->offset]];

            assert(iview->color_rt_surface_state.alloc_size);
            surface_state = iview->color_rt_surface_state;
            add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                                    iview->bo, iview->offset);
         } else {
            /* Null render target */
            struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
            surface_state =
               anv_cmd_buffer_alloc_null_surface_state(cmd_buffer, fb);
         }

         bt_map[bias + s] = surface_state.offset + state_offset;
         continue;
      }

      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->image_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->buffer_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == map->image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}
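
/* Illustrative resulting layout: for a compute shader that uses
 * gl_NumWorkGroups (bias == 1) and has two surfaces in its bind map,
 * the table built above reads
 *
 *    bt_map[0] -> num-workgroups buffer
 *    bt_map[1] -> surface_to_descriptor[0]
 *    bt_map[2] -> surface_to_descriptor[1]
 *
 * while graphics stages (bias == 0) place their first surface at slot 0.
 */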

VkResult
anv_cmd_buffer_emit_samplers(struct anv_cmd_buffer *cmd_buffer,
                             gl_shader_stage stage, struct anv_state *state)
{
   struct anv_pipeline_bind_map *map;

   if (stage == MESA_SHADER_COMPUTE)
      map = &cmd_buffer->state.compute_pipeline->bindings[stage];
   else
      map = &cmd_buffer->state.pipeline->bindings[stage];

   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      struct anv_descriptor *desc = &set->descriptors[binding->offset];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}

struct anv_state
anv_cmd_buffer_emit_dynamic(struct anv_cmd_buffer *cmd_buffer,
                            const void *data, uint32_t size, uint32_t alignment)
{
   struct anv_state state;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, alignment);
   memcpy(state.map, data, size);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(state.map, size));

   return state;
}

struct anv_state
anv_cmd_buffer_merge_dynamic(struct anv_cmd_buffer *cmd_buffer,
                             uint32_t *a, uint32_t *b,
                             uint32_t dwords, uint32_t alignment)
{
   struct anv_state state;
   uint32_t *p;

   state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                              dwords * 4, alignment);
   p = state.map;
   for (uint32_t i = 0; i < dwords; i++)
      p[i] = a[i] | b[i];

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(p, dwords * 4));

   return state;
}
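
/* Illustrative use of the merge above: callers pack one half of a state
 * packet at pipeline-build time with the dynamic fields zeroed, pack the
 * other half at record time with the baked fields zeroed, and OR the two
 * dword streams to get the complete command (hypothetical values):
 *
 *    a[i] = 0xffff0000;   // pipeline half, dynamic bits clear
 *    b[i] = 0x0000ffff;   // dynamic half, pipeline bits clear
 *    p[i] = a[i] | b[i];  // 0xffffffff, the final packed dword
 */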

/**
 * @brief Set up the command buffer for recording commands inside the given
 * subpass.
 *
 * This does not record all commands needed for starting the subpass.
 * Starting the subpass may require additional commands.
 *
 * Note that vkCmdBeginRenderPass, vkCmdNextSubpass, and vkBeginCommandBuffer
 * with VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT all set up the
 * command buffer for recording commands for some subpass.  But only the first
 * two, vkCmdBeginRenderPass and vkCmdNextSubpass, can start a subpass.
 */
void
anv_cmd_buffer_set_subpass(struct anv_cmd_buffer *cmd_buffer,
                           struct anv_subpass *subpass)
{
   switch (cmd_buffer->device->info.gen) {
   case 7:
      if (cmd_buffer->device->info.is_haswell) {
         gen75_cmd_buffer_set_subpass(cmd_buffer, subpass);
      } else {
         gen7_cmd_buffer_set_subpass(cmd_buffer, subpass);
      }
      break;
   case 8:
      gen8_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   case 9:
      gen9_cmd_buffer_set_subpass(cmd_buffer, subpass);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

struct anv_state
anv_cmd_buffer_push_constants(struct anv_cmd_buffer *cmd_buffer,
                              gl_shader_stage stage)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[stage];
   const struct brw_stage_prog_data *prog_data =
      cmd_buffer->state.pipeline->prog_data[stage];

   /* If we don't actually have any push constants, bail. */
   if (data == NULL || prog_data->nr_params == 0)
      return (struct anv_state) { .offset = 0 };

   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         prog_data->nr_params * sizeof(float),
                                         32 /* bottom 5 bits MBZ */);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
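
/* Worked example (illustrative): each prog_data->param[i] entry holds a
 * byte offset into struct anv_push_constants, not a pointer to a value.
 * If param[2] encoded the offset of client_data[8], the loop above would
 * effectively do
 *
 *    u32_map[2] = *(uint32_t *)((uint8_t *)data +
 *                               offsetof(struct anv_push_constants,
 *                                        client_data[8]));
 */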

struct anv_state
anv_cmd_buffer_cs_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_push_constants *data =
      cmd_buffer->state.push_constants[MESA_SHADER_COMPUTE];
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   const unsigned local_id_dwords = cs_prog_data->local_invocation_id_regs * 8;
   const unsigned push_constant_data_size =
      (local_id_dwords + prog_data->nr_params) * 4;
   const unsigned reg_aligned_constant_size = ALIGN(push_constant_data_size, 32);
   const unsigned param_aligned_count =
      reg_aligned_constant_size / sizeof(uint32_t);

   /* If we don't actually have any push constants, bail. */
   if (reg_aligned_constant_size == 0)
      return (struct anv_state) { .offset = 0 };

   const unsigned threads = pipeline->cs_thread_width_max;
   const unsigned total_push_constants_size =
      reg_aligned_constant_size * threads;
   const unsigned push_constant_alignment =
      cmd_buffer->device->info.gen < 8 ? 32 : 64;
   const unsigned aligned_total_push_constants_size =
      ALIGN(total_push_constants_size, push_constant_alignment);
   struct anv_state state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                         aligned_total_push_constants_size,
                                         push_constant_alignment);

   /* Walk through the param array and fill the buffer with data */
   uint32_t *u32_map = state.map;

   brw_cs_fill_local_id_payload(cs_prog_data, u32_map, threads,
                                reg_aligned_constant_size);

   /* Set up uniform data for the first thread */
   for (unsigned i = 0; i < prog_data->nr_params; i++) {
      uint32_t offset = (uintptr_t)prog_data->param[i];
      u32_map[local_id_dwords + i] = *(uint32_t *)((uint8_t *)data + offset);
   }

   /* Copy uniform data from the first thread to every other thread */
   const size_t uniform_data_size = prog_data->nr_params * sizeof(uint32_t);
   for (unsigned t = 1; t < threads; t++) {
      memcpy(&u32_map[t * param_aligned_count + local_id_dwords],
             &u32_map[local_id_dwords],
             uniform_data_size);
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
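
/* Worked size example (illustrative): with two local-invocation-ID
 * registers and nr_params == 4,
 *
 *    local_id_dwords           = 2 * 8          = 16 dwords
 *    push_constant_data_size   = (16 + 4) * 4   = 80 bytes
 *    reg_aligned_constant_size = ALIGN(80, 32)  = 96 bytes
 *
 * so every thread gets its own 96-byte slice: a unique ID payload
 * followed by a copy of the same uniform data.
 */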

void anv_CmdPushConstants(
    VkCommandBuffer                             commandBuffer,
    VkPipelineLayout                            layout,
    VkShaderStageFlags                          stageFlags,
    uint32_t                                    offset,
    uint32_t                                    size,
    const void*                                 pValues)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_foreach_stage(stage, stageFlags) {
      anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, client_data);

      memcpy(cmd_buffer->state.push_constants[stage]->client_data + offset,
             pValues, size);
   }

   cmd_buffer->state.push_constants_dirty |= stageFlags;
}
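
/* Illustrative client-side usage (hypothetical `cmd` and `layout`):
 * pushing a 16-byte color to a fragment-stage push-constant range at
 * offset 0:
 *
 *    const float color[4] = { 1.0f, 0.0f, 0.0f, 1.0f };
 *    vkCmdPushConstants(cmd, layout, VK_SHADER_STAGE_FRAGMENT_BIT,
 *                       0, sizeof(color), color);
 */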

void anv_CmdExecuteCommands(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }
}

VkResult anv_CreateCommandPool(
    VkDevice                                    _device,
    const VkCommandPoolCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkCommandPool*                              pCmdPool)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_cmd_pool *pool;

   pool = anv_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (pool == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   if (pAllocator)
      pool->alloc = *pAllocator;
   else
      pool->alloc = device->alloc;

   list_inithead(&pool->cmd_buffers);

   *pCmdPool = anv_cmd_pool_to_handle(pool);

   return VK_SUCCESS;
}

void anv_DestroyCommandPool(
    VkDevice                                    _device,
    VkCommandPool                               commandPool,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   anv_ResetCommandPool(_device, commandPool, 0);

   anv_free2(&device->alloc, pAllocator, pool);
}

VkResult anv_ResetCommandPool(
    VkDevice                                    device,
    VkCommandPool                               commandPool,
    VkCommandPoolResetFlags                     flags)
{
   ANV_FROM_HANDLE(anv_cmd_pool, pool, commandPool);

   /* FIXME: vkResetCommandPool must not destroy its command buffers. The
    * Vulkan 1.0 spec requires that it only reset them:
    *
    *    Resetting a command pool recycles all of the resources from all of
    *    the command buffers allocated from the command pool back to the
    *    command pool. All command buffers that have been allocated from the
    *    command pool are put in the initial state.
    */
   list_for_each_entry_safe(struct anv_cmd_buffer, cmd_buffer,
                            &pool->cmd_buffers, pool_link) {
      anv_cmd_buffer_destroy(cmd_buffer);
   }

   return VK_SUCCESS;
}

/**
 * Return NULL if the current subpass has no depthstencil attachment.
 */
const struct anv_image_view *
anv_cmd_buffer_get_depth_stencil_view(const struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_subpass *subpass = cmd_buffer->state.subpass;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;

   if (subpass->depth_stencil_attachment == VK_ATTACHMENT_UNUSED)
      return NULL;

   const struct anv_image_view *iview =
      fb->attachments[subpass->depth_stencil_attachment];

   assert(iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT |
                                VK_IMAGE_ASPECT_STENCIL_BIT));

   return iview;
}