/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_bo *scratch_bo = NULL;

   cmd_buffer->state.scratch_size =
      anv_block_pool_size(&device->scratch_block_pool);
   if (cmd_buffer->state.scratch_size > 0)
      scratch_bo = &device->scratch_block_pool.bo;

/* XXX: Do we need this on more than just BDW? */
#if (GEN_GEN >= 8)
   /* Emit a render target cache flush.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  Without
    * this, we get GPU hangs when using multi-level command buffers which
    * clear depth, reset state base address, and then go render stuff.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable = true;
   }
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 };
      sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
      sba.GeneralStateBaseAddressModifyEnable = true;

      sba.SurfaceStateBaseAddress =
         anv_cmd_buffer_surface_base_address(cmd_buffer);
      sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
      sba.SurfaceStateBaseAddressModifyEnable = true;

      sba.DynamicStateBaseAddress =
         (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
      sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
      sba.DynamicStateBaseAddressModifyEnable = true;

      sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
      sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
      sba.IndirectObjectBaseAddressModifyEnable = true;

      sba.InstructionBaseAddress =
         (struct anv_address) { &device->instruction_block_pool.bo, 0 };
      sba.InstructionMemoryObjectControlState = GENX(MOCS);
      sba.InstructionBaseAddressModifyEnable = true;

#  if (GEN_GEN >= 8)
      /* Broadwell requires that we specify a buffer size for a bunch of
       * these fields.  However, since we will be growing the BOs live, we
       * just set them all to the maximum.
       */
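      /* (These size fields are expressed in 4 KiB pages, so 0xfffff covers
       * the full addressable range.)
       */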
      sba.GeneralStateBufferSize                = 0xfffff;
      sba.GeneralStateBufferSizeModifyEnable    = true;
      sba.DynamicStateBufferSize                = 0xfffff;
      sba.DynamicStateBufferSizeModifyEnable    = true;
      sba.IndirectObjectBufferSize              = 0xfffff;
      sba.IndirectObjectBufferSizeModifyEnable  = true;
      sba.InstructionBufferSize                 = 0xfffff;
      sba.InstructionBuffersizeModifyEnable     = true;
#  endif
   }

   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software. It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    [...]
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever with
    * regard to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through
    * experimentation, flushing the texture cache appears to be
    * sufficient.  The theory here is that all of the sampling/rendering
    * units cache the binding table in the texture cache.  However, we have
    * yet to be able to actually confirm this.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
   }
}

void genX(CmdPipelineBarrier)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkBool32                                    byRegion,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   uint32_t b, *dw;

   /* XXX: Right now, we're really dumb and just flush whatever categories
    * the app asks for.  One of these days we may make this a bit better
    * but right now that's all the hardware allows for in most areas.
    */
   VkAccessFlags src_flags = 0;
   VkAccessFlags dst_flags = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      src_flags |= pMemoryBarriers[i].srcAccessMask;
      dst_flags |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
      dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      src_flags |= pImageMemoryBarriers[i].srcAccessMask;
      dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
   }

   /* Keep only the source access flags we care about */
   const uint32_t src_mask =
      VK_ACCESS_SHADER_WRITE_BIT |
      VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
      VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
      VK_ACCESS_TRANSFER_WRITE_BIT;

   src_flags = src_flags & src_mask;

   /* Keep only the destination access flags we care about */
   const uint32_t dst_mask =
      VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
      VK_ACCESS_INDEX_READ_BIT |
      VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
      VK_ACCESS_UNIFORM_READ_BIT |
      VK_ACCESS_SHADER_READ_BIT |
      VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
      VK_ACCESS_TRANSFER_READ_BIT;

   dst_flags = dst_flags & dst_mask;

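   /* A barrier turns into at most two PIPE_CONTROLs: the first flushes the
    * write caches implied by src_flags, the second invalidates the read
    * caches implied by dst_flags.
    */
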
   /* The src flags represent how things were used previously.  This is
    * what we use for doing flushes.
    */
   struct GENX(PIPE_CONTROL) flush_cmd = {
      GENX(PIPE_CONTROL_header),
      .PostSyncOperation = NoWrite,
   };

   for_each_bit(b, src_flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_SHADER_WRITE_BIT:
         flush_cmd.DCFlushEnable = true;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
         flush_cmd.RenderTargetCacheFlushEnable = true;
         break;
      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
         flush_cmd.DepthCacheFlushEnable = true;
         break;
      case VK_ACCESS_TRANSFER_WRITE_BIT:
         flush_cmd.RenderTargetCacheFlushEnable = true;
         flush_cmd.DepthCacheFlushEnable = true;
         break;
      default:
         unreachable("should've masked this out by now");
      }
   }

   /* If we end up doing two PIPE_CONTROLs, the first, flushing, one also has
    * to stall and wait for the flushing to finish, so we don't re-dirty the
    * caches with in-flight rendering after the second PIPE_CONTROL
    * invalidates.
    */

   if (dst_flags)
      flush_cmd.CommandStreamerStallEnable = true;

   if (src_flags && dst_flags) {
      dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
      GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
   }

   /* The dst flags represent how things will be used in the future.  This
    * is what we use for doing cache invalidations.
    */
   struct GENX(PIPE_CONTROL) invalidate_cmd = {
      GENX(PIPE_CONTROL_header),
      .PostSyncOperation = NoWrite,
   };

   for_each_bit(b, dst_flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
      case VK_ACCESS_INDEX_READ_BIT:
      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
         invalidate_cmd.VFCacheInvalidationEnable = true;
         break;
      case VK_ACCESS_UNIFORM_READ_BIT:
         invalidate_cmd.ConstantCacheInvalidationEnable = true;
         /* fallthrough */
      case VK_ACCESS_SHADER_READ_BIT:
         invalidate_cmd.TextureCacheInvalidationEnable = true;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
         invalidate_cmd.TextureCacheInvalidationEnable = true;
         break;
      case VK_ACCESS_TRANSFER_READ_BIT:
         invalidate_cmd.TextureCacheInvalidationEnable = true;
         break;
      default:
         unreachable("should've masked this out by now");
      }
   }

   if (dst_flags) {
      dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
      GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
   }
}

static uint32_t
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
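   /* These are the _3DCommandSubOpcode values of the per-stage
    * 3DSTATE_CONSTANT_* packets (VS, HS, DS, GS, PS); compute push
    * constants are handled elsewhere.
    */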
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 21,
      [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
      [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
      [MESA_SHADER_GEOMETRY]                    = 22,
      [MESA_SHADER_FRAGMENT]                    = 23,
      [MESA_SHADER_COMPUTE]                     = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
            c._3DCommandSubOpcode = push_constant_opcodes[stage];
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
            c._3DCommandSubOpcode = push_constant_opcodes[stage];
            c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
#if GEN_GEN >= 9
               .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
               .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
#else
               .PointerToConstantBuffer0 = { .offset = state.offset },
               .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
#endif
            };
         }
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;

   return flushed;
}

void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

#if GEN_GEN >= 8
   /* FIXME (jason): Currently, the config_l3 function causes problems on
    * Haswell and prior if you have a kernel older than 4.4.  In order to
    * work, it requires a couple of registers be white-listed in the
    * command parser and they weren't added until 4.4.  What we should do
    * is check the command parser version and make it a no-op if your
    * command parser is either off or too old.  Compute won't work 100%,
    * but at least 3-D will.  In the meantime, I'm going to make this
    * gen8+ only so that we can get Haswell working again.
    */
   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline);
#endif

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,

#if GEN_GEN >= 8
            .MemoryObjectControlState = GENX(MOCS),
#else
            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
            .InstanceDataStepRate = 1,
            .VertexBufferMemoryObjectControlState = GENX(MOCS),
#endif

            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },

#if GEN_GEN >= 8
            .BufferSize = buffer->size - offset
#else
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
#endif
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer,
       * the scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
       * re-emit STATE_BASE_ADDRESS so that we use the bigger scratch bo.
       */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
       *
       *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
       *    the next 3DPRIMITIVE command after programming the
       *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
       *
       * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
       * pipeline setup, we need to dirty push constants.
       */
      cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
   }

#if GEN_GEN <= 7
   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthStallEnable  = true;
         pc.PostSyncOperation = WriteImmediateData;
         pc.Address           =
            (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
      }
   }
#endif

   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images). After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty) {
#if GEN_GEN >= 9
      /* On Sky Lake and later, the binding table pointers commands are
       * what actually flush the changes to push constant state so we need
       * to dirty them so they get re-emitted below.
       */
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
#else
      cmd_buffer_flush_push_constants(cmd_buffer);
#endif
   }

   if (dirty)
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
}

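/* The VS reads gl_BaseVertex/gl_BaseInstance as if they were ordinary vertex
 * attributes; we feed them from a dedicated vertex buffer bound at the
 * reserved index 32 below.
 */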
static void
emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_bo *bo, uint32_t offset)
{
   uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
                                 GENX(3DSTATE_VERTEX_BUFFERS));

   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferPitch = 0,
#if (GEN_GEN >= 8)
         .MemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .BufferSize = 8
#else
         .VertexBufferMemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .EndAddress = { bo, offset + 8 },
#endif
      });
}

static void
emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
                          uint32_t base_vertex, uint32_t base_instance)
{
   struct anv_state id_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);

   ((uint32_t *)id_state.map)[0] = base_vertex;
   ((uint32_t *)id_state.map)[1] = base_instance;

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(id_state);

   emit_base_vertex_instance_bo(cmd_buffer,
      &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
}

void genX(CmdDraw)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    vertexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstVertex,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = vertexCount;
      prim.StartVertexLocation      = firstVertex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = 0;
   }
}

void genX(CmdDrawIndexed)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    indexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstIndex,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = indexCount;
      prim.StartVertexLocation      = firstIndex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = vertexOffset;
   }
}

/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440

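/* Load a 32-bit value from a buffer object into an MMIO register via
 * MI_LOAD_REGISTER_MEM.
 */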
static void
emit_lrm(struct anv_batch *batch,
         uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
   }
}

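/* Load a 32-bit immediate into an MMIO register via MI_LOAD_REGISTER_IMM. */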
static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset   = reg;
      lri.DataDWord        = imm;
   }
}

void genX(CmdDrawIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);

   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.IndirectParameterEnable  = true;
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
   }
}

void genX(CmdDrawIndexedIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   /* TODO: We need to stomp base vertex to 0 somehow */
   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);

   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.IndirectParameterEnable  = true;
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
   }
}

#if GEN_GEN == 7

static bool
verify_cmd_parser(const struct anv_device *device,
                  int required_version,
                  const char *function)
{
   if (device->instance->physicalDevice.cmd_parser_version < required_version) {
      vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
                "cmd parser version %d is required for %s",
                required_version, function);
      return false;
   } else {
      return true;
   }
}

#endif

void genX(CmdDispatch)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);

   if (prog_data->uses_num_work_groups) {
      struct anv_state state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
      uint32_t *sizes = state.map;
      sizes[0] = x;
      sizes[1] = y;
      sizes[2] = z;
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(state);
      cmd_buffer->state.num_workgroups_offset = state.offset;
      cmd_buffer->state.num_workgroups_bo =
         &cmd_buffer->device->dynamic_state_block_pool.bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
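      /* SIMDSize encodes SIMD8/16/32 as 0/1/2, hence the divide by 16. */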
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = pipeline->cs_thread_width_max - 1;
      ggw.ThreadGroupIDXDimension      = x;
      ggw.ThreadGroupIDYDimension      = y;
      ggw.ThreadGroupIDZDimension      = z;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
}

#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

#define MI_PREDICATE_SRC0  0x2400
#define MI_PREDICATE_SRC1  0x2408
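
/* MI_PREDICATE compares the 64-bit SRC0 and SRC1 registers and updates the
 * render predicate consumed by commands with PredicateEnable set (such as
 * the GPGPU_WALKER below).
 */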

void genX(CmdDispatchIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;
   struct anv_batch *batch = &cmd_buffer->batch;

#if GEN_GEN == 7
   /* Linux 4.4 added command parser version 5 which allows the GPGPU
    * indirect dispatch registers to be written.
    */
   if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
      return;
#endif

   if (prog_data->uses_num_work_groups) {
      cmd_buffer->state.num_workgroups_offset = bo_offset;
      cmd_buffer->state.num_workgroups_bo = bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
   emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
   emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);

#if GEN_GEN <= 7
   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_SET;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* predicate = !predicate; */
#define COMPARE_FALSE                           1
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOADINV;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_FALSE;
   }
#endif

   anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable      = true;
      ggw.PredicateEnable              = GEN_GEN <= 7;
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = pipeline->cs_thread_width_max - 1;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
}

static void
flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
                                      uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#elif GEN_GEN <= 7
   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *   Project: DEVSNB+
    *
    *   Software must ensure all the write caches are flushed through a
    *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *   command to invalidate read only caches prior to programming
    *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable  = true;
      pc.DepthCacheFlushEnable         = true;
      pc.DCFlushEnable                 = true;
      pc.PostSyncOperation             = NoWrite;
      pc.CommandStreamerStallEnable    = true;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable   = true;
      pc.ConstantCacheInvalidationEnable  = true;
      pc.StateCacheInvalidationEnable     = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.PostSyncOperation                = NoWrite;
   }
#endif
}

void
genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.current_pipeline != _3D) {
      flush_pipeline_before_pipeline_select(cmd_buffer, _3D);

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
#if GEN_GEN >= 9
         ps.MaskBits = 3;
#endif
         ps.PipelineSelection = _3D;
      }

      cmd_buffer->state.current_pipeline = _3D;
   }
}

void
genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.current_pipeline != GPGPU) {
      flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
#if GEN_GEN >= 9
         ps.MaskBits = 3;
#endif
         ps.PipelineSelection = GPGPU;
      }

      cmd_buffer->state.current_pipeline = GPGPU;
   }
}

struct anv_state
genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
                                          struct anv_framebuffer *fb)
{
   struct anv_state state =
      anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);

   struct GENX(RENDER_SURFACE_STATE) null_ss = {
      .SurfaceType = SURFTYPE_NULL,
      .SurfaceArray = fb->layers > 0,
      .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
#if GEN_GEN >= 8
      .TileMode = YMAJOR,
#else
      .TiledSurface = true,
#endif
      .Width = fb->width - 1,
      .Height = fb->height - 1,
      .Depth = fb->layers - 1,
      .RenderTargetViewExtent = fb->layers - 1,
   };

   GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}

static void
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
   const struct anv_image *image = iview ? iview->image : NULL;
   const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
   const bool has_stencil =
      image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);

   /* FIXME: Implement the PMA stall W/A */
   /* FIXME: Width and Height are wrong */

   /* Emit 3DSTATE_DEPTH_BUFFER */
   if (has_depth) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
         db.SurfaceType                   = SURFTYPE_2D;
         db.DepthWriteEnable              = true;
         db.StencilWriteEnable            = has_stencil;
         db.HierarchicalDepthBufferEnable = false;

         db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
                                                      &image->depth_surface.isl);

         db.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->depth_surface.offset,
         };
         db.DepthBufferObjectControlState = GENX(MOCS);

         db.SurfacePitch         = image->depth_surface.isl.row_pitch - 1;
         db.Height               = fb->height - 1;
         db.Width                = fb->width - 1;
         db.LOD                  = 0;
         db.Depth                = 1 - 1;
         db.MinimumArrayElement  = 0;

#if GEN_GEN >= 8
         db.SurfaceQPitch =
            isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
#endif
         db.RenderTargetViewExtent = 1 - 1;
      }
   } else {
      /* Even when no depth buffer is present, the hardware requires that
       * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
       *
       *    If a null depth buffer is bound, the driver must instead bind depth as:
       *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
       *       3DSTATE_DEPTH.Width = 1
       *       3DSTATE_DEPTH.Height = 1
       *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
       *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
       *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
       *
       * The PRM is wrong, though. The width and height must be programmed to
       * the actual framebuffer's width and height, even when neither depth
       * buffer nor stencil buffer is present.  Also, D16_UNORM is not allowed
       * to be combined with a stencil buffer so we use D32_FLOAT instead.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
         db.SurfaceType          = SURFTYPE_2D;
         db.SurfaceFormat        = D32_FLOAT;
         db.Width                = fb->width - 1;
         db.Height               = fb->height - 1;
         db.StencilWriteEnable   = has_stencil;
      }
   }

   /* Emit 3DSTATE_STENCIL_BUFFER */
   if (has_stencil) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
         sb.StencilBufferEnable = true;
#endif
         sb.StencilBufferObjectControlState = GENX(MOCS);

         /* Stencil buffers have strange pitch. The PRM says:
          *
          *    The pitch must be set to 2x the value computed based on width,
          *    as the stencil buffer is stored with two rows interleaved.
          */
         sb.SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1;

#if GEN_GEN >= 8
         sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
#endif
         sb.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->stencil_surface.offset,
         };
      }
   } else {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
   }

   /* Disable hierarchical depth buffers. */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);

   /* Clear the clear params. */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
}

/**
 * @see anv_cmd_buffer_set_subpass()
 */
void
genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   cmd_buffer->state.subpass = subpass;

   cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;

   cmd_buffer_emit_depth_stencil(cmd_buffer);
}

void genX(CmdBeginRenderPass)(
    VkCommandBuffer                             commandBuffer,
    const VkRenderPassBeginInfo*                pRenderPassBegin,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
   ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

   cmd_buffer->state.framebuffer = framebuffer;
   cmd_buffer->state.pass = pass;
   anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   const VkRect2D *render_area = &pRenderPassBegin->renderArea;

   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), r) {
      r.ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0);
      r.ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0);
      r.ClippedDrawingRectangleYMax =
         render_area->offset.y + render_area->extent.height - 1;
      r.ClippedDrawingRectangleXMax =
         render_area->offset.x + render_area->extent.width - 1;
      r.DrawingRectangleOriginY     = 0;
      r.DrawingRectangleOriginX     = 0;
   }

   genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
   anv_cmd_buffer_clear_subpass(cmd_buffer);
}

void genX(CmdNextSubpass)(
    VkCommandBuffer                             commandBuffer,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   anv_cmd_buffer_resolve_subpass(cmd_buffer);
   genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
   anv_cmd_buffer_clear_subpass(cmd_buffer);
}

void genX(CmdEndRenderPass)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_cmd_buffer_resolve_subpass(cmd_buffer);
}

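/* Ask the pixel pipe to write its PS_DEPTH_COUNT to the given location; this
 * is how occlusion query begin/end values are captured.
 */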
static void
emit_ps_depth_count(struct anv_batch *batch,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType  = DAT_PPGTT;
      pc.PostSyncOperation       = WritePSDepthCount;
      pc.DepthStallEnable        = true;
      pc.Address                 = (struct anv_address) { bo, offset };
   }
}

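/* Write a non-zero availability marker to a query slot so the result can be
 * recognized as ready.
 */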
static void
emit_query_availability(struct anv_batch *batch,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType  = DAT_PPGTT;
      pc.PostSyncOperation       = WriteImmediateData;
      pc.Address                 = (struct anv_address) { bo, offset };
      pc.ImmediateData           = 1;
   }
}

void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks. What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear. Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthCacheFlushEnable   = true;
         pc.DepthStallEnable        = true;
      }
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}

void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(&cmd_buffer->batch, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}

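/* MMIO address of the command streamer's 64-bit timestamp register */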
#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = TIMESTAMP;
         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset };
      }
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = TIMESTAMP + 4;
         srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset + 4 };
      }
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DestinationAddressType  = DAT_PPGTT;
         pc.PostSyncOperation       = WriteTimestamp;
         pc.Address = (struct anv_address) { &pool->bo, offset };
      }
      break;
   }

   emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
}

#if GEN_GEN > 7 || GEN_IS_HASWELL

#define alu_opcode(v)   __gen_uint((v),  20, 31)
#define alu_operand1(v) __gen_uint((v),  10, 19)
#define alu_operand2(v) __gen_uint((v),   0,  9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
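
/* Each MI_MATH instruction dword encodes one ALU operation: LOAD moves a GPR
 * into the SRCA/SRCB inputs, the arithmetic ops leave their result in the
 * accumulator (ACCU), and STORE copies the accumulator back to a GPR.
 */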

#define OPCODE_NOOP      0x000
#define OPCODE_LOAD      0x080
#define OPCODE_LOADINV   0x480
#define OPCODE_LOAD0     0x081
#define OPCODE_LOAD1     0x481
#define OPCODE_ADD       0x100
#define OPCODE_SUB       0x101
#define OPCODE_AND       0x102
#define OPCODE_OR        0x103
#define OPCODE_XOR       0x104
#define OPCODE_STORE     0x180
#define OPCODE_STOREINV  0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

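/* The command streamer exposes sixteen 64-bit general-purpose registers
 * (R0-R15) starting at MMIO 0x2600, each 8 bytes apart.
 */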
#define CS_GPR(n) (0x2600 + (n) * 8)

static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
   }
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg + 4;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
   }
}

static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress  = reg;
      srm.MemoryAddress    = (struct anv_address) { bo, offset };
   }

   if (flags & VK_QUERY_RESULT_64_BIT) {
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress  = reg + 4;
         srm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
      }
   }
}

void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.CommandStreamerStallEnable = true;
         pc.StallAtPixelScoreboard     = true;
      }
   }

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {

      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */

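         /* Compute the query result in R2: ACCU = R1 - R0 (end count minus
          * begin count), then store ACCU to R2.
          */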
         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}

#endif