1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "anv_private.h"
28
29 #include "genxml/gen_macros.h"
30 #include "genxml/genX_pack.h"
31
32 void
33 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
34 {
35    struct anv_device *device = cmd_buffer->device;
36
37 /* XXX: Do we need this on more than just BDW? */
38 #if (GEN_GEN >= 8)
39    /* Emit a render target cache flush.
40     *
41     * This isn't documented anywhere in the PRM.  However, it seems to be
42     * necessary prior to changing the surface state base address.  Without
43     * this, we get GPU hangs when using multi-level command buffers which
44     * clear depth, reset state base address, and then go render stuff.
45     */
46    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
47       pc.RenderTargetCacheFlushEnable = true;
48    }
49 #endif
50
51    anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
52       sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
53       sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
54       sba.GeneralStateBaseAddressModifyEnable = true;
55
56       sba.SurfaceStateBaseAddress =
57          anv_cmd_buffer_surface_base_address(cmd_buffer);
58       sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
59       sba.SurfaceStateBaseAddressModifyEnable = true;
60
61       sba.DynamicStateBaseAddress =
62          (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
63       sba.DynamicStateMemoryObjectControlState = GENX(MOCS),
64       sba.DynamicStateBaseAddressModifyEnable = true,
65
66       sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
67       sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
68       sba.IndirectObjectBaseAddressModifyEnable = true;
69
70       sba.InstructionBaseAddress =
71          (struct anv_address) { &device->instruction_block_pool.bo, 0 };
72       sba.InstructionMemoryObjectControlState = GENX(MOCS);
73       sba.InstructionBaseAddressModifyEnable = true;
74
75 #  if (GEN_GEN >= 8)
76       /* Broadwell requires that we specify a buffer size for a bunch of
77        * these fields.  However, since we will be growing the BOs live, we
78        * just set them all to the maximum.
79        */
80       sba.GeneralStateBufferSize                = 0xfffff;
81       sba.GeneralStateBufferSizeModifyEnable    = true;
82       sba.DynamicStateBufferSize                = 0xfffff;
83       sba.DynamicStateBufferSizeModifyEnable    = true;
84       sba.IndirectObjectBufferSize              = 0xfffff;
85       sba.IndirectObjectBufferSizeModifyEnable  = true;
86       sba.InstructionBufferSize                 = 0xfffff;
87       sba.InstructionBuffersizeModifyEnable     = true;
88 #  endif
89    }
90
91    /* After re-setting the surface state base address, we have to do some
92     * cache flushing so that the sampler engine will pick up the new
93     * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
94     * Shared Function > 3D Sampler > State > State Caching (page 96):
95     *
96     *    Coherency with system memory in the state cache, like the texture
97     *    cache is handled partially by software. It is expected that the
98     *    command stream or shader will issue Cache Flush operation or
99     *    Cache_Flush sampler message to ensure that the L1 cache remains
100     *    coherent with system memory.
101     *
102     *    [...]
103     *
104     *    Whenever the value of the Dynamic_State_Base_Addr,
105     *    Surface_State_Base_Addr are altered, the L1 state cache must be
106     *    invalidated to ensure the new surface or sampler state is fetched
107     *    from system memory.
108     *
109     * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
110     * which, according to the PIPE_CONTROL instruction documentation in the
111     * Broadwell PRM:
112     *
113     *    Setting this bit is independent of any other bit in this packet.
114     *    This bit controls the invalidation of the L1 and L2 state caches
115     *    at the top of the pipe i.e. at the parsing time.
116     *
117     * Unfortunately, experimentation seems to indicate that state cache
118     * invalidation through a PIPE_CONTROL does nothing whatsoever with
119     * regard to surface state and binding tables.  Instead, it seems that
120     * invalidating the texture cache is what is actually needed.
121     *
122     * XXX:  As far as we have been able to determine through
123     * experimentation, flushing the texture cache appears to be
124     * sufficient.  The theory here is that all of the sampling/rendering
125     * units cache the binding table in the texture cache.  However, we have
126     * yet to be able to actually confirm this.
127     */
128    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
129       pc.TextureCacheInvalidationEnable = true;
130    }
131 }
132
133 void
134 genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
135 {
136    enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;
137
138    /* Flushes are pipelined while invalidations are handled immediately.
139     * Therefore, if we're flushing anything then we need to schedule a stall
140     * before any invalidations can happen.
141     */
142    if (bits & ANV_PIPE_FLUSH_BITS)
143       bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;
144
145    /* If we're going to do an invalidate and we have a pending CS stall that
146     * has yet to be resolved, we do the CS stall now.
147     */
148    if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
149        (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
150       bits |= ANV_PIPE_CS_STALL_BIT;
151       bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
152    }
153
154    if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
155       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
156          pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
157          pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
158          pipe.RenderTargetCacheFlushEnable =
159             bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
160
161          pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
162          pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
163          pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
164
165          /*
166           * According to the Broadwell documentation, any PIPE_CONTROL with the
167           * "Command Streamer Stall" bit set must also have another bit set,
168           * with one of the following options:
169           *
170           *  - Render Target Cache Flush
171           *  - Depth Cache Flush
172           *  - Stall at Pixel Scoreboard
173           *  - Post-Sync Operation
174           *  - Depth Stall
175           *  - DC Flush Enable
176           *
177           * I chose "Stall at Pixel Scoreboard" since that's what we use in
178           * mesa and it seems to work fine. The choice is fairly arbitrary.
179           */
180          if ((bits & ANV_PIPE_CS_STALL_BIT) &&
181              !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
182                        ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
183             pipe.StallAtPixelScoreboard = true;
184       }
185
186       bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
187    }
188
189    if (bits & ANV_PIPE_INVALIDATE_BITS) {
190       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
191          pipe.StateCacheInvalidationEnable =
192             bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
193          pipe.ConstantCacheInvalidationEnable =
194             bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
195          pipe.VFCacheInvalidationEnable =
196             bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
197          pipe.TextureCacheInvalidationEnable =
198             bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
199          pipe.InstructionCacheInvalidateEnable =
200             bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
201       }
202
203       bits &= ~ANV_PIPE_INVALIDATE_BITS;
204    }
205
206    cmd_buffer->state.pending_pipe_bits = bits;
207 }
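
/* Editor's note (illustrative sketch, not part of the driver): the deferred
 * flush machinery above is driven by cmd_buffer->state.pending_pipe_bits.  A
 * caller records which caches need flushing or invalidating, and the bits are
 * resolved into PIPE_CONTROLs the next time this function runs, e.g.:
 *
 *    cmd_buffer->state.pending_pipe_bits |=
 *       ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
 *       ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
 *    ...
 *    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
 *
 * Because flushes are pipelined while invalidations take effect at the top of
 * the pipe, the function splits this into a flushing PIPE_CONTROL with a CS
 * stall followed by a separate invalidating PIPE_CONTROL.
 */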
208
209 void genX(CmdPipelineBarrier)(
210     VkCommandBuffer                             commandBuffer,
211     VkPipelineStageFlags                        srcStageMask,
212     VkPipelineStageFlags                        destStageMask,
213     VkBool32                                    byRegion,
214     uint32_t                                    memoryBarrierCount,
215     const VkMemoryBarrier*                      pMemoryBarriers,
216     uint32_t                                    bufferMemoryBarrierCount,
217     const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
218     uint32_t                                    imageMemoryBarrierCount,
219     const VkImageMemoryBarrier*                 pImageMemoryBarriers)
220 {
221    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
222    uint32_t b;
223
224    /* XXX: Right now, we're really dumb and just flush whatever categories
225     * the app asks for.  One of these days we may make this a bit better
226     * but right now that's all the hardware allows for in most areas.
227     */
228    VkAccessFlags src_flags = 0;
229    VkAccessFlags dst_flags = 0;
230
231    for (uint32_t i = 0; i < memoryBarrierCount; i++) {
232       src_flags |= pMemoryBarriers[i].srcAccessMask;
233       dst_flags |= pMemoryBarriers[i].dstAccessMask;
234    }
235
236    for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
237       src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
238       dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
239    }
240
241    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
242       src_flags |= pImageMemoryBarriers[i].srcAccessMask;
243       dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
244    }
245
246    enum anv_pipe_bits pipe_bits = 0;
247
248    for_each_bit(b, src_flags) {
249       switch ((VkAccessFlagBits)(1 << b)) {
250       case VK_ACCESS_SHADER_WRITE_BIT:
251          pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
252          break;
253       case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
254          pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
255          break;
256       case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
257          pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
258          break;
259       case VK_ACCESS_TRANSFER_WRITE_BIT:
260          pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
261          pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
262          break;
263       default:
264          break; /* Nothing to do */
265       }
266    }
267
268    for_each_bit(b, dst_flags) {
269       switch ((VkAccessFlagBits)(1 << b)) {
270       case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
271       case VK_ACCESS_INDEX_READ_BIT:
272       case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
273          pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
274          break;
275       case VK_ACCESS_UNIFORM_READ_BIT:
276          pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
277          pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
278          break;
279       case VK_ACCESS_SHADER_READ_BIT:
280       case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
281       case VK_ACCESS_TRANSFER_READ_BIT:
282          pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
283          break;
284       default:
285          break; /* Nothing to do */
286       }
287    }
288
289    cmd_buffer->state.pending_pipe_bits |= pipe_bits;
290 }
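
/* Worked example (editor's illustration): a barrier expressing "color
 * attachment writes must be visible to shader reads", i.e.
 * srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT and
 * dstAccessMask = VK_ACCESS_SHADER_READ_BIT, accumulates
 * ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT from the source loop and
 * ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT from the destination loop above.
 * Both bits land in pending_pipe_bits and become the flush-then-invalidate
 * PIPE_CONTROL pair emitted by genX(cmd_buffer_apply_pipe_flushes)() before
 * the next draw or dispatch.
 */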
291
292 static void
293 cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
294 {
295    VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;
296
297    /* In order to avoid thrashing, we assume that vertex and fragment stages
298     * always exist.  In the rare case where one is missing *and* the other
299     * uses push constants, this may be suboptimal.  However, avoiding stalls
300     * seems more important.
301     */
302    stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;
303
304    if (stages == cmd_buffer->state.push_constant_stages)
305       return;
306
307 #if GEN_GEN >= 8
308    const unsigned push_constant_kb = 32;
309 #elif GEN_IS_HASWELL
310    const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
311 #else
312    const unsigned push_constant_kb = 16;
313 #endif
314
315    const unsigned num_stages =
316       _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
317    unsigned size_per_stage = push_constant_kb / num_stages;
318
319    /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
320     * units of 2KB.  Incidentally, these are the same platforms that have
321     * 32KB worth of push constant space.
322     */
323    if (push_constant_kb == 32)
324       size_per_stage &= ~1u;
325
326    uint32_t kb_used = 0;
327    for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
328       unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
329       anv_batch_emit(&cmd_buffer->batch,
330                      GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
331          alloc._3DCommandSubOpcode  = 18 + i;
332          alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
333          alloc.ConstantBufferSize   = push_size;
334       }
335       kb_used += push_size;
336    }
337
338    anv_batch_emit(&cmd_buffer->batch,
339                   GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
340       alloc.ConstantBufferOffset = kb_used;
341       alloc.ConstantBufferSize = push_constant_kb - kb_used;
342    }
343
344    cmd_buffer->state.push_constant_stages = stages;
345
346    /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
347     *
348     *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
349     *    the next 3DPRIMITIVE command after programming the
350     *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
351     *
352     * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
353     * pipeline setup, we need to dirty push constants.
354     */
355    cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
356 }
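
/* Worked example (editor's illustration), assuming a Broadwell-class part
 * with 32KB of push constant space and a pipeline using VS, GS and FS:
 *
 *    num_stages     = 3
 *    size_per_stage = 32 / 3 = 10KB   (already a multiple of 2KB)
 *
 * The loop above then programs VS at offset 0 with 10KB, HS and DS with size
 * 0, GS at offset 10 with 10KB, and the final ALLOC_PS gets the remaining
 * 12KB at offset 20.  The allocations always sum to the full 32KB.
 */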
357
358 static void
359 cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
360                                     uint32_t stages)
361 {
362    static const uint32_t sampler_state_opcodes[] = {
363       [MESA_SHADER_VERTEX]                      = 43,
364       [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
365       [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
366       [MESA_SHADER_GEOMETRY]                    = 46,
367       [MESA_SHADER_FRAGMENT]                    = 47,
368       [MESA_SHADER_COMPUTE]                     = 0,
369    };
370
371    static const uint32_t binding_table_opcodes[] = {
372       [MESA_SHADER_VERTEX]                      = 38,
373       [MESA_SHADER_TESS_CTRL]                   = 39,
374       [MESA_SHADER_TESS_EVAL]                   = 40,
375       [MESA_SHADER_GEOMETRY]                    = 41,
376       [MESA_SHADER_FRAGMENT]                    = 42,
377       [MESA_SHADER_COMPUTE]                     = 0,
378    };
379
380    anv_foreach_stage(s, stages) {
381       if (cmd_buffer->state.samplers[s].alloc_size > 0) {
382          anv_batch_emit(&cmd_buffer->batch,
383                         GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
384             ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
385             ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
386          }
387       }
388
389       /* Always emit binding table pointers if we're asked to, since on SKL
390        * this is what flushes push constants. */
391       anv_batch_emit(&cmd_buffer->batch,
392                      GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
393          btp._3DCommandSubOpcode = binding_table_opcodes[s];
394          btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
395       }
396    }
397 }
398
399 static uint32_t
400 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
401 {
402    static const uint32_t push_constant_opcodes[] = {
403       [MESA_SHADER_VERTEX]                      = 21,
404       [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
405       [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
406       [MESA_SHADER_GEOMETRY]                    = 22,
407       [MESA_SHADER_FRAGMENT]                    = 23,
408       [MESA_SHADER_COMPUTE]                     = 0,
409    };
410
411    VkShaderStageFlags flushed = 0;
412
413    anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
414       if (stage == MESA_SHADER_COMPUTE)
415          continue;
416
417       struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
418
419       if (state.offset == 0) {
420          anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
421             c._3DCommandSubOpcode = push_constant_opcodes[stage];
422       } else {
423          anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
424             c._3DCommandSubOpcode = push_constant_opcodes[stage],
425             c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
426 #if GEN_GEN >= 9
427                .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
428                .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
429 #else
430                .PointerToConstantBuffer0 = { .offset = state.offset },
431                .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
432 #endif
433             };
434          }
435       }
436
437       flushed |= mesa_to_vk_shader_stage(stage);
438    }
439
440    cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
441
442    return flushed;
443 }
444
445 void
446 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
447 {
448    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
449    uint32_t *p;
450
451    uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
452
453    assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
454
455    genX(cmd_buffer_config_l3)(cmd_buffer, pipeline);
456
457    genX(flush_pipeline_select_3d)(cmd_buffer);
458
459    if (vb_emit) {
460       const uint32_t num_buffers = __builtin_popcount(vb_emit);
461       const uint32_t num_dwords = 1 + num_buffers * 4;
462
463       p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
464                           GENX(3DSTATE_VERTEX_BUFFERS));
465       uint32_t vb, i = 0;
466       for_each_bit(vb, vb_emit) {
467          struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
468          uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
469
470          struct GENX(VERTEX_BUFFER_STATE) state = {
471             .VertexBufferIndex = vb,
472
473 #if GEN_GEN >= 8
474             .MemoryObjectControlState = GENX(MOCS),
475 #else
476             .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
477             .InstanceDataStepRate = 1,
478             .VertexBufferMemoryObjectControlState = GENX(MOCS),
479 #endif
480
481             .AddressModifyEnable = true,
482             .BufferPitch = pipeline->binding_stride[vb],
483             .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
484
485 #if GEN_GEN >= 8
486             .BufferSize = buffer->size - offset
487 #else
488             .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
489 #endif
490          };
491
492          GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
493          i++;
494       }
495    }
496
497    cmd_buffer->state.vb_dirty &= ~vb_emit;
498
499    if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
500       anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
501
502       /* The exact descriptor layout is pulled from the pipeline, so we need
503        * to re-emit binding tables on every pipeline change.
504        */
505       cmd_buffer->state.descriptors_dirty |=
506          cmd_buffer->state.pipeline->active_stages;
507
508       /* If the pipeline changed, we may need to re-allocate push constant
509        * space in the URB.
510        */
511       cmd_buffer_alloc_push_constants(cmd_buffer);
512    }
513
514 #if GEN_GEN <= 7
515    if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
516        cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
517       /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
518        *
519        *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
520        *    stall needs to be sent just prior to any 3DSTATE_VS,
521        *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
522        *    3DSTATE_BINDING_TABLE_POINTER_VS,
523        *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
524        *    PIPE_CONTROL needs to be sent before any combination of VS
525        *    associated 3DSTATE."
526        */
527       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
528          pc.DepthStallEnable  = true;
529          pc.PostSyncOperation = WriteImmediateData;
530          pc.Address           =
531             (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
532       }
533    }
534 #endif
535
536    /* We emit the binding tables and sampler tables first, then emit push
537     * constants and then finally emit binding table and sampler table
538     * pointers.  It has to happen in this order, since emitting the binding
539     * tables may change the push constants (in case of storage images). After
540     * emitting push constants, on SKL+ we have to emit the corresponding
541     * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
542     */
543    uint32_t dirty = 0;
544    if (cmd_buffer->state.descriptors_dirty)
545       dirty = anv_cmd_buffer_flush_descriptor_sets(cmd_buffer);
546
547    if (cmd_buffer->state.push_constants_dirty) {
548 #if GEN_GEN >= 9
549       /* On Sky Lake and later, the binding table pointers commands are
550        * what actually flush the changes to push constant state so we need
551        * to dirty them so they get re-emitted below.
552        */
553       dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
554 #else
555       cmd_buffer_flush_push_constants(cmd_buffer);
556 #endif
557    }
558
559    if (dirty)
560       cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
561
562    if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
563       gen8_cmd_buffer_emit_viewport(cmd_buffer);
564
565    if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
566                                   ANV_CMD_DIRTY_PIPELINE)) {
567       gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
568                                           pipeline->depth_clamp_enable);
569    }
570
571    if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
572       gen7_cmd_buffer_emit_scissor(cmd_buffer);
573
574    genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
575
576    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
577 }
578
579 static void
580 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
581                              struct anv_bo *bo, uint32_t offset)
582 {
583    uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
584                                  GENX(3DSTATE_VERTEX_BUFFERS));
585
586    GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
587       &(struct GENX(VERTEX_BUFFER_STATE)) {
588          .VertexBufferIndex = 32, /* Reserved for this */
589          .AddressModifyEnable = true,
590          .BufferPitch = 0,
591 #if (GEN_GEN >= 8)
592          .MemoryObjectControlState = GENX(MOCS),
593          .BufferStartingAddress = { bo, offset },
594          .BufferSize = 8
595 #else
596          .VertexBufferMemoryObjectControlState = GENX(MOCS),
597          .BufferStartingAddress = { bo, offset },
598          .EndAddress = { bo, offset + 8 },
599 #endif
600       });
601 }
602
603 static void
604 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
605                           uint32_t base_vertex, uint32_t base_instance)
606 {
607    struct anv_state id_state =
608       anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
609
610    ((uint32_t *)id_state.map)[0] = base_vertex;
611    ((uint32_t *)id_state.map)[1] = base_instance;
612
613    if (!cmd_buffer->device->info.has_llc)
614       anv_state_clflush(id_state);
615
616    emit_base_vertex_instance_bo(cmd_buffer,
617       &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
618 }
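
/* Editor's note: the 8 bytes written above are laid out as two 32-bit values,
 * { base_vertex, base_instance }, and are bound at vertex buffer index 32 by
 * emit_base_vertex_instance_bo() so the vertex shader can fetch them when
 * vs_prog_data->uses_basevertex or uses_baseinstance is set.  For an indirect
 * draw, the same helper points index 32 directly at the firstVertex and
 * firstInstance words of the indirect parameters instead of at dynamic state
 * (see genX(CmdDrawIndirect) below).
 */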
619
620 void genX(CmdDraw)(
621     VkCommandBuffer                             commandBuffer,
622     uint32_t                                    vertexCount,
623     uint32_t                                    instanceCount,
624     uint32_t                                    firstVertex,
625     uint32_t                                    firstInstance)
626 {
627    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
628    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
629    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
630
631    genX(cmd_buffer_flush_state)(cmd_buffer);
632
633    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
634       emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
635
636    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
637       prim.VertexAccessType         = SEQUENTIAL;
638       prim.PrimitiveTopologyType    = pipeline->topology;
639       prim.VertexCountPerInstance   = vertexCount;
640       prim.StartVertexLocation      = firstVertex;
641       prim.InstanceCount            = instanceCount;
642       prim.StartInstanceLocation    = firstInstance;
643       prim.BaseVertexLocation       = 0;
644    }
645 }
646
647 void genX(CmdDrawIndexed)(
648     VkCommandBuffer                             commandBuffer,
649     uint32_t                                    indexCount,
650     uint32_t                                    instanceCount,
651     uint32_t                                    firstIndex,
652     int32_t                                     vertexOffset,
653     uint32_t                                    firstInstance)
654 {
655    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
656    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
657    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
658
659    genX(cmd_buffer_flush_state)(cmd_buffer);
660
661    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
662       emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
663
664    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
665       prim.VertexAccessType         = RANDOM;
666       prim.PrimitiveTopologyType    = pipeline->topology;
667       prim.VertexCountPerInstance   = indexCount;
668       prim.StartVertexLocation      = firstIndex;
669       prim.InstanceCount            = instanceCount;
670       prim.StartInstanceLocation    = firstInstance;
671       prim.BaseVertexLocation       = vertexOffset;
672    }
673 }
674
675 /* Auto-Draw / Indirect Registers */
676 #define GEN7_3DPRIM_END_OFFSET          0x2420
677 #define GEN7_3DPRIM_START_VERTEX        0x2430
678 #define GEN7_3DPRIM_VERTEX_COUNT        0x2434
679 #define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
680 #define GEN7_3DPRIM_START_INSTANCE      0x243C
681 #define GEN7_3DPRIM_BASE_VERTEX         0x2440
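
/* For reference (editor's note): the register loads below map the Vulkan
 * indirect draw parameter layouts onto these registers.
 *
 *    VkDrawIndirectCommand          VkDrawIndexedIndirectCommand
 *    +0   vertexCount               +0   indexCount
 *    +4   instanceCount             +4   instanceCount
 *    +8   firstVertex               +8   firstIndex
 *    +12  firstInstance             +12  vertexOffset
 *                                   +16  firstInstance
 *
 * vertexCount/indexCount feed 3DPRIM_VERTEX_COUNT, firstVertex/firstIndex
 * feed 3DPRIM_START_VERTEX, and vertexOffset feeds 3DPRIM_BASE_VERTEX (which
 * is simply forced to 0 on the non-indexed path).
 */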
682
683 static void
684 emit_lrm(struct anv_batch *batch,
685          uint32_t reg, struct anv_bo *bo, uint32_t offset)
686 {
687    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
688       lrm.RegisterAddress  = reg;
689       lrm.MemoryAddress    = (struct anv_address) { bo, offset };
690    }
691 }
692
693 static void
694 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
695 {
696    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
697       lri.RegisterOffset   = reg;
698       lri.DataDWord        = imm;
699    }
700 }
701
702 void genX(CmdDrawIndirect)(
703     VkCommandBuffer                             commandBuffer,
704     VkBuffer                                    _buffer,
705     VkDeviceSize                                offset,
706     uint32_t                                    drawCount,
707     uint32_t                                    stride)
708 {
709    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
710    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
711    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
712    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
713    struct anv_bo *bo = buffer->bo;
714    uint32_t bo_offset = buffer->offset + offset;
715
716    genX(cmd_buffer_flush_state)(cmd_buffer);
717
718    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
719       emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
720
721    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
722    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
723    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
724    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
725    emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
726
727    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
728       prim.IndirectParameterEnable  = true;
729       prim.VertexAccessType         = SEQUENTIAL;
730       prim.PrimitiveTopologyType    = pipeline->topology;
731    }
732 }
733
734 void genX(CmdDrawIndexedIndirect)(
735     VkCommandBuffer                             commandBuffer,
736     VkBuffer                                    _buffer,
737     VkDeviceSize                                offset,
738     uint32_t                                    drawCount,
739     uint32_t                                    stride)
740 {
741    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
742    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
743    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
744    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
745    struct anv_bo *bo = buffer->bo;
746    uint32_t bo_offset = buffer->offset + offset;
747
748    genX(cmd_buffer_flush_state)(cmd_buffer);
749
750    /* TODO: We need to stomp base vertex to 0 somehow */
751    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
752       emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
753
754    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
755    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
756    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
757    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
758    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
759
760    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
761       prim.IndirectParameterEnable  = true;
762       prim.VertexAccessType         = RANDOM;
763       prim.PrimitiveTopologyType    = pipeline->topology;
764    }
765 }
766
767 #if GEN_GEN == 7
768
769 static bool
770 verify_cmd_parser(const struct anv_device *device,
771                   int required_version,
772                   const char *function)
773 {
774    if (device->instance->physicalDevice.cmd_parser_version < required_version) {
775       vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
776                 "cmd parser version %d is required for %s",
777                 required_version, function);
778       return false;
779    } else {
780       return true;
781    }
782 }
783
784 #endif
785
786 void genX(CmdDispatch)(
787     VkCommandBuffer                             commandBuffer,
788     uint32_t                                    x,
789     uint32_t                                    y,
790     uint32_t                                    z)
791 {
792    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
793    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
794    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
795
796    if (prog_data->uses_num_work_groups) {
797       struct anv_state state =
798          anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
799       uint32_t *sizes = state.map;
800       sizes[0] = x;
801       sizes[1] = y;
802       sizes[2] = z;
803       if (!cmd_buffer->device->info.has_llc)
804          anv_state_clflush(state);
805       cmd_buffer->state.num_workgroups_offset = state.offset;
806       cmd_buffer->state.num_workgroups_bo =
807          &cmd_buffer->device->dynamic_state_block_pool.bo;
808    }
809
810    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
811
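   /* Editor's note: the SIMDSize field encodes SIMD8/16/32 as 0/1/2, which is
    * why the compiler's simd_size (8, 16 or 32) is divided by 16 below.
    * ThreadWidthCounterMaximum is the per-group hardware thread count minus
    * one, so e.g. a 64-invocation workgroup compiled SIMD16 uses 4 threads
    * and programs a maximum of 3.
    */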
812    anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
813       ggw.SIMDSize                     = prog_data->simd_size / 16;
814       ggw.ThreadDepthCounterMaximum    = 0;
815       ggw.ThreadHeightCounterMaximum   = 0;
816       ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
817       ggw.ThreadGroupIDXDimension      = x;
818       ggw.ThreadGroupIDYDimension      = y;
819       ggw.ThreadGroupIDZDimension      = z;
820       ggw.RightExecutionMask           = pipeline->cs_right_mask;
821       ggw.BottomExecutionMask          = 0xffffffff;
822    }
823
824    anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
825 }
826
827 #define GPGPU_DISPATCHDIMX 0x2500
828 #define GPGPU_DISPATCHDIMY 0x2504
829 #define GPGPU_DISPATCHDIMZ 0x2508
830
831 #define MI_PREDICATE_SRC0  0x2400
832 #define MI_PREDICATE_SRC1  0x2408
833
834 void genX(CmdDispatchIndirect)(
835     VkCommandBuffer                             commandBuffer,
836     VkBuffer                                    _buffer,
837     VkDeviceSize                                offset)
838 {
839    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
840    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
841    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
842    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
843    struct anv_bo *bo = buffer->bo;
844    uint32_t bo_offset = buffer->offset + offset;
845    struct anv_batch *batch = &cmd_buffer->batch;
846
847 #if GEN_GEN == 7
848    /* Linux 4.4 added command parser version 5 which allows the GPGPU
849     * indirect dispatch registers to be written.
850     */
851    if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
852       return;
853 #endif
854
855    if (prog_data->uses_num_work_groups) {
856       cmd_buffer->state.num_workgroups_offset = bo_offset;
857       cmd_buffer->state.num_workgroups_bo = bo;
858    }
859
860    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
861
862    emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
863    emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
864    emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
865
866 #if GEN_GEN <= 7
867    /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
868    emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
869    emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
870    emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
871
872    /* Load compute_dispatch_indirect_x_size into SRC0 */
873    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
874
875    /* predicate = (compute_dispatch_indirect_x_size == 0); */
876    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
877       mip.LoadOperation    = LOAD_LOAD;
878       mip.CombineOperation = COMBINE_SET;
879       mip.CompareOperation = COMPARE_SRCS_EQUAL;
880    }
881
882    /* Load compute_dispatch_indirect_y_size into SRC0 */
883    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
884
885    /* predicate |= (compute_dispatch_indirect_y_size == 0); */
886    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
887       mip.LoadOperation    = LOAD_LOAD;
888       mip.CombineOperation = COMBINE_OR;
889       mip.CompareOperation = COMPARE_SRCS_EQUAL;
890    }
891
892    /* Load compute_dispatch_indirect_z_size into SRC0 */
893    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
894
895    /* predicate |= (compute_dispatch_indirect_z_size == 0); */
896    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
897       mip.LoadOperation    = LOAD_LOAD;
898       mip.CombineOperation = COMBINE_OR;
899       mip.CompareOperation = COMPARE_SRCS_EQUAL;
900    }
901
902    /* predicate = !predicate; */
903 #define COMPARE_FALSE                           1
904    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
905       mip.LoadOperation    = LOAD_LOADINV;
906       mip.CombineOperation = COMBINE_OR;
907       mip.CompareOperation = COMPARE_FALSE;
908    }
909 #endif
910
911    anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
912       ggw.IndirectParameterEnable      = true;
913       ggw.PredicateEnable              = GEN_GEN <= 7;
914       ggw.SIMDSize                     = prog_data->simd_size / 16;
915       ggw.ThreadDepthCounterMaximum    = 0;
916       ggw.ThreadHeightCounterMaximum   = 0;
917       ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
918       ggw.RightExecutionMask           = pipeline->cs_right_mask;
919       ggw.BottomExecutionMask          = 0xffffffff;
920    }
921
922    anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
923 }
924
925 static void
926 flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
927                                       uint32_t pipeline)
928 {
929 #if GEN_GEN >= 8 && GEN_GEN < 10
930    /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
931     *
932     *   Software must clear the COLOR_CALC_STATE Valid field in
933     *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
934     *   with Pipeline Select set to GPGPU.
935     *
936     * The internal hardware docs recommend the same workaround for Gen9
937     * hardware too.
938     */
939    if (pipeline == GPGPU)
940       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
941 #elif GEN_GEN <= 7
942       /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
943        * PIPELINE_SELECT [DevBWR+]":
944        *
945        *   Project: DEVSNB+
946        *
947        *   Software must ensure all the write caches are flushed through a
948        *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
949        *   command to invalidate read only caches prior to programming
950        *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
951        */
952       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
953          pc.RenderTargetCacheFlushEnable  = true;
954          pc.DepthCacheFlushEnable         = true;
955          pc.DCFlushEnable                 = true;
956          pc.PostSyncOperation             = NoWrite;
957          pc.CommandStreamerStallEnable    = true;
958       }
959
960       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
961          pc.TextureCacheInvalidationEnable   = true;
962          pc.ConstantCacheInvalidationEnable  = true;
963          pc.StateCacheInvalidationEnable     = true;
964          pc.InstructionCacheInvalidateEnable = true;
965          pc.PostSyncOperation                = NoWrite;
966       }
967 #endif
968 }
969
970 void
971 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
972 {
973    if (cmd_buffer->state.current_pipeline != _3D) {
974       flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
975
976       anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
977 #if GEN_GEN >= 9
978          ps.MaskBits = 3;
979 #endif
980          ps.PipelineSelection = _3D;
981       }
982
983       cmd_buffer->state.current_pipeline = _3D;
984    }
985 }
986
987 void
988 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
989 {
990    if (cmd_buffer->state.current_pipeline != GPGPU) {
991       flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
992
993       anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
994 #if GEN_GEN >= 9
995          ps.MaskBits = 3;
996 #endif
997          ps.PipelineSelection = GPGPU;
998       }
999
1000       cmd_buffer->state.current_pipeline = GPGPU;
1001    }
1002 }
1003
1004 struct anv_state
1005 genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
1006                                           struct anv_framebuffer *fb)
1007 {
1008    struct anv_state state =
1009       anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
1010
1011    struct GENX(RENDER_SURFACE_STATE) null_ss = {
1012       .SurfaceType = SURFTYPE_NULL,
1013       .SurfaceArray = fb->layers > 0,
1014       .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
1015 #if GEN_GEN >= 8
1016       .TileMode = YMAJOR,
1017 #else
1018       .TiledSurface = true,
1019 #endif
1020       .Width = fb->width - 1,
1021       .Height = fb->height - 1,
1022       .Depth = fb->layers - 1,
1023       .RenderTargetViewExtent = fb->layers - 1,
1024    };
1025
1026    GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
1027
1028    if (!cmd_buffer->device->info.has_llc)
1029       anv_state_clflush(state);
1030
1031    return state;
1032 }
1033
1034 static void
1035 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
1036 {
1037    struct anv_device *device = cmd_buffer->device;
1038    const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1039    const struct anv_image_view *iview =
1040       anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
1041    const struct anv_image *image = iview ? iview->image : NULL;
1042    const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
1043    const bool has_stencil =
1044       image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
1045
1046    /* FIXME: Implement the PMA stall W/A */
1047    /* FIXME: Width and Height are wrong */
1048
1049    /* Emit 3DSTATE_DEPTH_BUFFER */
1050    if (has_depth) {
1051       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
1052          db.SurfaceType                   = SURFTYPE_2D;
1053          db.DepthWriteEnable              = true;
1054          db.StencilWriteEnable            = has_stencil;
1055          db.HierarchicalDepthBufferEnable = false;
1056
1057          db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
1058                                                       &image->depth_surface.isl);
1059
1060          db.SurfaceBaseAddress = (struct anv_address) {
1061             .bo = image->bo,
1062             .offset = image->offset + image->depth_surface.offset,
1063          };
1064          db.DepthBufferObjectControlState = GENX(MOCS),
1065
1066          db.SurfacePitch         = image->depth_surface.isl.row_pitch - 1;
1067          db.Height               = image->extent.height - 1;
1068          db.Width                = image->extent.width - 1;
1069          db.LOD                  = iview->base_mip;
1070          db.Depth                = image->array_size - 1; /* FIXME: 3-D */
1071          db.MinimumArrayElement  = iview->base_layer;
1072
1073 #if GEN_GEN >= 8
1074          db.SurfaceQPitch =
1075             isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2,
1076 #endif
1077          db.RenderTargetViewExtent = 1 - 1;
1078       }
1079    } else {
1080       /* Even when no depth buffer is present, the hardware requires that
1081        * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
1082        *
1083        *    If a null depth buffer is bound, the driver must instead bind depth as:
1084        *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
1085        *       3DSTATE_DEPTH.Width = 1
1086        *       3DSTATE_DEPTH.Height = 1
1087        *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
1088        *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
1089        *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
1090        *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
1091        *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
1092        *
1093        * The PRM is wrong, though. The width and height must be programmed to
1094        * the actual framebuffer's width and height, even when neither depth buffer
1095        * nor stencil buffer is present.  Also, D16_UNORM is not allowed to
1096        * be combined with a stencil buffer so we use D32_FLOAT instead.
1097        */
1098       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
1099          db.SurfaceType          = SURFTYPE_2D;
1100          db.SurfaceFormat        = D32_FLOAT;
1101          db.Width                = fb->width - 1;
1102          db.Height               = fb->height - 1;
1103          db.StencilWriteEnable   = has_stencil;
1104       }
1105    }
1106
1107    /* Emit 3DSTATE_STENCIL_BUFFER */
1108    if (has_stencil) {
1109       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
1110 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1111          sb.StencilBufferEnable = true,
1112 #endif
1113          sb.StencilBufferObjectControlState = GENX(MOCS),
1114
1115          /* Stencil buffers have strange pitch. The PRM says:
1116           *
1117           *    The pitch must be set to 2x the value computed based on width,
1118           *    as the stencil buffer is stored with two rows interleaved.
1119           */
1120          sb.SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
1121
1122 #if GEN_GEN >= 8
1123          sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2,
1124 #endif
1125          sb.SurfaceBaseAddress = (struct anv_address) {
1126             .bo = image->bo,
1127             .offset = image->offset + image->stencil_surface.offset,
1128          };
1129       }
1130    } else {
1131       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
1132    }
1133
1134    /* Disable hierarchical depth buffers. */
1135    anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
1136
1137    /* Clear the clear params. */
1138    anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
1139 }
1140
1141 /**
1142  * @see anv_cmd_buffer_set_subpass()
1143  */
1144 void
1145 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
1146                              struct anv_subpass *subpass)
1147 {
1148    cmd_buffer->state.subpass = subpass;
1149
1150    cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
1151
1152    cmd_buffer_emit_depth_stencil(cmd_buffer);
1153 }
1154
1155 void genX(CmdBeginRenderPass)(
1156     VkCommandBuffer                             commandBuffer,
1157     const VkRenderPassBeginInfo*                pRenderPassBegin,
1158     VkSubpassContents                           contents)
1159 {
1160    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1161    ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
1162    ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1163
1164    cmd_buffer->state.framebuffer = framebuffer;
1165    cmd_buffer->state.pass = pass;
1166    cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
1167    anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
1168
1169    genX(flush_pipeline_select_3d)(cmd_buffer);
1170
1171    genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
1172    anv_cmd_buffer_clear_subpass(cmd_buffer);
1173 }
1174
1175 void genX(CmdNextSubpass)(
1176     VkCommandBuffer                             commandBuffer,
1177     VkSubpassContents                           contents)
1178 {
1179    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1180
1181    assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1182
1183    anv_cmd_buffer_resolve_subpass(cmd_buffer);
1184    genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
1185    anv_cmd_buffer_clear_subpass(cmd_buffer);
1186 }
1187
1188 void genX(CmdEndRenderPass)(
1189     VkCommandBuffer                             commandBuffer)
1190 {
1191    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1192
1193    anv_cmd_buffer_resolve_subpass(cmd_buffer);
1194 }
1195
1196 static void
1197 emit_ps_depth_count(struct anv_batch *batch,
1198                     struct anv_bo *bo, uint32_t offset)
1199 {
1200    anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
1201       pc.DestinationAddressType  = DAT_PPGTT;
1202       pc.PostSyncOperation       = WritePSDepthCount;
1203       pc.DepthStallEnable        = true;
1204       pc.Address                 = (struct anv_address) { bo, offset };
1205    }
1206 }
1207
1208 static void
1209 emit_query_availability(struct anv_batch *batch,
1210                         struct anv_bo *bo, uint32_t offset)
1211 {
1212    anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
1213       pc.DestinationAddressType  = DAT_PPGTT;
1214       pc.PostSyncOperation       = WriteImmediateData;
1215       pc.Address                 = (struct anv_address) { bo, offset };
1216       pc.ImmediateData           = 1;
1217    }
1218 }
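
/* Editor's note: as used in this file, each query slot in the pool BO is laid
 * out as follows (inferred from the offsets passed below and in
 * genX(CmdCopyQueryPoolResults)):
 *
 *    +0   begin value (PS depth count, or the timestamp)
 *    +8   end value   (PS depth count)
 *    +16  availability word (written as 1 by emit_query_availability)
 *
 * The copy path computes end - begin with MI_MATH for occlusion queries and
 * reads the word at +16 when VK_QUERY_RESULT_WITH_AVAILABILITY_BIT is set.
 */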
1219
1220 void genX(CmdBeginQuery)(
1221     VkCommandBuffer                             commandBuffer,
1222     VkQueryPool                                 queryPool,
1223     uint32_t                                    query,
1224     VkQueryControlFlags                         flags)
1225 {
1226    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1227    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1228
1229    /* Workaround: When meta uses the pipeline with the VS disabled, it seems
1230     * that the pipelining of the depth write breaks. What we see is that
1231     * samples from the render pass clear leak into the first query
1232     * immediately after the clear. Doing a PIPE_CONTROL with a post-sync
1233     * operation and DepthStallEnable seems to work around the issue.
1234     */
1235    if (cmd_buffer->state.need_query_wa) {
1236       cmd_buffer->state.need_query_wa = false;
1237       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1238          pc.DepthCacheFlushEnable   = true;
1239          pc.DepthStallEnable        = true;
1240       }
1241    }
1242
1243    switch (pool->type) {
1244    case VK_QUERY_TYPE_OCCLUSION:
1245       emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1246                           query * sizeof(struct anv_query_pool_slot));
1247       break;
1248
1249    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1250    default:
1251       unreachable("");
1252    }
1253 }
1254
1255 void genX(CmdEndQuery)(
1256     VkCommandBuffer                             commandBuffer,
1257     VkQueryPool                                 queryPool,
1258     uint32_t                                    query)
1259 {
1260    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1261    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1262
1263    switch (pool->type) {
1264    case VK_QUERY_TYPE_OCCLUSION:
1265       emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1266                           query * sizeof(struct anv_query_pool_slot) + 8);
1267
1268       emit_query_availability(&cmd_buffer->batch, &pool->bo,
1269                               query * sizeof(struct anv_query_pool_slot) + 16);
1270       break;
1271
1272    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1273    default:
1274       unreachable("");
1275    }
1276 }
1277
1278 #define TIMESTAMP 0x2358
1279
1280 void genX(CmdWriteTimestamp)(
1281     VkCommandBuffer                             commandBuffer,
1282     VkPipelineStageFlagBits                     pipelineStage,
1283     VkQueryPool                                 queryPool,
1284     uint32_t                                    query)
1285 {
1286    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1287    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1288    uint32_t offset = query * sizeof(struct anv_query_pool_slot);
1289
1290    assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
1291
1292    switch (pipelineStage) {
1293    case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1294       anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1295          srm.RegisterAddress  = TIMESTAMP;
1296          srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset };
1297       }
1298       anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1299          srm.RegisterAddress  = TIMESTAMP + 4;
1300          srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset + 4 };
1301       }
1302       break;
1303
1304    default:
1305       /* Everything else is bottom-of-pipe */
1306       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1307          pc.DestinationAddressType  = DAT_PPGTT,
1308          pc.PostSyncOperation       = WriteTimestamp,
1309          pc.Address = (struct anv_address) { &pool->bo, offset };
1310       }
1311       break;
1312    }
1313
1314    emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
1315 }
1316
1317 #if GEN_GEN > 7 || GEN_IS_HASWELL
1318
1319 #define alu_opcode(v)   __gen_uint((v),  20, 31)
1320 #define alu_operand1(v) __gen_uint((v),  10, 19)
1321 #define alu_operand2(v) __gen_uint((v),   0,  9)
1322 #define alu(opcode, operand1, operand2) \
1323    alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
1324
1325 #define OPCODE_NOOP      0x000
1326 #define OPCODE_LOAD      0x080
1327 #define OPCODE_LOADINV   0x480
1328 #define OPCODE_LOAD0     0x081
1329 #define OPCODE_LOAD1     0x481
1330 #define OPCODE_ADD       0x100
1331 #define OPCODE_SUB       0x101
1332 #define OPCODE_AND       0x102
1333 #define OPCODE_OR        0x103
1334 #define OPCODE_XOR       0x104
1335 #define OPCODE_STORE     0x180
1336 #define OPCODE_STOREINV  0x580
1337
1338 #define OPERAND_R0   0x00
1339 #define OPERAND_R1   0x01
1340 #define OPERAND_R2   0x02
1341 #define OPERAND_R3   0x03
1342 #define OPERAND_R4   0x04
1343 #define OPERAND_SRCA 0x20
1344 #define OPERAND_SRCB 0x21
1345 #define OPERAND_ACCU 0x31
1346 #define OPERAND_ZF   0x32
1347 #define OPERAND_CF   0x33
1348
1349 #define CS_GPR(n) (0x2600 + (n) * 8)
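
/* Editor's sketch: these macros build MI_MATH ALU instruction dwords.  The
 * occlusion-query path below uses them to compute "end - begin" entirely on
 * the command streamer, roughly:
 *
 *    uint32_t *dw = anv_batch_emitn(batch, 5, GENX(MI_MATH));
 *    dw[1] = alu(OPCODE_LOAD,  OPERAND_SRCA, OPERAND_R1);  // SRCA = end
 *    dw[2] = alu(OPCODE_LOAD,  OPERAND_SRCB, OPERAND_R0);  // SRCB = begin
 *    dw[3] = alu(OPCODE_SUB,   0, 0);                      // ACCU = SRCA - SRCB
 *    dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);  // R2   = ACCU
 *
 * with R0/R1 loaded from the query pool via emit_load_alu_reg_u64() and R2
 * written back out with store_query_result().
 */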
1350
1351 static void
1352 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
1353                       struct anv_bo *bo, uint32_t offset)
1354 {
1355    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
1356       lrm.RegisterAddress  = reg,
1357       lrm.MemoryAddress    = (struct anv_address) { bo, offset };
1358    }
1359    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
1360       lrm.RegisterAddress  = reg + 4;
1361       lrm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
1362    }
1363 }
1364
1365 static void
1366 store_query_result(struct anv_batch *batch, uint32_t reg,
1367                    struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
1368 {
1369    anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1370       srm.RegisterAddress  = reg;
1371       srm.MemoryAddress    = (struct anv_address) { bo, offset };
1372    }
1373
1374    if (flags & VK_QUERY_RESULT_64_BIT) {
1375       anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1376          srm.RegisterAddress  = reg + 4;
1377          srm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
1378       }
1379    }
1380 }
1381
1382 void genX(CmdCopyQueryPoolResults)(
1383     VkCommandBuffer                             commandBuffer,
1384     VkQueryPool                                 queryPool,
1385     uint32_t                                    firstQuery,
1386     uint32_t                                    queryCount,
1387     VkBuffer                                    destBuffer,
1388     VkDeviceSize                                destOffset,
1389     VkDeviceSize                                destStride,
1390     VkQueryResultFlags                          flags)
1391 {
1392    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1393    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1394    ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1395    uint32_t slot_offset, dst_offset;
1396
1397    if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1398       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1399          pc.CommandStreamerStallEnable = true;
1400          pc.StallAtPixelScoreboard     = true;
1401       }
1402    }
1403
1404    dst_offset = buffer->offset + destOffset;
1405    for (uint32_t i = 0; i < queryCount; i++) {
1406
1407       slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
1408       switch (pool->type) {
1409       case VK_QUERY_TYPE_OCCLUSION:
1410          emit_load_alu_reg_u64(&cmd_buffer->batch,
1411                                CS_GPR(0), &pool->bo, slot_offset);
1412          emit_load_alu_reg_u64(&cmd_buffer->batch,
1413                                CS_GPR(1), &pool->bo, slot_offset + 8);
1414
1415          /* FIXME: We need to clamp the result for 32 bit. */
1416
1417          uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
1418          dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
1419          dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
1420          dw[3] = alu(OPCODE_SUB, 0, 0);
1421          dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
1422          break;
1423
1424       case VK_QUERY_TYPE_TIMESTAMP:
1425          emit_load_alu_reg_u64(&cmd_buffer->batch,
1426                                CS_GPR(2), &pool->bo, slot_offset);
1427          break;
1428
1429       default:
1430          unreachable("unhandled query type");
1431       }
1432
1433       store_query_result(&cmd_buffer->batch,
1434                          CS_GPR(2), buffer->bo, dst_offset, flags);
1435
1436       if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1437          emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
1438                                &pool->bo, slot_offset + 16);
1439          if (flags & VK_QUERY_RESULT_64_BIT)
1440             store_query_result(&cmd_buffer->batch,
1441                                CS_GPR(0), buffer->bo, dst_offset + 8, flags);
1442          else
1443             store_query_result(&cmd_buffer->batch,
1444                                CS_GPR(0), buffer->bo, dst_offset + 4, flags);
1445       }
1446
1447       dst_offset += destStride;
1448    }
1449 }
1450
1451 #else
1452 void genX(CmdCopyQueryPoolResults)(
1453     VkCommandBuffer                             commandBuffer,
1454     VkQueryPool                                 queryPool,
1455     uint32_t                                    firstQuery,
1456     uint32_t                                    queryCount,
1457     VkBuffer                                    destBuffer,
1458     VkDeviceSize                                destOffset,
1459     VkDeviceSize                                destStride,
1460     VkQueryResultFlags                          flags)
1461 {
1462    anv_finishme("Queries not yet supported on Ivy Bridge");
1463 }
1464 #endif