android-x86/external-mesa.git: src/intel/vulkan/genX_cmd_buffer.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <assert.h>
25 #include <stdbool.h>
26
27 #include "anv_private.h"
28
29 #include "common/gen_l3_config.h"
30 #include "genxml/gen_macros.h"
31 #include "genxml/genX_pack.h"
32
33 static void
34 emit_lrm(struct anv_batch *batch,
35          uint32_t reg, struct anv_bo *bo, uint32_t offset)
36 {
37    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
38       lrm.RegisterAddress  = reg;
39       lrm.MemoryAddress    = (struct anv_address) { bo, offset };
40    }
41 }
42
43 static void
44 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
45 {
46    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
47       lri.RegisterOffset   = reg;
48       lri.DataDWord        = imm;
49    }
50 }
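/* emit_lrm() and emit_lri() are thin wrappers around the MI_LOAD_REGISTER_MEM
 * and MI_LOAD_REGISTER_IMM commands.  They are used further down to program
 * the L3 configuration registers and to load the 3DPRIM_* registers for
 * indirect draws, e.g.:
 *
 *    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
 *    emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
 */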
51
52 void
53 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
54 {
55    struct anv_device *device = cmd_buffer->device;
56
57 /* XXX: Do we need this on more than just BDW? */
58 #if (GEN_GEN >= 8)
59    /* Emit a render target cache flush.
60     *
61     * This isn't documented anywhere in the PRM.  However, it seems to be
62     * necessary prior to changing the surface state base address.  Without
63     * this, we get GPU hangs when using multi-level command buffers which
64     * clear depth, reset state base address, and then go render stuff.
65     */
66    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
67       pc.RenderTargetCacheFlushEnable = true;
68    }
69 #endif
70
71    anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
72       sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
73       sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
74       sba.GeneralStateBaseAddressModifyEnable = true;
75
76       sba.SurfaceStateBaseAddress =
77          anv_cmd_buffer_surface_base_address(cmd_buffer);
78       sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
79       sba.SurfaceStateBaseAddressModifyEnable = true;
80
81       sba.DynamicStateBaseAddress =
82          (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
83       sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
84       sba.DynamicStateBaseAddressModifyEnable = true;
85
86       sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
87       sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
88       sba.IndirectObjectBaseAddressModifyEnable = true;
89
90       sba.InstructionBaseAddress =
91          (struct anv_address) { &device->instruction_block_pool.bo, 0 };
92       sba.InstructionMemoryObjectControlState = GENX(MOCS);
93       sba.InstructionBaseAddressModifyEnable = true;
94
95 #  if (GEN_GEN >= 8)
96       /* Broadwell requires that we specify a buffer size for a bunch of
97        * these fields.  However, since we will be growing the BOs live, we
98        * just set them all to the maximum.
99        */
100       sba.GeneralStateBufferSize                = 0xfffff;
101       sba.GeneralStateBufferSizeModifyEnable    = true;
102       sba.DynamicStateBufferSize                = 0xfffff;
103       sba.DynamicStateBufferSizeModifyEnable    = true;
104       sba.IndirectObjectBufferSize              = 0xfffff;
105       sba.IndirectObjectBufferSizeModifyEnable  = true;
106       sba.InstructionBufferSize                 = 0xfffff;
107       sba.InstructionBuffersizeModifyEnable     = true;
108 #  endif
109    }
110
111    /* After re-setting the surface state base address, we have to do some
112     * cache flushing so that the sampler engine will pick up the new
113     * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
114     * Shared Function > 3D Sampler > State > State Caching (page 96):
115     *
116     *    Coherency with system memory in the state cache, like the texture
117     *    cache is handled partially by software. It is expected that the
118     *    command stream or shader will issue Cache Flush operation or
119     *    Cache_Flush sampler message to ensure that the L1 cache remains
120     *    coherent with system memory.
121     *
122     *    [...]
123     *
124     *    Whenever the value of the Dynamic_State_Base_Addr,
125     *    Surface_State_Base_Addr are altered, the L1 state cache must be
126     *    invalidated to ensure the new surface or sampler state is fetched
127     *    from system memory.
128     *
129     * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
130     * which, according to the PIPE_CONTROL instruction documentation in the
131     * Broadwell PRM:
132     *
133     *    Setting this bit is independent of any other bit in this packet.
134     *    This bit controls the invalidation of the L1 and L2 state caches
135     *    at the top of the pipe i.e. at the parsing time.
136     *
137     * Unfortunately, experimentation seems to indicate that state cache
138     * invalidation through a PIPE_CONTROL does nothing whatsoever in
139     * regards to surface state and binding tables.  Instead, it seems that
140     * invalidating the texture cache is what is actually needed.
141     *
142     * XXX:  As far as we have been able to determine through
143     * experimentation, flushing the texture cache appears to be
144     * sufficient.  The theory here is that all of the sampling/rendering
145     * units cache the binding table in the texture cache.  However, we have
146     * yet to be able to actually confirm this.
147     */
148    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
149       pc.TextureCacheInvalidationEnable = true;
150    }
151 }
152
153 VkResult
154 genX(BeginCommandBuffer)(
155     VkCommandBuffer                             commandBuffer,
156     const VkCommandBufferBeginInfo*             pBeginInfo)
157 {
158    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
159
160    /* If this is the first vkBeginCommandBuffer, we must *initialize* the
161     * command buffer's state. Otherwise, we must *reset* its state. In both
162     * cases we reset it.
163     *
164     * From the Vulkan 1.0 spec:
165     *
166     *    If a command buffer is in the executable state and the command buffer
167     *    was allocated from a command pool with the
168     *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
169     *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
170     *    as if vkResetCommandBuffer had been called with
171     *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
172     *    the command buffer in the recording state.
173     */
174    anv_cmd_buffer_reset(cmd_buffer);
175
176    cmd_buffer->usage_flags = pBeginInfo->flags;
177
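   /* VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT is only meaningful for
    * secondary command buffers; the spec says it is ignored for primaries,
    * so we simply assert that a primary never sets it.
    */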
178    assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
179           !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));
180
181    genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
182
183    if (cmd_buffer->usage_flags &
184        VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
185       cmd_buffer->state.framebuffer =
186          anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
187       cmd_buffer->state.pass =
188          anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
189       cmd_buffer->state.subpass =
190          &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
191
192       cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
193    }
194
195    return VK_SUCCESS;
196 }
197
198 VkResult
199 genX(EndCommandBuffer)(
200     VkCommandBuffer                             commandBuffer)
201 {
202    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
203
204    anv_cmd_buffer_end_batch_buffer(cmd_buffer);
205
206    return VK_SUCCESS;
207 }
208
209 void
210 genX(CmdExecuteCommands)(
211     VkCommandBuffer                             commandBuffer,
212     uint32_t                                    commandBufferCount,
213     const VkCommandBuffer*                      pCmdBuffers)
214 {
215    ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
216
217    assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
218
219    for (uint32_t i = 0; i < commandBufferCount; i++) {
220       ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
221
222       assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
223
224       anv_cmd_buffer_add_secondary(primary, secondary);
225    }
226
227    /* Each of the secondary command buffers will use its own state base
228     * address.  We need to re-emit state base address for the primary after
229     * all of the secondaries are done.
230     *
231     * TODO: Maybe we want to make this a dirty bit to avoid extra state base
232     * address calls?
233     */
234    genX(cmd_buffer_emit_state_base_address)(primary);
235 }
236
237 #define IVB_L3SQCREG1_SQGHPCI_DEFAULT     0x00730000
238 #define VLV_L3SQCREG1_SQGHPCI_DEFAULT     0x00d30000
239 #define HSW_L3SQCREG1_SQGHPCI_DEFAULT     0x00610000
240
241 /**
242  * Program the hardware to use the specified L3 configuration.
243  */
244 void
245 genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
246                            const struct gen_l3_config *cfg)
247 {
248    assert(cfg);
249    if (cfg == cmd_buffer->state.current_l3_config)
250       return;
251
252    if (unlikely(INTEL_DEBUG & DEBUG_L3)) {
253       fprintf(stderr, "L3 config transition: ");
254       gen_dump_l3_config(cfg, stderr);
255    }
256
257    const bool has_slm = cfg->n[GEN_L3P_SLM];
258
259    /* According to the hardware docs, the L3 partitioning can only be changed
260     * while the pipeline is completely drained and the caches are flushed,
261     * which involves a first PIPE_CONTROL flush which stalls the pipeline...
262     */
263    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
264       pc.DCFlushEnable = true;
265       pc.PostSyncOperation = NoWrite;
266       pc.CommandStreamerStallEnable = true;
267    }
268
269    /* ...followed by a second pipelined PIPE_CONTROL that initiates
270     * invalidation of the relevant caches.  Note that because RO invalidation
271     * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
272     * command is processed by the CS) we cannot combine it with the previous
273     * stalling flush as the hardware documentation suggests, because that
274     * would cause the CS to stall on previous rendering *after* RO
275     * invalidation and wouldn't prevent the RO caches from being polluted by
276     * concurrent rendering before the stall completes.  This intentionally
277     * doesn't implement the SKL+ hardware workaround that suggests enabling CS
278     * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
279     * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
280     * already guarantee that there is no concurrent GPGPU kernel execution
281     * (see SKL HSD 2132585).
282     */
283    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
284       pc.TextureCacheInvalidationEnable = true;
285       pc.ConstantCacheInvalidationEnable = true;
286       pc.InstructionCacheInvalidateEnable = true;
287       pc.StateCacheInvalidationEnable = true;
288       pc.PostSyncOperation = NoWrite;
289    }
290
291    /* Now send a third stalling flush to make sure that invalidation is
292     * complete when the L3 configuration registers are modified.
293     */
294    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
295       pc.DCFlushEnable = true;
296       pc.PostSyncOperation = NoWrite;
297       pc.CommandStreamerStallEnable = true;
298    }
299
300 #if GEN_GEN >= 8
301
302    assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);
303
304    uint32_t l3cr;
305    anv_pack_struct(&l3cr, GENX(L3CNTLREG),
306                    .SLMEnable = has_slm,
307                    .URBAllocation = cfg->n[GEN_L3P_URB],
308                    .ROAllocation = cfg->n[GEN_L3P_RO],
309                    .DCAllocation = cfg->n[GEN_L3P_DC],
310                    .AllAllocation = cfg->n[GEN_L3P_ALL]);
311
312    /* Set up the L3 partitioning. */
313    emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr);
314
315 #else
316
317    const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
318    const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] ||
319                        cfg->n[GEN_L3P_ALL];
320    const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] ||
321                       cfg->n[GEN_L3P_ALL];
322    const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] ||
323                       cfg->n[GEN_L3P_ALL];
324
325    assert(!cfg->n[GEN_L3P_ALL]);
326
327    /* When enabled, SLM only uses a portion of the L3 on half of the banks,
328     * the matching space on the remaining banks has to be allocated to a
329     * client (URB for all validated configurations) set to the
330     * lower-bandwidth 2-bank address hashing mode.
331     */
332    const struct gen_device_info *devinfo = &cmd_buffer->device->info;
333    const bool urb_low_bw = has_slm && !devinfo->is_baytrail;
334    assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);
335
336    /* Minimum number of ways that can be allocated to the URB. */
337    const unsigned n0_urb = (devinfo->is_baytrail ? 32 : 0);
338    assert(cfg->n[GEN_L3P_URB] >= n0_urb);
339
340    uint32_t l3sqcr1, l3cr2, l3cr3;
341    anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1),
342                    .ConvertDC_UC = !has_dc,
343                    .ConvertIS_UC = !has_is,
344                    .ConvertC_UC = !has_c,
345                    .ConvertT_UC = !has_t);
346    l3sqcr1 |=
347       GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT :
348       devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT :
349       IVB_L3SQCREG1_SQGHPCI_DEFAULT;
350
351    anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
352                    .SLMEnable = has_slm,
353                    .URBLowBandwidth = urb_low_bw,
354                    .URBAllocation = cfg->n[GEN_L3P_URB],
355 #if !GEN_IS_HASWELL
356                    .ALLAllocation = cfg->n[GEN_L3P_ALL],
357 #endif
358                    .ROAllocation = cfg->n[GEN_L3P_RO],
359                    .DCAllocation = cfg->n[GEN_L3P_DC]);
360
361    anv_pack_struct(&l3cr3, GENX(L3CNTLREG3),
362                    .ISAllocation = cfg->n[GEN_L3P_IS],
363                    .ISLowBandwidth = 0,
364                    .CAllocation = cfg->n[GEN_L3P_C],
365                    .CLowBandwidth = 0,
366                    .TAllocation = cfg->n[GEN_L3P_T],
367                    .TLowBandwidth = 0);
368
369    /* Set up the L3 partitioning. */
370    emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1);
371    emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2);
372    emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);
373
374 #if GEN_IS_HASWELL
375    if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
376       /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
377        * them disabled to avoid crashing the system hard.
378        */
379       uint32_t scratch1, chicken3;
380       anv_pack_struct(&scratch1, GENX(SCRATCH1),
381                       .L3AtomicDisable = !has_dc);
382       anv_pack_struct(&chicken3, GENX(CHICKEN3),
383                       .L3AtomicDisableMask = true,
384                       .L3AtomicDisable = !has_dc);
385       emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1);
386       emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3);
387    }
388 #endif
389
390 #endif
391
392    cmd_buffer->state.current_l3_config = cfg;
393 }
394
395 void
396 genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
397 {
398    enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;
399
400    /* Flushes are pipelined while invalidations are handled immediately.
401     * Therefore, if we're flushing anything then we need to schedule a stall
402     * before any invalidations can happen.
403     */
404    if (bits & ANV_PIPE_FLUSH_BITS)
405       bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;
406
407    /* If we're going to do an invalidate and we have a pending CS stall that
408     * has yet to be resolved, we do the CS stall now.
409     */
410    if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
411        (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
412       bits |= ANV_PIPE_CS_STALL_BIT;
413       bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
414    }
415
416    if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
417       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
418          pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
419          pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
420          pipe.RenderTargetCacheFlushEnable =
421             bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
422
423          pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
424          pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
425          pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
426
427          /*
428           * According to the Broadwell documentation, any PIPE_CONTROL with the
429           * "Command Streamer Stall" bit set must also have another bit set,
430           * with six different options:
431           *
432           *  - Render Target Cache Flush
433           *  - Depth Cache Flush
434           *  - Stall at Pixel Scoreboard
435           *  - Post-Sync Operation
436           *  - Depth Stall
437           *  - DC Flush Enable
438           *
439           * I chose "Stall at Pixel Scoreboard" since that's what we use in
440           * mesa and it seems to work fine. The choice is fairly arbitrary.
441           */
442          if ((bits & ANV_PIPE_CS_STALL_BIT) &&
443              !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
444                        ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
445             pipe.StallAtPixelScoreboard = true;
446       }
447
448       bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
449    }
450
451    if (bits & ANV_PIPE_INVALIDATE_BITS) {
452       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
453          pipe.StateCacheInvalidationEnable =
454             bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
455          pipe.ConstantCacheInvalidationEnable =
456             bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
457          pipe.VFCacheInvalidationEnable =
458             bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
459          pipe.TextureCacheInvalidationEnable =
460             bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
461          pipe.InstructionCacheInvalidateEnable =
462             bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
463       }
464
465       bits &= ~ANV_PIPE_INVALIDATE_BITS;
466    }
467
468    cmd_buffer->state.pending_pipe_bits = bits;
469 }
470
471 void genX(CmdPipelineBarrier)(
472     VkCommandBuffer                             commandBuffer,
473     VkPipelineStageFlags                        srcStageMask,
474     VkPipelineStageFlags                        destStageMask,
475     VkBool32                                    byRegion,
476     uint32_t                                    memoryBarrierCount,
477     const VkMemoryBarrier*                      pMemoryBarriers,
478     uint32_t                                    bufferMemoryBarrierCount,
479     const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
480     uint32_t                                    imageMemoryBarrierCount,
481     const VkImageMemoryBarrier*                 pImageMemoryBarriers)
482 {
483    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
484    uint32_t b;
485
486    /* XXX: Right now, we're really dumb and just flush whatever categories
487     * the app asks for.  One of these days we may make this a bit better
488     * but right now that's all the hardware allows for in most areas.
489     */
490    VkAccessFlags src_flags = 0;
491    VkAccessFlags dst_flags = 0;
492
493    for (uint32_t i = 0; i < memoryBarrierCount; i++) {
494       src_flags |= pMemoryBarriers[i].srcAccessMask;
495       dst_flags |= pMemoryBarriers[i].dstAccessMask;
496    }
497
498    for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
499       src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
500       dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
501    }
502
503    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
504       src_flags |= pImageMemoryBarriers[i].srcAccessMask;
505       dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
506    }
507
508    enum anv_pipe_bits pipe_bits = 0;
509
510    for_each_bit(b, src_flags) {
511       switch ((VkAccessFlagBits)(1 << b)) {
512       case VK_ACCESS_SHADER_WRITE_BIT:
513          pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
514          break;
515       case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
516          pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
517          break;
518       case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
519          pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
520          break;
521       case VK_ACCESS_TRANSFER_WRITE_BIT:
522          pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
523          pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
524          break;
525       default:
526          break; /* Nothing to do */
527       }
528    }
529
530    for_each_bit(b, dst_flags) {
531       switch ((VkAccessFlagBits)(1 << b)) {
532       case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
533       case VK_ACCESS_INDEX_READ_BIT:
534       case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
535          pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
536          break;
537       case VK_ACCESS_UNIFORM_READ_BIT:
538          pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
539          pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
540          break;
541       case VK_ACCESS_SHADER_READ_BIT:
542       case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
543       case VK_ACCESS_TRANSFER_READ_BIT:
544          pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
545          break;
546       default:
547          break; /* Nothing to do */
548       }
549    }
550
551    cmd_buffer->state.pending_pipe_bits |= pipe_bits;
552 }
553
554 static void
555 cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
556 {
557    VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;
558
559    /* In order to avoid thrash, we assume that vertex and fragment stages
560     * always exist.  In the rare case where one is missing *and* the other
561     * uses push constants, this may be suboptimal.  However, avoiding stalls
562     * seems more important.
563     */
564    stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;
565
566    if (stages == cmd_buffer->state.push_constant_stages)
567       return;
568
569 #if GEN_GEN >= 8
570    const unsigned push_constant_kb = 32;
571 #elif GEN_IS_HASWELL
572    const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
573 #else
574    const unsigned push_constant_kb = 16;
575 #endif
576
577    const unsigned num_stages =
578       _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
579    unsigned size_per_stage = push_constant_kb / num_stages;
580
581    /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
582     * units of 2KB.  Incidentally, these are the same platforms that have
583     * 32KB worth of push constant space.
584     */
585    if (push_constant_kb == 32)
586       size_per_stage &= ~1u;
587
588    uint32_t kb_used = 0;
589    for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
590       unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
591       anv_batch_emit(&cmd_buffer->batch,
592                      GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
593          alloc._3DCommandSubOpcode  = 18 + i;
594          alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
595          alloc.ConstantBufferSize   = push_size;
596       }
597       kb_used += push_size;
598    }
599
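   /* The fragment stage gets whatever push constant space is left over after
    * the per-stage allocations above.
    */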
600    anv_batch_emit(&cmd_buffer->batch,
601                   GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
602       alloc.ConstantBufferOffset = kb_used;
603       alloc.ConstantBufferSize = push_constant_kb - kb_used;
604    }
605
606    cmd_buffer->state.push_constant_stages = stages;
607
608    /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
609     *
610     *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
611     *    the next 3DPRIMITIVE command after programming the
612     *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
613     *
614     * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
615     * pipeline setup, we need to dirty push constants.
616     */
617    cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
618 }
619
620 static void
621 add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
622                         struct anv_state state, struct anv_bo *bo,
623                         uint32_t offset)
624 {
625    /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
626     * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
627     * the initial state to set the high bits to 0. */
628
629    const uint32_t dword = GEN_GEN < 8 ? 1 : 8;
630
631    anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
632                       state.offset + dword * 4, bo, offset);
633 }
634
635 static struct anv_state
636 alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
637                          struct anv_framebuffer *fb)
638 {
639    struct anv_state state =
640       anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
641
642    struct GENX(RENDER_SURFACE_STATE) null_ss = {
643       .SurfaceType = SURFTYPE_NULL,
644       .SurfaceArray = fb->layers > 0,
645       .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
646 #if GEN_GEN >= 8
647       .TileMode = YMAJOR,
648 #else
649       .TiledSurface = true,
650 #endif
651       .Width = fb->width - 1,
652       .Height = fb->height - 1,
653       .Depth = fb->layers - 1,
654       .RenderTargetViewExtent = fb->layers - 1,
655    };
656
657    GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
658
659    if (!cmd_buffer->device->info.has_llc)
660       anv_state_clflush(state);
661
662    return state;
663 }
664
665
666 static VkResult
667 emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
668                    gl_shader_stage stage,
669                    struct anv_state *bt_state)
670 {
671    struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
672    struct anv_subpass *subpass = cmd_buffer->state.subpass;
673    struct anv_pipeline *pipeline;
674    uint32_t bias, state_offset;
675
676    switch (stage) {
677    case MESA_SHADER_COMPUTE:
678       pipeline = cmd_buffer->state.compute_pipeline;
679       bias = 1;
680       break;
681    default:
682       pipeline = cmd_buffer->state.pipeline;
683       bias = 0;
684       break;
685    }
686
687    if (!anv_pipeline_has_stage(pipeline, stage)) {
688       *bt_state = (struct anv_state) { 0, };
689       return VK_SUCCESS;
690    }
691
692    struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
693    if (bias + map->surface_count == 0) {
694       *bt_state = (struct anv_state) { 0, };
695       return VK_SUCCESS;
696    }
697
698    *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
699                                                   bias + map->surface_count,
700                                                   &state_offset);
701    uint32_t *bt_map = bt_state->map;
702
703    if (bt_state->map == NULL)
704       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
705
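   /* For compute, binding table slot 0 is reserved for the number-of-
    * workgroups surface (hence bias == 1 above); the shader's own surfaces
    * start at bt_map[bias].
    */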
706    if (stage == MESA_SHADER_COMPUTE &&
707        get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
708       struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
709       uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;
710
711       struct anv_state surface_state;
712       surface_state =
713          anv_cmd_buffer_alloc_surface_state(cmd_buffer);
714
715       const enum isl_format format =
716          anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
717       anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
718                                     format, bo_offset, 12, 1);
719
720       bt_map[0] = surface_state.offset + state_offset;
721       add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
722    }
723
724    if (map->surface_count == 0)
725       goto out;
726
727    if (map->image_count > 0) {
728       VkResult result =
729          anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
730       if (result != VK_SUCCESS)
731          return result;
732
733       cmd_buffer->state.push_constants_dirty |= 1 << stage;
734    }
735
736    uint32_t image = 0;
737    for (uint32_t s = 0; s < map->surface_count; s++) {
738       struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
739
740       struct anv_state surface_state;
741       struct anv_bo *bo;
742       uint32_t bo_offset;
743
744       if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
745          /* Color attachment binding */
746          assert(stage == MESA_SHADER_FRAGMENT);
747          assert(binding->binding == 0);
748          if (binding->index < subpass->color_count) {
749             const struct anv_image_view *iview =
750                fb->attachments[subpass->color_attachments[binding->index]];
751
752             assert(iview->color_rt_surface_state.alloc_size);
753             surface_state = iview->color_rt_surface_state;
754             add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
755                                     iview->bo, iview->offset);
756          } else {
757             /* Null render target */
758             struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
759             surface_state = alloc_null_surface_state(cmd_buffer, fb);
760          }
761
762          bt_map[bias + s] = surface_state.offset + state_offset;
763          continue;
764       }
765
766       struct anv_descriptor_set *set =
767          cmd_buffer->state.descriptors[binding->set];
768       uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
769       struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
770
771       switch (desc->type) {
772       case VK_DESCRIPTOR_TYPE_SAMPLER:
773          /* Nothing for us to do here */
774          continue;
775
776       case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
777       case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
778       case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
779          surface_state = desc->image_view->sampler_surface_state;
780          assert(surface_state.alloc_size);
781          bo = desc->image_view->bo;
782          bo_offset = desc->image_view->offset;
783          break;
784
785       case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
786          surface_state = desc->image_view->storage_surface_state;
787          assert(surface_state.alloc_size);
788          bo = desc->image_view->bo;
789          bo_offset = desc->image_view->offset;
790
791          struct brw_image_param *image_param =
792             &cmd_buffer->state.push_constants[stage]->images[image++];
793
794          *image_param = desc->image_view->storage_image_param;
795          image_param->surface_idx = bias + s;
796          break;
797       }
798
799       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
800       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
801       case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
802       case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
803       case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
804          surface_state = desc->buffer_view->surface_state;
805          assert(surface_state.alloc_size);
806          bo = desc->buffer_view->bo;
807          bo_offset = desc->buffer_view->offset;
808          break;
809
810       case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
811          surface_state = desc->buffer_view->storage_surface_state;
812          assert(surface_state.alloc_size);
813          bo = desc->buffer_view->bo;
814          bo_offset = desc->buffer_view->offset;
815
816          struct brw_image_param *image_param =
817             &cmd_buffer->state.push_constants[stage]->images[image++];
818
819          *image_param = desc->buffer_view->storage_image_param;
820          image_param->surface_idx = bias + s;
821          break;
822
823       default:
824          assert(!"Invalid descriptor type");
825          continue;
826       }
827
828       bt_map[bias + s] = surface_state.offset + state_offset;
829       add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
830    }
831    assert(image == map->image_count);
832
833  out:
834    if (!cmd_buffer->device->info.has_llc)
835       anv_state_clflush(*bt_state);
836
837    return VK_SUCCESS;
838 }
839
840 static VkResult
841 emit_samplers(struct anv_cmd_buffer *cmd_buffer,
842               gl_shader_stage stage,
843               struct anv_state *state)
844 {
845    struct anv_pipeline *pipeline;
846
847    if (stage == MESA_SHADER_COMPUTE)
848       pipeline = cmd_buffer->state.compute_pipeline;
849    else
850       pipeline = cmd_buffer->state.pipeline;
851
852    if (!anv_pipeline_has_stage(pipeline, stage)) {
853       *state = (struct anv_state) { 0, };
854       return VK_SUCCESS;
855    }
856
857    struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
858    if (map->sampler_count == 0) {
859       *state = (struct anv_state) { 0, };
860       return VK_SUCCESS;
861    }
862
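   /* Each sampler takes 16 bytes (four dwords) of SAMPLER_STATE, copied from
    * the pre-packed anv_sampler below.
    */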
863    uint32_t size = map->sampler_count * 16;
864    *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
865
866    if (state->map == NULL)
867       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
868
869    for (uint32_t s = 0; s < map->sampler_count; s++) {
870       struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
871       struct anv_descriptor_set *set =
872          cmd_buffer->state.descriptors[binding->set];
873       uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
874       struct anv_descriptor *desc = &set->descriptors[offset + binding->index];
875
876       if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
877           desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
878          continue;
879
880       struct anv_sampler *sampler = desc->sampler;
881
882       /* This can happen if we have an unfilled slot since TYPE_SAMPLER
883        * happens to be zero.
884        */
885       if (sampler == NULL)
886          continue;
887
888       memcpy(state->map + (s * 16),
889              sampler->state, sizeof(sampler->state));
890    }
891
892    if (!cmd_buffer->device->info.has_llc)
893       anv_state_clflush(*state);
894
895    return VK_SUCCESS;
896 }
897
898 static uint32_t
899 flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
900 {
901    VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
902                               cmd_buffer->state.pipeline->active_stages;
903
904    VkResult result = VK_SUCCESS;
905    anv_foreach_stage(s, dirty) {
906       result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
907       if (result != VK_SUCCESS)
908          break;
909       result = emit_binding_table(cmd_buffer, s,
910                                   &cmd_buffer->state.binding_tables[s]);
911       if (result != VK_SUCCESS)
912          break;
913    }
914
915    if (result != VK_SUCCESS) {
916       assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
917
918       result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
919       assert(result == VK_SUCCESS);
920
921       /* Re-emit state base addresses so we get the new surface state base
922        * address before we start emitting binding tables etc.
923        */
924       genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
925
926       /* Re-emit all active binding tables */
927       dirty |= cmd_buffer->state.pipeline->active_stages;
928       anv_foreach_stage(s, dirty) {
929          result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
930          if (result != VK_SUCCESS)
931             return result;
932          result = emit_binding_table(cmd_buffer, s,
933                                      &cmd_buffer->state.binding_tables[s]);
934          if (result != VK_SUCCESS)
935             return result;
936       }
937    }
938
939    cmd_buffer->state.descriptors_dirty &= ~dirty;
940
941    return dirty;
942 }
943
944 static void
945 cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
946                                     uint32_t stages)
947 {
948    static const uint32_t sampler_state_opcodes[] = {
949       [MESA_SHADER_VERTEX]                      = 43,
950       [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
951       [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
952       [MESA_SHADER_GEOMETRY]                    = 46,
953       [MESA_SHADER_FRAGMENT]                    = 47,
954       [MESA_SHADER_COMPUTE]                     = 0,
955    };
956
957    static const uint32_t binding_table_opcodes[] = {
958       [MESA_SHADER_VERTEX]                      = 38,
959       [MESA_SHADER_TESS_CTRL]                   = 39,
960       [MESA_SHADER_TESS_EVAL]                   = 40,
961       [MESA_SHADER_GEOMETRY]                    = 41,
962       [MESA_SHADER_FRAGMENT]                    = 42,
963       [MESA_SHADER_COMPUTE]                     = 0,
964    };
965
966    anv_foreach_stage(s, stages) {
967       if (cmd_buffer->state.samplers[s].alloc_size > 0) {
968          anv_batch_emit(&cmd_buffer->batch,
969                         GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
970             ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
971             ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
972          }
973       }
974
975       /* Always emit binding table pointers if we're asked to, since on SKL
976        * this is what flushes push constants. */
977       anv_batch_emit(&cmd_buffer->batch,
978                      GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
979          btp._3DCommandSubOpcode = binding_table_opcodes[s];
980          btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
981       }
982    }
983 }
984
985 static uint32_t
986 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
987 {
988    static const uint32_t push_constant_opcodes[] = {
989       [MESA_SHADER_VERTEX]                      = 21,
990       [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
991       [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
992       [MESA_SHADER_GEOMETRY]                    = 22,
993       [MESA_SHADER_FRAGMENT]                    = 23,
994       [MESA_SHADER_COMPUTE]                     = 0,
995    };
996
997    VkShaderStageFlags flushed = 0;
998
999    anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
1000       if (stage == MESA_SHADER_COMPUTE)
1001          continue;
1002
1003       struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
1004
1005       if (state.offset == 0) {
1006          anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
1007             c._3DCommandSubOpcode = push_constant_opcodes[stage];
1008       } else {
1009          anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
1010             c._3DCommandSubOpcode = push_constant_opcodes[stage];
1011             c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
1012 #if GEN_GEN >= 9
1013                .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
1014                .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
1015 #else
1016                .PointerToConstantBuffer0 = { .offset = state.offset },
1017                .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
1018 #endif
1019             };
1020          }
1021       }
1022
1023       flushed |= mesa_to_vk_shader_stage(stage);
1024    }
1025
1026    cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
1027
1028    return flushed;
1029 }
1030
1031 void
1032 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
1033 {
1034    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
1035    uint32_t *p;
1036
1037    uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
1038
1039    assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
1040
1041    genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);
1042
1043    genX(flush_pipeline_select_3d)(cmd_buffer);
1044
1045    if (vb_emit) {
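      /* 3DSTATE_VERTEX_BUFFERS is one header dword followed by four dwords of
       * VERTEX_BUFFER_STATE per enabled vertex buffer.
       */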
1046       const uint32_t num_buffers = __builtin_popcount(vb_emit);
1047       const uint32_t num_dwords = 1 + num_buffers * 4;
1048
1049       p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
1050                           GENX(3DSTATE_VERTEX_BUFFERS));
1051       uint32_t vb, i = 0;
1052       for_each_bit(vb, vb_emit) {
1053          struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
1054          uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
1055
1056          struct GENX(VERTEX_BUFFER_STATE) state = {
1057             .VertexBufferIndex = vb,
1058
1059 #if GEN_GEN >= 8
1060             .MemoryObjectControlState = GENX(MOCS),
1061 #else
1062             .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
1063             .InstanceDataStepRate = 1,
1064             .VertexBufferMemoryObjectControlState = GENX(MOCS),
1065 #endif
1066
1067             .AddressModifyEnable = true,
1068             .BufferPitch = pipeline->binding_stride[vb],
1069             .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
1070
1071 #if GEN_GEN >= 8
1072             .BufferSize = buffer->size - offset
1073 #else
1074             .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
1075 #endif
1076          };
1077
1078          GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
1079          i++;
1080       }
1081    }
1082
1083    cmd_buffer->state.vb_dirty &= ~vb_emit;
1084
1085    if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
1086       anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
1087
1088       /* The exact descriptor layout is pulled from the pipeline, so we need
1089        * to re-emit binding tables on every pipeline change.
1090        */
1091       cmd_buffer->state.descriptors_dirty |=
1092          cmd_buffer->state.pipeline->active_stages;
1093
1094       /* If the pipeline changed, we may need to re-allocate push constant
1095        * space in the URB.
1096        */
1097       cmd_buffer_alloc_push_constants(cmd_buffer);
1098    }
1099
1100 #if GEN_GEN <= 7
1101    if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
1102        cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
1103       /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
1104        *
1105        *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
1106        *    stall needs to be sent just prior to any 3DSTATE_VS,
1107        *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
1108        *    3DSTATE_BINDING_TABLE_POINTER_VS,
1109        *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
1110        *    PIPE_CONTROL needs to be sent before any combination of VS
1111        *    associated 3DSTATE."
1112        */
1113       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1114          pc.DepthStallEnable  = true;
1115          pc.PostSyncOperation = WriteImmediateData;
1116          pc.Address           =
1117             (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
1118       }
1119    }
1120 #endif
1121
1122    /* Render targets live in the same binding table as fragment descriptors */
1123    if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
1124       cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
1125
1126    /* We emit the binding tables and sampler tables first, then emit push
1127     * constants and then finally emit binding table and sampler table
1128     * pointers.  It has to happen in this order, since emitting the binding
1129     * tables may change the push constants (in case of storage images). After
1130     * emitting push constants, on SKL+ we have to emit the corresponding
1131     * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
1132     */
1133    uint32_t dirty = 0;
1134    if (cmd_buffer->state.descriptors_dirty)
1135       dirty = flush_descriptor_sets(cmd_buffer);
1136
1137    if (cmd_buffer->state.push_constants_dirty) {
1138 #if GEN_GEN >= 9
1139       /* On Sky Lake and later, the binding table pointers commands are
1140        * what actually flush the changes to push constant state so we need
1141        * to dirty them so they get re-emitted below.
1142        */
1143       dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
1144 #else
1145       cmd_buffer_flush_push_constants(cmd_buffer);
1146 #endif
1147    }
1148
1149    if (dirty)
1150       cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
1151
1152    if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
1153       gen8_cmd_buffer_emit_viewport(cmd_buffer);
1154
1155    if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
1156                                   ANV_CMD_DIRTY_PIPELINE)) {
1157       gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
1158                                           pipeline->depth_clamp_enable);
1159    }
1160
1161    if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
1162       gen7_cmd_buffer_emit_scissor(cmd_buffer);
1163
1164    genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
1165
1166    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1167 }
1168
1169 static void
1170 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
1171                              struct anv_bo *bo, uint32_t offset)
1172 {
1173    uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
1174                                  GENX(3DSTATE_VERTEX_BUFFERS));
1175
1176    GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
1177       &(struct GENX(VERTEX_BUFFER_STATE)) {
1178          .VertexBufferIndex = 32, /* Reserved for this */
1179          .AddressModifyEnable = true,
1180          .BufferPitch = 0,
1181 #if (GEN_GEN >= 8)
1182          .MemoryObjectControlState = GENX(MOCS),
1183          .BufferStartingAddress = { bo, offset },
1184          .BufferSize = 8
1185 #else
1186          .VertexBufferMemoryObjectControlState = GENX(MOCS),
1187          .BufferStartingAddress = { bo, offset },
1188          .EndAddress = { bo, offset + 8 },
1189 #endif
1190       });
1191 }
1192
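/* Base vertex and base instance are passed to the vertex shader through a
 * small vertex buffer bound at index 32 (reserved for this purpose).  The
 * _bo variant above points at an arbitrary BO, which is used for indirect
 * draws; the helper below packs the two values into dynamic state first and
 * then calls it.
 */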
1193 static void
1194 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
1195                           uint32_t base_vertex, uint32_t base_instance)
1196 {
1197    struct anv_state id_state =
1198       anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
1199
1200    ((uint32_t *)id_state.map)[0] = base_vertex;
1201    ((uint32_t *)id_state.map)[1] = base_instance;
1202
1203    if (!cmd_buffer->device->info.has_llc)
1204       anv_state_clflush(id_state);
1205
1206    emit_base_vertex_instance_bo(cmd_buffer,
1207       &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
1208 }
1209
1210 void genX(CmdDraw)(
1211     VkCommandBuffer                             commandBuffer,
1212     uint32_t                                    vertexCount,
1213     uint32_t                                    instanceCount,
1214     uint32_t                                    firstVertex,
1215     uint32_t                                    firstInstance)
1216 {
1217    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1218    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
1219    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1220
1221    genX(cmd_buffer_flush_state)(cmd_buffer);
1222
1223    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
1224       emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
1225
1226    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
1227       prim.VertexAccessType         = SEQUENTIAL;
1228       prim.PrimitiveTopologyType    = pipeline->topology;
1229       prim.VertexCountPerInstance   = vertexCount;
1230       prim.StartVertexLocation      = firstVertex;
1231       prim.InstanceCount            = instanceCount;
1232       prim.StartInstanceLocation    = firstInstance;
1233       prim.BaseVertexLocation       = 0;
1234    }
1235 }
1236
1237 void genX(CmdDrawIndexed)(
1238     VkCommandBuffer                             commandBuffer,
1239     uint32_t                                    indexCount,
1240     uint32_t                                    instanceCount,
1241     uint32_t                                    firstIndex,
1242     int32_t                                     vertexOffset,
1243     uint32_t                                    firstInstance)
1244 {
1245    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1246    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
1247    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1248
1249    genX(cmd_buffer_flush_state)(cmd_buffer);
1250
1251    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
1252       emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
1253
1254    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
1255       prim.VertexAccessType         = RANDOM;
1256       prim.PrimitiveTopologyType    = pipeline->topology;
1257       prim.VertexCountPerInstance   = indexCount;
1258       prim.StartVertexLocation      = firstIndex;
1259       prim.InstanceCount            = instanceCount;
1260       prim.StartInstanceLocation    = firstInstance;
1261       prim.BaseVertexLocation       = vertexOffset;
1262    }
1263 }
1264
1265 /* Auto-Draw / Indirect Registers */
1266 #define GEN7_3DPRIM_END_OFFSET          0x2420
1267 #define GEN7_3DPRIM_START_VERTEX        0x2430
1268 #define GEN7_3DPRIM_VERTEX_COUNT        0x2434
1269 #define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
1270 #define GEN7_3DPRIM_START_INSTANCE      0x243C
1271 #define GEN7_3DPRIM_BASE_VERTEX         0x2440
1272
1273 void genX(CmdDrawIndirect)(
1274     VkCommandBuffer                             commandBuffer,
1275     VkBuffer                                    _buffer,
1276     VkDeviceSize                                offset,
1277     uint32_t                                    drawCount,
1278     uint32_t                                    stride)
1279 {
1280    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1281    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1282    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
1283    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1284    struct anv_bo *bo = buffer->bo;
1285    uint32_t bo_offset = buffer->offset + offset;
1286
1287    genX(cmd_buffer_flush_state)(cmd_buffer);
1288
1289    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
1290       emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
1291
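   /* VkDrawIndirectCommand is { vertexCount, instanceCount, firstVertex,
    * firstInstance }, so the 3DPRIM registers can be loaded straight from the
    * buffer at offsets 0, 4, 8 and 12.  Non-indexed draws have no base
    * vertex, so that register is explicitly zeroed.
    */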
1292    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
1293    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
1294    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
1295    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
1296    emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
1297
1298    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
1299       prim.IndirectParameterEnable  = true;
1300       prim.VertexAccessType         = SEQUENTIAL;
1301       prim.PrimitiveTopologyType    = pipeline->topology;
1302    }
1303 }
1304
1305 void genX(CmdDrawIndexedIndirect)(
1306     VkCommandBuffer                             commandBuffer,
1307     VkBuffer                                    _buffer,
1308     VkDeviceSize                                offset,
1309     uint32_t                                    drawCount,
1310     uint32_t                                    stride)
1311 {
1312    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1313    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1314    struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
1315    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1316    struct anv_bo *bo = buffer->bo;
1317    uint32_t bo_offset = buffer->offset + offset;
1318
1319    genX(cmd_buffer_flush_state)(cmd_buffer);
1320
1321    /* TODO: We need to stomp base vertex to 0 somehow */
1322    if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
1323       emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
1324
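   /* VkDrawIndexedIndirectCommand is { indexCount, instanceCount, firstIndex,
    * vertexOffset, firstInstance }, matching the register loads below at
    * offsets 0, 4, 8, 12 and 16.
    */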
1325    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
1326    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
1327    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
1328    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
1329    emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
1330
1331    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
1332       prim.IndirectParameterEnable  = true;
1333       prim.VertexAccessType         = RANDOM;
1334       prim.PrimitiveTopologyType    = pipeline->topology;
1335    }
1336 }
1337
1338 static VkResult
1339 flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
1340 {
1341    struct anv_device *device = cmd_buffer->device;
1342    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
1343    struct anv_state surfaces = { 0, }, samplers = { 0, };
1344    VkResult result;
1345
1346    result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
1347    if (result != VK_SUCCESS) {
1348       result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
1349       assert(result == VK_SUCCESS);
1350
1351       /* Re-emit state base addresses so we get the new surface state base
1352        * address before we start emitting binding tables etc.
1353        */
1354       genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
1355
1356       result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
1357       assert(result == VK_SUCCESS);
1358    }
1359
1360    result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers);
1361    assert(result == VK_SUCCESS);
1362
1363    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1364    const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;
1365
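        /* SharedLocalMemorySize takes an encoded value rather than a raw
         * byte count; encode_slm_size() produces the per-gen encoding for
         * the shader's total shared-memory requirement.
         */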
1366    const uint32_t slm_size = encode_slm_size(GEN_GEN, prog_data->total_shared);
1367
1368    struct anv_state state =
1369       anv_state_pool_emit(&device->dynamic_state_pool,
1370                           GENX(INTERFACE_DESCRIPTOR_DATA), 64,
1371                           .KernelStartPointer = pipeline->cs_simd,
1372                           .BindingTablePointer = surfaces.offset,
1373                           .BindingTableEntryCount = 0,
1374                           .SamplerStatePointer = samplers.offset,
1375                           .SamplerCount = 0,
1376 #if !GEN_IS_HASWELL
1377                           .ConstantURBEntryReadOffset = 0,
1378 #endif
1379                           .ConstantURBEntryReadLength =
1380                              cs_prog_data->push.per_thread.regs,
1381 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1382                           .CrossThreadConstantDataReadLength =
1383                              cs_prog_data->push.cross_thread.regs,
1384 #endif
1385                           .BarrierEnable = cs_prog_data->uses_barrier,
1386                           .SharedLocalMemorySize = slm_size,
1387                           .NumberofThreadsinGPGPUThreadGroup =
1388                              cs_prog_data->threads);
1389
1390    uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
1391    anv_batch_emit(&cmd_buffer->batch,
1392                   GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
1393       mid.InterfaceDescriptorTotalLength        = size;
1394       mid.InterfaceDescriptorDataStartAddress   = state.offset;
1395    }
1396
1397    return VK_SUCCESS;
1398 }
1399
1400 void
1401 genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
1402 {
1403    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
1404    MAYBE_UNUSED VkResult result;
1405
1406    assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
1407
1408    genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);
1409
1410    genX(flush_pipeline_select_gpgpu)(cmd_buffer);
1411
1412    if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) {
1413       /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
1414        *
1415        *    "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
1416        *    the only bits that are changed are scoreboard related: Scoreboard
1417        *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
1418        *    these scoreboard related states, a MEDIA_STATE_FLUSH is
1419        *    sufficient."
1420        */
1421       cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
1422       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1423
1424       anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
1425    }
1426
1427    if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
1428        (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
1429       /* FIXME: figure out descriptors for gen7 */
1430       result = flush_compute_descriptor_set(cmd_buffer);
1431       assert(result == VK_SUCCESS);
1432       cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
1433    }
1434
1435    if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) {
1436       struct anv_state push_state =
1437          anv_cmd_buffer_cs_push_constants(cmd_buffer);
1438
1439       if (push_state.alloc_size) {
1440          anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
1441             curbe.CURBETotalDataLength    = push_state.alloc_size;
1442             curbe.CURBEDataStartAddress   = push_state.offset;
1443          }
1444       }
1445    }
1446
1447    cmd_buffer->state.compute_dirty = 0;
1448
1449    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1450 }
1451
1452 #if GEN_GEN == 7
1453
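     /* On gen7, some registers (such as the GPGPU indirect dispatch
      * registers used below) may only be written from a batch if the
      * kernel's command parser is new enough, so check its version up front
      * and report a friendly error rather than emitting writes the kernel
      * would reject.
      */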
1454 static bool
1455 verify_cmd_parser(const struct anv_device *device,
1456                   int required_version,
1457                   const char *function)
1458 {
1459    if (device->instance->physicalDevice.cmd_parser_version < required_version) {
1460       vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
1461                 "cmd parser version %d is required for %s",
1462                 required_version, function);
1463       return false;
1464    } else {
1465       return true;
1466    }
1467 }
1468
1469 #endif
1470
1471 void genX(CmdDispatch)(
1472     VkCommandBuffer                             commandBuffer,
1473     uint32_t                                    x,
1474     uint32_t                                    y,
1475     uint32_t                                    z)
1476 {
1477    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1478    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
1479    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
1480
1481    if (prog_data->uses_num_work_groups) {
1482       struct anv_state state =
1483          anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
1484       uint32_t *sizes = state.map;
1485       sizes[0] = x;
1486       sizes[1] = y;
1487       sizes[2] = z;
1488       if (!cmd_buffer->device->info.has_llc)
1489          anv_state_clflush(state);
1490       cmd_buffer->state.num_workgroups_offset = state.offset;
1491       cmd_buffer->state.num_workgroups_bo =
1492          &cmd_buffer->device->dynamic_state_block_pool.bo;
1493    }
1494
1495    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
1496
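        /* SIMDSize is an enum (0 = SIMD8, 1 = SIMD16, 2 = SIMD32) which the
         * division by 16 produces from the compiled dispatch width.  The
         * walker below launches an x * y * z grid of thread groups, each of
         * prog_data->threads hardware threads.
         */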
1497    anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
1498       ggw.SIMDSize                     = prog_data->simd_size / 16;
1499       ggw.ThreadDepthCounterMaximum    = 0;
1500       ggw.ThreadHeightCounterMaximum   = 0;
1501       ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
1502       ggw.ThreadGroupIDXDimension      = x;
1503       ggw.ThreadGroupIDYDimension      = y;
1504       ggw.ThreadGroupIDZDimension      = z;
1505       ggw.RightExecutionMask           = pipeline->cs_right_mask;
1506       ggw.BottomExecutionMask          = 0xffffffff;
1507    }
1508
1509    anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
1510 }
1511
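     /* MMIO offsets of the GPGPU dispatch dimension registers; GPGPU_WALKER
      * pulls its thread-group counts from these when IndirectParameterEnable
      * is set.
      */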
1512 #define GPGPU_DISPATCHDIMX 0x2500
1513 #define GPGPU_DISPATCHDIMY 0x2504
1514 #define GPGPU_DISPATCHDIMZ 0x2508
1515
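     /* MMIO offsets of the two 64-bit source operands consumed by
      * MI_PREDICATE.
      */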
1516 #define MI_PREDICATE_SRC0  0x2400
1517 #define MI_PREDICATE_SRC1  0x2408
1518
1519 void genX(CmdDispatchIndirect)(
1520     VkCommandBuffer                             commandBuffer,
1521     VkBuffer                                    _buffer,
1522     VkDeviceSize                                offset)
1523 {
1524    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1525    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1526    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
1527    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
1528    struct anv_bo *bo = buffer->bo;
1529    uint32_t bo_offset = buffer->offset + offset;
1530    struct anv_batch *batch = &cmd_buffer->batch;
1531
1532 #if GEN_GEN == 7
1533    /* Linux 4.4 added command parser version 5 which allows the GPGPU
1534     * indirect dispatch registers to be written.
1535     */
1536    if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
1537       return;
1538 #endif
1539
1540    if (prog_data->uses_num_work_groups) {
1541       cmd_buffer->state.num_workgroups_offset = bo_offset;
1542       cmd_buffer->state.num_workgroups_bo = bo;
1543    }
1544
1545    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
1546
1547    emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
1548    emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
1549    emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
1550
1551 #if GEN_GEN <= 7
1552    /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
1553    emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
1554    emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
1555    emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
1556
1557    /* Load compute_dispatch_indirect_x_size into SRC0 */
1558    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
1559
1560    /* predicate = (compute_dispatch_indirect_x_size == 0); */
1561    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
1562       mip.LoadOperation    = LOAD_LOAD;
1563       mip.CombineOperation = COMBINE_SET;
1564       mip.CompareOperation = COMPARE_SRCS_EQUAL;
1565    }
1566
1567    /* Load compute_dispatch_indirect_y_size into SRC0 */
1568    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
1569
1570    /* predicate |= (compute_dispatch_indirect_y_size == 0); */
1571    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
1572       mip.LoadOperation    = LOAD_LOAD;
1573       mip.CombineOperation = COMBINE_OR;
1574       mip.CompareOperation = COMPARE_SRCS_EQUAL;
1575    }
1576
1577    /* Load compute_dispatch_indirect_z_size into SRC0 */
1578    emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
1579
1580    /* predicate |= (compute_dispatch_indirect_z_size == 0); */
1581    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
1582       mip.LoadOperation    = LOAD_LOAD;
1583       mip.CombineOperation = COMBINE_OR;
1584       mip.CompareOperation = COMPARE_SRCS_EQUAL;
1585    }
1586
1587    /* predicate = !predicate; */
1588 #define COMPARE_FALSE                           1
1589    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
1590       mip.LoadOperation    = LOAD_LOADINV;
1591       mip.CombineOperation = COMBINE_OR;
1592       mip.CompareOperation = COMPARE_FALSE;
1593    }
1594 #endif
1595
1596    anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
1597       ggw.IndirectParameterEnable      = true;
1598       ggw.PredicateEnable              = GEN_GEN <= 7;
1599       ggw.SIMDSize                     = prog_data->simd_size / 16;
1600       ggw.ThreadDepthCounterMaximum    = 0;
1601       ggw.ThreadHeightCounterMaximum   = 0;
1602       ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
1603       ggw.RightExecutionMask           = pipeline->cs_right_mask;
1604       ggw.BottomExecutionMask          = 0xffffffff;
1605    }
1606
1607    anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
1608 }
1609
1610 static void
1611 flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
1612                                       uint32_t pipeline)
1613 {
1614 #if GEN_GEN >= 8 && GEN_GEN < 10
1615    /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
1616     *
1617     *   Software must clear the COLOR_CALC_STATE Valid field in
1618     *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
1619     *   with Pipeline Select set to GPGPU.
1620     *
1621     * The internal hardware docs recommend the same workaround for Gen9
1622     * hardware too.
1623     */
1624    if (pipeline == GPGPU)
1625       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
1626 #elif GEN_GEN <= 7
1627       /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
1628        * PIPELINE_SELECT [DevBWR+]":
1629        *
1630        *   Project: DEVSNB+
1631        *
1632        *   Software must ensure all the write caches are flushed through a
1633        *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
1634        *   command to invalidate read only caches prior to programming
1635        *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
1636        */
1637       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1638          pc.RenderTargetCacheFlushEnable  = true;
1639          pc.DepthCacheFlushEnable         = true;
1640          pc.DCFlushEnable                 = true;
1641          pc.PostSyncOperation             = NoWrite;
1642          pc.CommandStreamerStallEnable    = true;
1643       }
1644
1645       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1646          pc.TextureCacheInvalidationEnable   = true;
1647          pc.ConstantCacheInvalidationEnable  = true;
1648          pc.StateCacheInvalidationEnable     = true;
1649          pc.InstructionCacheInvalidateEnable = true;
1650          pc.PostSyncOperation                = NoWrite;
1651       }
1652 #endif
1653 }
1654
1655 void
1656 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
1657 {
1658    if (cmd_buffer->state.current_pipeline != _3D) {
1659       flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
1660
1661       anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
1662 #if GEN_GEN >= 9
1663          ps.MaskBits = 3;
1664 #endif
1665          ps.PipelineSelection = _3D;
1666       }
1667
1668       cmd_buffer->state.current_pipeline = _3D;
1669    }
1670 }
1671
1672 void
1673 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
1674 {
1675    if (cmd_buffer->state.current_pipeline != GPGPU) {
1676       flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
1677
1678       anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
1679 #if GEN_GEN >= 9
1680          ps.MaskBits = 3;
1681 #endif
1682          ps.PipelineSelection = GPGPU;
1683       }
1684
1685       cmd_buffer->state.current_pipeline = GPGPU;
1686    }
1687 }
1688
1689 void
1690 genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer)
1691 {
1692    if (GEN_GEN >= 8)
1693       return;
1694
1695    /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER:
1696     *
1697     *    "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any
1698     *    combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
1699     *    3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
1700     *    issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
1701     *    set), followed by a pipelined depth cache flush (PIPE_CONTROL with
1702     *    Depth Flush Bit set, followed by another pipelined depth stall
1703     *    (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
1704     *    guarantee that the pipeline from WM onwards is already flushed (e.g.,
1705     *    via a preceding MI_FLUSH)."
1706     */
1707    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
1708       pipe.DepthStallEnable = true;
1709    }
1710    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
1711       pipe.DepthCacheFlushEnable = true;
1712    }
1713    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
1714       pipe.DepthStallEnable = true;
1715    }
1716 }
1717
1718 static void
1719 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
1720 {
1721    struct anv_device *device = cmd_buffer->device;
1722    const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
1723    const struct anv_image_view *iview =
1724       anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
1725    const struct anv_image *image = iview ? iview->image : NULL;
1726    const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
1727    const bool has_hiz = image != NULL && anv_image_has_hiz(image);
1728    const bool has_stencil =
1729       image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
1730
1731    /* FIXME: Implement the PMA stall W/A */
1732    /* FIXME: Width and Height are wrong */
1733
1734    genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);
1735
1736    /* Emit 3DSTATE_DEPTH_BUFFER */
1737    if (has_depth) {
1738       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
1739          db.SurfaceType                   = SURFTYPE_2D;
1740          db.DepthWriteEnable              = true;
1741          db.StencilWriteEnable            = has_stencil;
1742
1743          if (cmd_buffer->state.pass->subpass_count == 1) {
1744             db.HierarchicalDepthBufferEnable = has_hiz;
1745          } else {
1746             anv_finishme("Multiple-subpass HiZ not implemented");
1747          }
1748
1749          db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
1750                                                       &image->depth_surface.isl);
1751
1752          db.SurfaceBaseAddress = (struct anv_address) {
1753             .bo = image->bo,
1754             .offset = image->offset + image->depth_surface.offset,
1755          };
1756          db.DepthBufferObjectControlState = GENX(MOCS);
1757
1758          db.SurfacePitch         = image->depth_surface.isl.row_pitch - 1;
1759          db.Height               = image->extent.height - 1;
1760          db.Width                = image->extent.width - 1;
1761          db.LOD                  = iview->isl.base_level;
1762          db.Depth                = image->array_size - 1; /* FIXME: 3-D */
1763          db.MinimumArrayElement  = iview->isl.base_array_layer;
1764
1765 #if GEN_GEN >= 8
1766          db.SurfaceQPitch =
1767             isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
1768 #endif
1769          db.RenderTargetViewExtent = 1 - 1;
1770       }
1771    } else {
1772       /* Even when no depth buffer is present, the hardware requires that
1773        * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
1774        *
1775        *    If a null depth buffer is bound, the driver must instead bind depth as:
1776        *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
1777        *       3DSTATE_DEPTH.Width = 1
1778        *       3DSTATE_DEPTH.Height = 1
1779        *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
1780        *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
1781        *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
1782        *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
1783        *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
1784        *
1785        * The PRM is wrong, though. The width and height must be programmed to
1786        * the actual framebuffer's width and height, even when neither depth buffer
1787        * nor stencil buffer is present.  Also, D16_UNORM is not allowed to
1788        * be combined with a stencil buffer so we use D32_FLOAT instead.
1789        */
1790       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
1791          db.SurfaceType          = SURFTYPE_2D;
1792          db.SurfaceFormat        = D32_FLOAT;
1793          db.Width                = fb->width - 1;
1794          db.Height               = fb->height - 1;
1795          db.StencilWriteEnable   = has_stencil;
1796       }
1797    }
1798
1799    if (has_hiz) {
1800       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb) {
1801          hdb.HierarchicalDepthBufferObjectControlState = GENX(MOCS);
1802          hdb.SurfacePitch = image->aux_surface.isl.row_pitch - 1;
1803          hdb.SurfaceBaseAddress = (struct anv_address) {
1804             .bo = image->bo,
1805             .offset = image->offset + image->aux_surface.offset,
1806          };
1807 #if GEN_GEN >= 8
1808          /* From the SKL PRM Vol2a:
1809           *
1810           *    The interpretation of this field is dependent on Surface Type
1811           *    as follows:
1812           *    - SURFTYPE_1D: distance in pixels between array slices
1813           *    - SURFTYPE_2D/CUBE: distance in rows between array slices
1814           *    - SURFTYPE_3D: distance in rows between R - slices
1815           */
1816          hdb.SurfaceQPitch =
1817             image->aux_surface.isl.dim == ISL_SURF_DIM_1D ?
1818                isl_surf_get_array_pitch_el(&image->aux_surface.isl) >> 2 :
1819                isl_surf_get_array_pitch_el_rows(&image->aux_surface.isl) >> 2;
1820 #endif
1821       }
1822    } else {
1823       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb);
1824    }
1825
1826    /* Emit 3DSTATE_STENCIL_BUFFER */
1827    if (has_stencil) {
1828       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
1829 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1830          sb.StencilBufferEnable = true;
1831 #endif
1832          sb.StencilBufferObjectControlState = GENX(MOCS);
1833
1834          sb.SurfacePitch = image->stencil_surface.isl.row_pitch - 1;
1835
1836 #if GEN_GEN >= 8
1837          sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
1838 #endif
1839          sb.SurfaceBaseAddress = (struct anv_address) {
1840             .bo = image->bo,
1841             .offset = image->offset + image->stencil_surface.offset,
1842          };
1843       }
1844    } else {
1845       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
1846    }
1847
1848    /* From the IVB PRM Vol2P1, 11.5.5.4 3DSTATE_CLEAR_PARAMS:
1849     *
1850     *    3DSTATE_CLEAR_PARAMS must always be programmed along with the
1851     *    other Depth/Stencil state commands (i.e. 3DSTATE_DEPTH_BUFFER,
1852     *    3DSTATE_STENCIL_BUFFER, or 3DSTATE_HIER_DEPTH_BUFFER)
1853     *
1854     * Testing also shows that some variant of this restriction may exist on HSW+.
1855     * On BDW+, it is not possible to emit 2 of these packets consecutively when
1856     * both have DepthClearValueValid set. An analysis of such state programming
1857     * on SKL showed that the GPU doesn't register the latter packet's clear
1858     * value.
1859     */
1860    anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp) {
1861       if (has_hiz) {
1862          cp.DepthClearValueValid = true;
1863          const uint32_t ds =
1864             cmd_buffer->state.subpass->depth_stencil_attachment;
1865          cp.DepthClearValue =
1866             cmd_buffer->state.attachments[ds].clear_value.depthStencil.depth;
1867       }
1868    }
1869 }
1870
1871 static void
1872 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
1873                              struct anv_subpass *subpass)
1874 {
1875    cmd_buffer->state.subpass = subpass;
1876
1877    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
1878
1879    cmd_buffer_emit_depth_stencil(cmd_buffer);
1880    genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_HIZ_RESOLVE);
1881    genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_CLEAR);
1882
1883    anv_cmd_buffer_clear_subpass(cmd_buffer);
1884 }
1885
1886 void genX(CmdBeginRenderPass)(
1887     VkCommandBuffer                             commandBuffer,
1888     const VkRenderPassBeginInfo*                pRenderPassBegin,
1889     VkSubpassContents                           contents)
1890 {
1891    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1892    ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
1893    ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1894
1895    cmd_buffer->state.framebuffer = framebuffer;
1896    cmd_buffer->state.pass = pass;
1897    cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
1898    anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
1899
1900    genX(flush_pipeline_select_3d)(cmd_buffer);
1901
1902    genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
1903 }
1904
1905 void genX(CmdNextSubpass)(
1906     VkCommandBuffer                             commandBuffer,
1907     VkSubpassContents                           contents)
1908 {
1909    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1910
1911    assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1912
1913    anv_cmd_buffer_resolve_subpass(cmd_buffer);
1914    genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
1915 }
1916
1917 void genX(CmdEndRenderPass)(
1918     VkCommandBuffer                             commandBuffer)
1919 {
1920    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1921
1922    genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_RESOLVE);
1923    anv_cmd_buffer_resolve_subpass(cmd_buffer);
1924
1925 #ifndef NDEBUG
1926    anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
1927 #endif
1928 }
1929
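     /* Occlusion queries work by snapshotting the pipeline's PS_DEPTH_COUNT
      * statistic with a post-sync PIPE_CONTROL write; the difference between
      * the end and begin snapshots is the number of samples that passed.
      */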
1930 static void
1931 emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
1932                     struct anv_bo *bo, uint32_t offset)
1933 {
1934    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1935       pc.DestinationAddressType  = DAT_PPGTT;
1936       pc.PostSyncOperation       = WritePSDepthCount;
1937       pc.DepthStallEnable        = true;
1938       pc.Address                 = (struct anv_address) { bo, offset };
1939
1940       if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
1941          pc.CommandStreamerStallEnable = true;
1942    }
1943 }
1944
1945 static void
1946 emit_query_availability(struct anv_cmd_buffer *cmd_buffer,
1947                         struct anv_bo *bo, uint32_t offset)
1948 {
1949    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1950       pc.DestinationAddressType  = DAT_PPGTT;
1951       pc.PostSyncOperation       = WriteImmediateData;
1952       pc.Address                 = (struct anv_address) { bo, offset };
1953       pc.ImmediateData           = 1;
1954    }
1955 }
1956
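     /* The offsets below assume the query pool slot layout used throughout
      * this file: a 64-bit begin value at +0, a 64-bit end value at +8 and
      * an availability word at +16 (see struct anv_query_pool_slot).
      */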
1957 void genX(CmdBeginQuery)(
1958     VkCommandBuffer                             commandBuffer,
1959     VkQueryPool                                 queryPool,
1960     uint32_t                                    query,
1961     VkQueryControlFlags                         flags)
1962 {
1963    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1964    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1965
1966    /* Workaround: When meta uses the pipeline with the VS disabled, it seems
1967     * that the pipelining of the depth write breaks. What we see is that
1968     * samples from the render pass clear leak into the first query
1969     * immediately after the clear. Doing a pipecontrol with a post-sync
1970     * operation and DepthStallEnable seems to work around the issue.
1971     */
1972    if (cmd_buffer->state.need_query_wa) {
1973       cmd_buffer->state.need_query_wa = false;
1974       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1975          pc.DepthCacheFlushEnable   = true;
1976          pc.DepthStallEnable        = true;
1977       }
1978    }
1979
1980    switch (pool->type) {
1981    case VK_QUERY_TYPE_OCCLUSION:
1982       emit_ps_depth_count(cmd_buffer, &pool->bo,
1983                           query * sizeof(struct anv_query_pool_slot));
1984       break;
1985
1986    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1987    default:
1988       unreachable("unhandled query type");
1989    }
1990 }
1991
1992 void genX(CmdEndQuery)(
1993     VkCommandBuffer                             commandBuffer,
1994     VkQueryPool                                 queryPool,
1995     uint32_t                                    query)
1996 {
1997    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1998    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1999
2000    switch (pool->type) {
2001    case VK_QUERY_TYPE_OCCLUSION:
2002       emit_ps_depth_count(cmd_buffer, &pool->bo,
2003                           query * sizeof(struct anv_query_pool_slot) + 8);
2004
2005       emit_query_availability(cmd_buffer, &pool->bo,
2006                               query * sizeof(struct anv_query_pool_slot) + 16);
2007       break;
2008
2009    case VK_QUERY_TYPE_PIPELINE_STATISTICS:
2010    default:
2011       unreachable("unhandled query type");
2012    }
2013 }
2014
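     /* MMIO offset of the command streamer's TIMESTAMP register.  The code
      * below either stores it directly with MI_STORE_REGISTER_MEM or
      * captures an equivalent value with a PIPE_CONTROL post-sync timestamp
      * write.
      */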
2015 #define TIMESTAMP 0x2358
2016
2017 void genX(CmdWriteTimestamp)(
2018     VkCommandBuffer                             commandBuffer,
2019     VkPipelineStageFlagBits                     pipelineStage,
2020     VkQueryPool                                 queryPool,
2021     uint32_t                                    query)
2022 {
2023    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2024    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
2025    uint32_t offset = query * sizeof(struct anv_query_pool_slot);
2026
2027    assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
2028
2029    switch (pipelineStage) {
2030    case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
2031       anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
2032          srm.RegisterAddress  = TIMESTAMP;
2033          srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset };
2034       }
2035       anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
2036          srm.RegisterAddress  = TIMESTAMP + 4;
2037          srm.MemoryAddress    = (struct anv_address) { &pool->bo, offset + 4 };
2038       }
2039       break;
2040
2041    default:
2042       /* Everything else is bottom-of-pipe */
2043       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2044          pc.DestinationAddressType  = DAT_PPGTT;
2045          pc.PostSyncOperation       = WriteTimestamp;
2046          pc.Address = (struct anv_address) { &pool->bo, offset };
2047
2048          if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
2049             pc.CommandStreamerStallEnable = true;
2050       }
2051       break;
2052    }
2053
2054    emit_query_availability(cmd_buffer, &pool->bo, offset + 16);
2055 }
2056
2057 #if GEN_GEN > 7 || GEN_IS_HASWELL
2058
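     /* Helpers for building MI_MATH ALU instructions: the opcode lives in
      * bits 31:20 and the two operands in bits 19:10 and 9:0, as encoded by
      * the macros below.
      */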
2059 #define alu_opcode(v)   __gen_uint((v),  20, 31)
2060 #define alu_operand1(v) __gen_uint((v),  10, 19)
2061 #define alu_operand2(v) __gen_uint((v),   0,  9)
2062 #define alu(opcode, operand1, operand2) \
2063    alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
2064
2065 #define OPCODE_NOOP      0x000
2066 #define OPCODE_LOAD      0x080
2067 #define OPCODE_LOADINV   0x480
2068 #define OPCODE_LOAD0     0x081
2069 #define OPCODE_LOAD1     0x481
2070 #define OPCODE_ADD       0x100
2071 #define OPCODE_SUB       0x101
2072 #define OPCODE_AND       0x102
2073 #define OPCODE_OR        0x103
2074 #define OPCODE_XOR       0x104
2075 #define OPCODE_STORE     0x180
2076 #define OPCODE_STOREINV  0x580
2077
2078 #define OPERAND_R0   0x00
2079 #define OPERAND_R1   0x01
2080 #define OPERAND_R2   0x02
2081 #define OPERAND_R3   0x03
2082 #define OPERAND_R4   0x04
2083 #define OPERAND_SRCA 0x20
2084 #define OPERAND_SRCB 0x21
2085 #define OPERAND_ACCU 0x31
2086 #define OPERAND_ZF   0x32
2087 #define OPERAND_CF   0x33
2088
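     /* The command streamer exposes a file of 64-bit general purpose
      * registers starting at 0x2600, eight bytes apart; MI_MATH uses them as
      * its sources and destinations.
      */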
2089 #define CS_GPR(n) (0x2600 + (n) * 8)
2090
2091 static void
2092 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
2093                       struct anv_bo *bo, uint32_t offset)
2094 {
2095    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
2096       lrm.RegisterAddress  = reg;
2097       lrm.MemoryAddress    = (struct anv_address) { bo, offset };
2098    }
2099    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
2100       lrm.RegisterAddress  = reg + 4;
2101       lrm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
2102    }
2103 }
2104
2105 static void
2106 store_query_result(struct anv_batch *batch, uint32_t reg,
2107                    struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
2108 {
2109    anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
2110       srm.RegisterAddress  = reg;
2111       srm.MemoryAddress    = (struct anv_address) { bo, offset };
2112    }
2113
2114    if (flags & VK_QUERY_RESULT_64_BIT) {
2115       anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
2116          srm.RegisterAddress  = reg + 4;
2117          srm.MemoryAddress    = (struct anv_address) { bo, offset + 4 };
2118       }
2119    }
2120 }
2121
2122 void genX(CmdCopyQueryPoolResults)(
2123     VkCommandBuffer                             commandBuffer,
2124     VkQueryPool                                 queryPool,
2125     uint32_t                                    firstQuery,
2126     uint32_t                                    queryCount,
2127     VkBuffer                                    destBuffer,
2128     VkDeviceSize                                destOffset,
2129     VkDeviceSize                                destStride,
2130     VkQueryResultFlags                          flags)
2131 {
2132    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2133    ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
2134    ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
2135    uint32_t slot_offset, dst_offset;
2136
2137    if (flags & VK_QUERY_RESULT_WAIT_BIT) {
2138       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
2139          pc.CommandStreamerStallEnable = true;
2140          pc.StallAtPixelScoreboard     = true;
2141       }
2142    }
2143
2144    dst_offset = buffer->offset + destOffset;
2145    for (uint32_t i = 0; i < queryCount; i++) {
2146
2147       slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
2148       switch (pool->type) {
2149       case VK_QUERY_TYPE_OCCLUSION:
2150          emit_load_alu_reg_u64(&cmd_buffer->batch,
2151                                CS_GPR(0), &pool->bo, slot_offset);
2152          emit_load_alu_reg_u64(&cmd_buffer->batch,
2153                                CS_GPR(1), &pool->bo, slot_offset + 8);
2154
2155          /* FIXME: We need to clamp the result for 32 bit. */
2156
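              /* MI_MATH: load end (R1) and begin (R0) into the ALU sources,
               * subtract, and store the resulting sample count into R2 from
               * the accumulator.
               */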
2157          uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
2158          dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
2159          dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
2160          dw[3] = alu(OPCODE_SUB, 0, 0);
2161          dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
2162          break;
2163
2164       case VK_QUERY_TYPE_TIMESTAMP:
2165          emit_load_alu_reg_u64(&cmd_buffer->batch,
2166                                CS_GPR(2), &pool->bo, slot_offset);
2167          break;
2168
2169       default:
2170          unreachable("unhandled query type");
2171       }
2172
2173       store_query_result(&cmd_buffer->batch,
2174                          CS_GPR(2), buffer->bo, dst_offset, flags);
2175
2176       if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
2177          emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
2178                                &pool->bo, slot_offset + 16);
2179          if (flags & VK_QUERY_RESULT_64_BIT)
2180             store_query_result(&cmd_buffer->batch,
2181                                CS_GPR(0), buffer->bo, dst_offset + 8, flags);
2182          else
2183             store_query_result(&cmd_buffer->batch,
2184                                CS_GPR(0), buffer->bo, dst_offset + 4, flags);
2185       }
2186
2187       dst_offset += destStride;
2188    }
2189 }
2190
2191 #else
2192 void genX(CmdCopyQueryPoolResults)(
2193     VkCommandBuffer                             commandBuffer,
2194     VkQueryPool                                 queryPool,
2195     uint32_t                                    firstQuery,
2196     uint32_t                                    queryCount,
2197     VkBuffer                                    destBuffer,
2198     VkDeviceSize                                destOffset,
2199     VkDeviceSize                                destStride,
2200     VkQueryResultFlags                          flags)
2201 {
2202    anv_finishme("Queries not yet supported on Ivy Bridge");
2203 }
2204 #endif