/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "common/gen_l3_config.h"
#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

static void
emit_lrm(struct anv_batch *batch,
         uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress  = reg;
      lrm.MemoryAddress    = (struct anv_address) { bo, offset };
   }
}
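
/* Convenience helpers for programming MMIO registers from the command
 * stream: emit_lrm() loads a 32-bit value from a BO into a register via
 * MI_LOAD_REGISTER_MEM, and emit_lri() loads an immediate via
 * MI_LOAD_REGISTER_IMM.  The indirect draw and dispatch paths below use
 * these to feed the 3DPRIM_* and GPGPU_DISPATCHDIM* registers.
 */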

static void
emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
      lri.RegisterOffset   = reg;
      lri.DataDWord        = imm;
   }
}

void
genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;

   /* XXX: Do we need this on more than just BDW? */
#if (GEN_GEN >= 8)
   /* Emit a render target cache flush.
    *
    * This isn't documented anywhere in the PRM.  However, it seems to be
    * necessary prior to changing the surface state base address.  Without
    * this, we get GPU hangs when using multi-level command buffers which
    * clear depth, reset state base address, and then go render stuff.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable = true;
   }
#endif

   anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
      sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
      sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
      sba.GeneralStateBaseAddressModifyEnable = true;

      sba.SurfaceStateBaseAddress =
         anv_cmd_buffer_surface_base_address(cmd_buffer);
      sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
      sba.SurfaceStateBaseAddressModifyEnable = true;

      sba.DynamicStateBaseAddress =
         (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
      sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
      sba.DynamicStateBaseAddressModifyEnable = true;

      sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
      sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
      sba.IndirectObjectBaseAddressModifyEnable = true;

      sba.InstructionBaseAddress =
         (struct anv_address) { &device->instruction_block_pool.bo, 0 };
      sba.InstructionMemoryObjectControlState = GENX(MOCS);
      sba.InstructionBaseAddressModifyEnable = true;

#if (GEN_GEN >= 8)
      /* Broadwell requires that we specify a buffer size for a bunch of
       * these fields.  However, since we will be growing the BO's live, we
       * just set them all to the maximum.
       */
      sba.GeneralStateBufferSize                = 0xfffff;
      sba.GeneralStateBufferSizeModifyEnable    = true;
      sba.DynamicStateBufferSize                = 0xfffff;
      sba.DynamicStateBufferSizeModifyEnable    = true;
      sba.IndirectObjectBufferSize              = 0xfffff;
      sba.IndirectObjectBufferSizeModifyEnable  = true;
      sba.InstructionBufferSize                 = 0xfffff;
      sba.InstructionBuffersizeModifyEnable     = true;
#endif
   }
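
   /* Note on the 0xfffff buffer sizes above: the size fields are expressed
    * in 4KB pages (an assumption based on the STATE_BASE_ADDRESS packet
    * description), so the maximum value bounds each state range at roughly
    * 4GB -- effectively unbounded for BOs that grow live.
    */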

   /* After re-setting the surface state base address, we have to do some
    * cache flushing so that the sampler engine will pick up the new
    * SURFACE_STATE objects and binding tables.  From the Broadwell PRM,
    * Shared Function > 3D Sampler > State > State Caching (page 96):
    *
    *    Coherency with system memory in the state cache, like the texture
    *    cache is handled partially by software.  It is expected that the
    *    command stream or shader will issue Cache Flush operation or
    *    Cache_Flush sampler message to ensure that the L1 cache remains
    *    coherent with system memory.
    *
    *    Whenever the value of the Dynamic_State_Base_Addr,
    *    Surface_State_Base_Addr are altered, the L1 state cache must be
    *    invalidated to ensure the new surface or sampler state is fetched
    *    from system memory.
    *
    * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
    * which, according to the PIPE_CONTROL instruction documentation in the
    * Broadwell PRM:
    *
    *    Setting this bit is independent of any other bit in this packet.
    *    This bit controls the invalidation of the L1 and L2 state caches
    *    at the top of the pipe i.e. at the parsing time.
    *
    * Unfortunately, experimentation seems to indicate that state cache
    * invalidation through a PIPE_CONTROL does nothing whatsoever in
    * regards to surface state and binding tables.  Instead, it seems that
    * invalidating the texture cache is what is actually needed.
    *
    * XXX: As far as we have been able to determine through experimentation,
    * flushing the texture cache appears to be sufficient.  The theory here
    * is that all of the sampling/rendering units cache the binding table in
    * the texture cache.  However, we have yet to be able to actually
    * confirm this.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
   }
}

VkResult
genX(BeginCommandBuffer)(
    VkCommandBuffer                             commandBuffer,
    const VkCommandBufferBeginInfo*             pBeginInfo)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* If this is the first vkBeginCommandBuffer, we must *initialize* the
    * command buffer's state. Otherwise, we must *reset* its state. In both
    * cases we reset it.
    *
    * From the Vulkan 1.0 spec:
    *
    *    If a command buffer is in the executable state and the command buffer
    *    was allocated from a command pool with the
    *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
    *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
    *    as if vkResetCommandBuffer had been called with
    *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
    *    the command buffer in the recording state.
    */
   anv_cmd_buffer_reset(cmd_buffer);

   cmd_buffer->usage_flags = pBeginInfo->flags;

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY ||
          !(cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT));

   genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

   if (cmd_buffer->usage_flags &
       VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
      cmd_buffer->state.framebuffer =
         anv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
      cmd_buffer->state.pass =
         anv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
      cmd_buffer->state.subpass =
         &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

      cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
   }

   return VK_SUCCESS;
}

VkResult
genX(EndCommandBuffer)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   anv_cmd_buffer_end_batch_buffer(cmd_buffer);

   return VK_SUCCESS;
}

void
genX(CmdExecuteCommands)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    commandBufferCount,
    const VkCommandBuffer*                      pCmdBuffers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);

   assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);

      assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);

      anv_cmd_buffer_add_secondary(primary, secondary);
   }

   /* Each of the secondary command buffers will use its own state base
    * address.  We need to re-emit state base address for the primary after
    * all of the secondaries are done.
    *
    * TODO: Maybe we want to make this a dirty bit to avoid extra state base
    * address calls.
    */
   genX(cmd_buffer_emit_state_base_address)(primary);
}

#define IVB_L3SQCREG1_SQGHPCI_DEFAULT 0x00730000
#define VLV_L3SQCREG1_SQGHPCI_DEFAULT 0x00d30000
#define HSW_L3SQCREG1_SQGHPCI_DEFAULT 0x00610000

/**
 * Program the hardware to use the specified L3 configuration.
 */
void
genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
                           const struct gen_l3_config *cfg)
{
   if (cfg == cmd_buffer->state.current_l3_config)
      return;

   if (unlikely(INTEL_DEBUG & DEBUG_L3)) {
      fprintf(stderr, "L3 config transition: ");
      gen_dump_l3_config(cfg, stderr);
   }

   const bool has_slm = cfg->n[GEN_L3P_SLM];

   /* According to the hardware docs, the L3 partitioning can only be changed
    * while the pipeline is completely drained and the caches are flushed,
    * which involves a first PIPE_CONTROL flush which stalls the pipeline...
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

   /* ...followed by a second pipelined PIPE_CONTROL that initiates
    * invalidation of the relevant caches.  Note that because RO invalidation
    * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
    * command is processed by the CS) we cannot combine it with the previous
    * stalling flush as the hardware documentation suggests, because that
    * would cause the CS to stall on previous rendering *after* RO
    * invalidation and wouldn't prevent the RO caches from being polluted by
    * concurrent rendering before the stall completes.  This intentionally
    * doesn't implement the SKL+ hardware workaround suggesting to enable CS
    * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
    * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
    * already guarantee that there is no concurrent GPGPU kernel execution
    * (see SKL HSD 2132585).
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable = true;
      pc.ConstantCacheInvalidationEnable = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.StateCacheInvalidationEnable = true;
      pc.PostSyncOperation = NoWrite;
   }

   /* Now send a third stalling flush to make sure that invalidation is
    * complete when the L3 configuration registers are modified.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DCFlushEnable = true;
      pc.PostSyncOperation = NoWrite;
      pc.CommandStreamerStallEnable = true;
   }

#if GEN_GEN >= 8

   assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);

   uint32_t l3cr;
   anv_pack_struct(&l3cr, GENX(L3CNTLREG),
                   .SLMEnable = has_slm,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC],
                   .AllAllocation = cfg->n[GEN_L3P_ALL]);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG_num), l3cr);

#else

   const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
   const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] ||
                       cfg->n[GEN_L3P_ALL];
   const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];
   const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] ||
                      cfg->n[GEN_L3P_ALL];

   assert(!cfg->n[GEN_L3P_ALL]);

   /* When enabled SLM only uses a portion of the L3 on half of the banks,
    * the matching space on the remaining banks has to be allocated to a
    * client (URB for all validated configurations) set to the
    * lower-bandwidth 2-bank address hashing mode.
    */
   const struct gen_device_info *devinfo = &cmd_buffer->device->info;
   const bool urb_low_bw = has_slm && !devinfo->is_baytrail;
   assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);

   /* Minimum number of ways that can be allocated to the URB. */
   const unsigned n0_urb = (devinfo->is_baytrail ? 32 : 0);
   assert(cfg->n[GEN_L3P_URB] >= n0_urb);

   uint32_t l3sqcr1, l3cr2, l3cr3;
   anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1),
                   .ConvertDC_UC = !has_dc,
                   .ConvertIS_UC = !has_is,
                   .ConvertC_UC = !has_c,
                   .ConvertT_UC = !has_t);
   l3sqcr1 |=
      GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT :
      devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT :
      IVB_L3SQCREG1_SQGHPCI_DEFAULT;

   anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
                   .SLMEnable = has_slm,
                   .URBLowBandwidth = urb_low_bw,
                   .URBAllocation = cfg->n[GEN_L3P_URB],
#if !GEN_IS_HASWELL
                   .ALLAllocation = cfg->n[GEN_L3P_ALL],
#endif
                   .ROAllocation = cfg->n[GEN_L3P_RO],
                   .DCAllocation = cfg->n[GEN_L3P_DC]);

   anv_pack_struct(&l3cr3, GENX(L3CNTLREG3),
                   .ISAllocation = cfg->n[GEN_L3P_IS],
                   .ISLowBandwidth = 0,
                   .CAllocation = cfg->n[GEN_L3P_C],
                   .CLowBandwidth = 0,
                   .TAllocation = cfg->n[GEN_L3P_T],
                   .TLowBandwidth = 0);

   /* Set up the L3 partitioning. */
   emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2);
   emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);

#if GEN_IS_HASWELL
   if (cmd_buffer->device->instance->physicalDevice.cmd_parser_version >= 4) {
      /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
       * them disabled to avoid crashing the system hard.
       */
      uint32_t scratch1, chicken3;
      anv_pack_struct(&scratch1, GENX(SCRATCH1),
                      .L3AtomicDisable = !has_dc);
      anv_pack_struct(&chicken3, GENX(CHICKEN3),
                      .L3AtomicDisableMask = true,
                      .L3AtomicDisable = !has_dc);
      emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1);
      emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3);
   }
#endif

#endif

   cmd_buffer->state.current_l3_config = cfg;
}

void
genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
{
   enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;

   /* Flushes are pipelined while invalidations are handled immediately.
    * Therefore, if we're flushing anything then we need to schedule a stall
    * before any invalidations can happen.
    */
   if (bits & ANV_PIPE_FLUSH_BITS)
      bits |= ANV_PIPE_NEEDS_CS_STALL_BIT;

   /* If we're going to do an invalidate and we have a pending CS stall that
    * has yet to be resolved, we do the CS stall now.
    */
   if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
       (bits & ANV_PIPE_NEEDS_CS_STALL_BIT)) {
      bits |= ANV_PIPE_CS_STALL_BIT;
      bits &= ~ANV_PIPE_NEEDS_CS_STALL_BIT;
   }
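
   /* Worked example (hypothetical): a barrier that flushes the render
    * target cache and also invalidates the texture cache first gains
    * NEEDS_CS_STALL from the flush; because an invalidate is pending too,
    * that is promoted to a real CS_STALL here, so the flushing PIPE_CONTROL
    * below stalls before the invalidating PIPE_CONTROL is parsed.
    */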

   if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT)) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         pipe.RenderTargetCacheFlushEnable =
            bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;

         pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
         pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
         pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;

         /*
          * According to the Broadwell documentation, any PIPE_CONTROL with the
          * "Command Streamer Stall" bit set must also have another bit set,
          * with six different options:
          *
          *  - Render Target Cache Flush
          *  - Depth Cache Flush
          *  - Stall at Pixel Scoreboard
          *  - Post-Sync Operation
          *  - Depth Stall
          *  - DC Flush
          *
          * I chose "Stall at Pixel Scoreboard" since that's what we use in
          * mesa and it seems to work fine.  The choice is fairly arbitrary.
          */
         if ((bits & ANV_PIPE_CS_STALL_BIT) &&
             !(bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_DEPTH_STALL_BIT |
                       ANV_PIPE_STALL_AT_SCOREBOARD_BIT)))
            pipe.StallAtPixelScoreboard = true;
      }

      bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT);
   }

   if (bits & ANV_PIPE_INVALIDATE_BITS) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
         pipe.StateCacheInvalidationEnable =
            bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
         pipe.ConstantCacheInvalidationEnable =
            bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe.VFCacheInvalidationEnable =
            bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         pipe.TextureCacheInvalidationEnable =
            bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         pipe.InstructionCacheInvalidateEnable =
            bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
      }

      bits &= ~ANV_PIPE_INVALIDATE_BITS;
   }

   cmd_buffer->state.pending_pipe_bits = bits;
}

void genX(CmdPipelineBarrier)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    VkDependencyFlags                           dependencyFlags,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   /* XXX: Right now, we're really dumb and just flush whatever categories
    * the app asks for.  One of these days we may make this a bit better
    * but right now that's all the hardware allows for in most areas.
    */
   VkAccessFlags src_flags = 0;
   VkAccessFlags dst_flags = 0;

   for (uint32_t i = 0; i < memoryBarrierCount; i++) {
      src_flags |= pMemoryBarriers[i].srcAccessMask;
      dst_flags |= pMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
      src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
      dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
   }

   for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
      src_flags |= pImageMemoryBarriers[i].srcAccessMask;
      dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
   }

   enum anv_pipe_bits pipe_bits = 0;

   for_each_bit(b, src_flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_SHADER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DATA_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      case VK_ACCESS_TRANSFER_WRITE_BIT:
         pipe_bits |= ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
         pipe_bits |= ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   for_each_bit(b, dst_flags) {
      switch ((VkAccessFlagBits)(1 << b)) {
      case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
      case VK_ACCESS_INDEX_READ_BIT:
      case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
         pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_UNIFORM_READ_BIT:
         pipe_bits |= ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      case VK_ACCESS_SHADER_READ_BIT:
      case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
      case VK_ACCESS_TRANSFER_READ_BIT:
         pipe_bits |= ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
         break;
      default:
         break; /* Nothing to do */
      }
   }

   cmd_buffer->state.pending_pipe_bits |= pipe_bits;
}

static void
cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags stages = cmd_buffer->state.pipeline->active_stages;

   /* In order to avoid thrash, we assume that vertex and fragment stages
    * always exist.  In the rare case where one is missing *and* the other
    * uses push constants, this may be suboptimal.  However, avoiding stalls
    * seems more important.
    */
   stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;

   if (stages == cmd_buffer->state.push_constant_stages)
      return;

#if GEN_GEN >= 8
   const unsigned push_constant_kb = 32;
#elif GEN_IS_HASWELL
   const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
#else
   const unsigned push_constant_kb = 16;
#endif

   const unsigned num_stages =
      _mesa_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
   unsigned size_per_stage = push_constant_kb / num_stages;

   /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
    * units of 2KB.  Incidentally, these are the same platforms that have
    * 32KB worth of push constant space.
    */
   if (push_constant_kb == 32)
      size_per_stage &= ~1u;
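
   /* Worked example (assuming all five graphics stages are active on a
    * 32KB platform): 32 / 5 = 6KB per stage, which is already a multiple
    * of 2KB, so the rounding above leaves it alone; the four loop
    * iterations below then consume 24KB and the PS allocation picks up
    * the remaining 8KB.
    */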

   uint32_t kb_used = 0;
   for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
      unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
         alloc._3DCommandSubOpcode  = 18 + i;
         alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
         alloc.ConstantBufferSize   = push_size;
      }
      kb_used += push_size;
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
      alloc.ConstantBufferOffset = kb_used;
      alloc.ConstantBufferSize = push_constant_kb - kb_used;
   }

   cmd_buffer->state.push_constant_stages = stages;

   /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
    *
    *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
    *    the next 3DPRIMITIVE command after programming the
    *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
    *
    * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
    * pipeline setup, we need to dirty push constants.
    */
   cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
}

static void
add_surface_state_reloc(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_state state, struct anv_bo *bo,
                        uint32_t offset)
{
   /* The address goes in SURFACE_STATE dword 1 for gens < 8 and dwords 8 and
    * 9 for gen8+.  We only write the first dword for gen8+ here and rely on
    * the initial state to set the high bits to 0.
    */
   const uint32_t dword = GEN_GEN < 8 ? 1 : 8;

   anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
                      state.offset + dword * 4, bo, offset);
}

static struct anv_state
alloc_null_surface_state(struct anv_cmd_buffer *cmd_buffer,
                         struct anv_framebuffer *fb)
{
   struct anv_state state =
      anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);

   struct GENX(RENDER_SURFACE_STATE) null_ss = {
      .SurfaceType = SURFTYPE_NULL,
      .SurfaceArray = fb->layers > 0,
      .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
#if GEN_GEN >= 8
      .TileMode = YMAJOR,
#else
      .TiledSurface = true,
#endif
      .Width = fb->width - 1,
      .Height = fb->height - 1,
      .Depth = fb->layers - 1,
      .RenderTargetViewExtent = fb->layers - 1,
   };

   GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}

static VkResult
emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
                   gl_shader_stage stage,
                   struct anv_state *bt_state)
{
   struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   struct anv_subpass *subpass = cmd_buffer->state.subpass;
   struct anv_pipeline *pipeline;
   uint32_t bias, state_offset;

   switch (stage) {
   case MESA_SHADER_COMPUTE:
      pipeline = cmd_buffer->state.compute_pipeline;
      bias = 1;
      break;
   default:
      pipeline = cmd_buffer->state.pipeline;
      bias = 0;
      break;
   }

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (bias + map->surface_count == 0) {
      *bt_state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
                                                  bias + map->surface_count,
                                                  &state_offset);
   uint32_t *bt_map = bt_state->map;

   if (bt_state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   if (stage == MESA_SHADER_COMPUTE &&
       get_cs_prog_data(cmd_buffer->state.compute_pipeline)->uses_num_work_groups) {
      struct anv_bo *bo = cmd_buffer->state.num_workgroups_bo;
      uint32_t bo_offset = cmd_buffer->state.num_workgroups_offset;

      struct anv_state surface_state;
      surface_state =
         anv_cmd_buffer_alloc_surface_state(cmd_buffer);

      const enum isl_format format =
         anv_isl_format_for_descriptor_type(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
      anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
                                    format, bo_offset, 12, 1);
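
      /* The 12-byte range covers the three dwords of the dispatch size
       * (x, y, z) that the shader reads back as the number-of-workgroups
       * built-in; see CmdDispatch below, which writes exactly those three
       * values.
       */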
      bt_map[0] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }

   if (map->surface_count == 0)
      goto out;

   if (map->image_count > 0) {
      VkResult result =
         anv_cmd_buffer_ensure_push_constant_field(cmd_buffer, stage, images);
      if (result != VK_SUCCESS)
         return result;

      cmd_buffer->state.push_constants_dirty |= 1 << stage;
   }

   uint32_t image = 0;
   for (uint32_t s = 0; s < map->surface_count; s++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];

      struct anv_state surface_state;
      struct anv_bo *bo;
      uint32_t bo_offset;

      if (binding->set == ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS) {
         /* Color attachment binding */
         assert(stage == MESA_SHADER_FRAGMENT);
         assert(binding->binding == 0);
         if (binding->index < subpass->color_count) {
            const struct anv_image_view *iview =
               fb->attachments[subpass->color_attachments[binding->index]];

            assert(iview->color_rt_surface_state.alloc_size);
            surface_state = iview->color_rt_surface_state;
            add_surface_state_reloc(cmd_buffer, iview->color_rt_surface_state,
                                    iview->bo, iview->offset);
         } else {
            /* Null render target */
            struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
            surface_state = alloc_null_surface_state(cmd_buffer, fb);
         }

         bt_map[bias + s] = surface_state.offset + state_offset;
         continue;
      }

      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      switch (desc->type) {
      case VK_DESCRIPTOR_TYPE_SAMPLER:
         /* Nothing for us to do here */
         continue;

      case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
      case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE:
      case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
         surface_state = desc->image_view->sampler_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
         surface_state = desc->image_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->image_view->bo;
         bo_offset = desc->image_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->image_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
      case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
      case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
         surface_state = desc->buffer_view->surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;
         break;

      case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER: {
         surface_state = desc->buffer_view->storage_surface_state;
         assert(surface_state.alloc_size);
         bo = desc->buffer_view->bo;
         bo_offset = desc->buffer_view->offset;

         struct brw_image_param *image_param =
            &cmd_buffer->state.push_constants[stage]->images[image++];

         *image_param = desc->buffer_view->storage_image_param;
         image_param->surface_idx = bias + s;
         break;
      }

      default:
         assert(!"Invalid descriptor type");
         continue;
      }

      bt_map[bias + s] = surface_state.offset + state_offset;
      add_surface_state_reloc(cmd_buffer, surface_state, bo, bo_offset);
   }
   assert(image == map->image_count);

 out:
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*bt_state);

   return VK_SUCCESS;
}

static VkResult
emit_samplers(struct anv_cmd_buffer *cmd_buffer,
              gl_shader_stage stage,
              struct anv_state *state)
{
   struct anv_pipeline *pipeline;

   if (stage == MESA_SHADER_COMPUTE)
      pipeline = cmd_buffer->state.compute_pipeline;
   else
      pipeline = cmd_buffer->state.pipeline;

   if (!anv_pipeline_has_stage(pipeline, stage)) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   struct anv_pipeline_bind_map *map = &pipeline->shaders[stage]->bind_map;
   if (map->sampler_count == 0) {
      *state = (struct anv_state) { 0, };
      return VK_SUCCESS;
   }

   uint32_t size = map->sampler_count * 16;
   *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);

   if (state->map == NULL)
      return VK_ERROR_OUT_OF_DEVICE_MEMORY;

   for (uint32_t s = 0; s < map->sampler_count; s++) {
      struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
      struct anv_descriptor_set *set =
         cmd_buffer->state.descriptors[binding->set];
      uint32_t offset = set->layout->binding[binding->binding].descriptor_index;
      struct anv_descriptor *desc = &set->descriptors[offset + binding->index];

      if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
          desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
         continue;

      struct anv_sampler *sampler = desc->sampler;

      /* This can happen if we have an unfilled slot since TYPE_SAMPLER
       * happens to be zero.
       */
      if (sampler == NULL)
         continue;

      memcpy(state->map + (s * 16),
             sampler->state, sizeof(sampler->state));
   }

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(*state);

   return VK_SUCCESS;
}

static uint32_t
flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = emit_binding_table(cmd_buffer, s,
                                  &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = emit_samplers(cmd_buffer, s, &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = emit_binding_table(cmd_buffer, s,
                                     &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}

static void
cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
                                    uint32_t stages)
{
   static const uint32_t sampler_state_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 43,
      [MESA_SHADER_TESS_CTRL] = 44, /* HS */
      [MESA_SHADER_TESS_EVAL] = 45, /* DS */
      [MESA_SHADER_GEOMETRY]  = 46,
      [MESA_SHADER_FRAGMENT]  = 47,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   static const uint32_t binding_table_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 38,
      [MESA_SHADER_TESS_CTRL] = 39,
      [MESA_SHADER_TESS_EVAL] = 40,
      [MESA_SHADER_GEOMETRY]  = 41,
      [MESA_SHADER_FRAGMENT]  = 42,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   anv_foreach_stage(s, stages) {
      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
         anv_batch_emit(&cmd_buffer->batch,
                        GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
            ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
            ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
         }
      }

      /* Always emit binding table pointers if we're asked to, since on SKL
       * this is what flushes push constants. */
      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
         btp._3DCommandSubOpcode = binding_table_opcodes[s];
         btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
      }
   }
}

static VkShaderStageFlags
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]    = 21,
      [MESA_SHADER_TESS_CTRL] = 25, /* HS */
      [MESA_SHADER_TESS_EVAL] = 26, /* DS */
      [MESA_SHADER_GEOMETRY]  = 22,
      [MESA_SHADER_FRAGMENT]  = 23,
      [MESA_SHADER_COMPUTE]   = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      if (stage == MESA_SHADER_COMPUTE)
         continue;

      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      if (state.offset == 0) {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
            c._3DCommandSubOpcode = push_constant_opcodes[stage];
      } else {
         anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
            c._3DCommandSubOpcode = push_constant_opcodes[stage];
            c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
#if GEN_GEN >= 9
               .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
               .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
#else
               .PointerToConstantBuffer0 = { .offset = state.offset },
               .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
#endif
            };
         }
      }

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;

   return flushed;
}

void
genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   if (vb_emit) {
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GENX(3DSTATE_VERTEX_BUFFERS));
      uint32_t i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GENX(VERTEX_BUFFER_STATE) state = {
            .VertexBufferIndex = vb,

#if GEN_GEN >= 8
            .MemoryObjectControlState = GENX(MOCS),
#else
            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
            .InstanceDataStepRate = 1,
            .VertexBufferMemoryObjectControlState = GENX(MOCS),
#endif

            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },

#if GEN_GEN >= 8
            .BufferSize = buffer->size - offset
#else
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
#endif
         };

         GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

      /* The exact descriptor layout is pulled from the pipeline, so we need
       * to re-emit binding tables on every pipeline change.
       */
      cmd_buffer->state.descriptors_dirty |=
         cmd_buffer->state.pipeline->active_stages;

      /* If the pipeline changed, we may need to re-allocate push constant
       * space in the URB.
       */
      cmd_buffer_alloc_push_constants(cmd_buffer);
   }

#if GEN_GEN <= 7
   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthStallEnable  = true;
         pc.PostSyncOperation = WriteImmediateData;
         pc.Address           =
            (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
      }
   }
#endif

   /* Render targets live in the same binding table as fragment descriptors */
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
      cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;

   /* We emit the binding tables and sampler tables first, then emit push
    * constants and then finally emit binding table and sampler table
    * pointers.  It has to happen in this order, since emitting the binding
    * tables may change the push constants (in case of storage images).  After
    * emitting push constants, on SKL+ we have to emit the corresponding
    * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
    */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty)
      dirty = flush_descriptor_sets(cmd_buffer);

   if (cmd_buffer->state.push_constants_dirty) {
#if GEN_GEN >= 9
      /* On Sky Lake and later, the binding table pointers commands are
       * what actually flush the changes to push constant state so we need
       * to dirty them so they get re-emitted below.
       */
      dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
#else
      cmd_buffer_flush_push_constants(cmd_buffer);
#endif
   }

   if (dirty)
      cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
                                  ANV_CMD_DIRTY_PIPELINE)) {
      gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
                                          pipeline->depth_clamp_enable);
   }

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}

static void
emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_bo *bo, uint32_t offset)
{
   uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
                                 GENX(3DSTATE_VERTEX_BUFFERS));

   GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
      &(struct GENX(VERTEX_BUFFER_STATE)) {
         .VertexBufferIndex = 32, /* Reserved for this */
         .AddressModifyEnable = true,
         .BufferPitch = 0,
#if (GEN_GEN >= 8)
         .MemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .BufferSize = 8
#else
         .VertexBufferMemoryObjectControlState = GENX(MOCS),
         .BufferStartingAddress = { bo, offset },
         .EndAddress = { bo, offset + 8 },
#endif
      });
}

static void
emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
                          uint32_t base_vertex, uint32_t base_instance)
{
   struct anv_state id_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);

   ((uint32_t *)id_state.map)[0] = base_vertex;
   ((uint32_t *)id_state.map)[1] = base_instance;

   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(id_state);

   emit_base_vertex_instance_bo(cmd_buffer,
      &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
}
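
/* Note: the base vertex/instance values consumed by the VS as system
 * values are simply two dwords written at offsets 0 and 4 of a small
 * dynamic-state allocation, bound above as vertex buffer 32 so the VF
 * fetches them like ordinary vertex data.
 */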

void genX(CmdDraw)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    vertexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstVertex,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = vertexCount;
      prim.StartVertexLocation      = firstVertex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = 0;
   }
}

void genX(CmdDrawIndexed)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    indexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstIndex,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
      prim.VertexCountPerInstance   = indexCount;
      prim.StartVertexLocation      = firstIndex;
      prim.InstanceCount            = instanceCount;
      prim.StartInstanceLocation    = firstInstance;
      prim.BaseVertexLocation       = vertexOffset;
   }
}

/* Auto-Draw / Indirect Registers */
#define GEN7_3DPRIM_END_OFFSET          0x2420
#define GEN7_3DPRIM_START_VERTEX        0x2430
#define GEN7_3DPRIM_VERTEX_COUNT        0x2434
#define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
#define GEN7_3DPRIM_START_INSTANCE      0x243C
#define GEN7_3DPRIM_BASE_VERTEX         0x2440
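
/* For reference: VkDrawIndirectCommand is four packed uint32_t values
 * (vertexCount, instanceCount, firstVertex, firstInstance) at byte offsets
 * 0/4/8/12, and VkDrawIndexedIndirectCommand is (indexCount, instanceCount,
 * firstIndex, vertexOffset, firstInstance) at offsets 0/4/8/12/16.  The
 * MI_LOAD_REGISTER_MEM sequences below load the registers above straight
 * from those buffer offsets.
 */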

void genX(CmdDrawIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);

   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.IndirectParameterEnable  = true;
      prim.VertexAccessType         = SEQUENTIAL;
      prim.PrimitiveTopologyType    = pipeline->topology;
   }
}

void genX(CmdDrawIndexedIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   genX(cmd_buffer_flush_state)(cmd_buffer);

   /* TODO: We need to stomp base vertex to 0 somehow */
   if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
      emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);

   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
      prim.IndirectParameterEnable  = true;
      prim.VertexAccessType         = RANDOM;
      prim.PrimitiveTopologyType    = pipeline->topology;
   }
}

static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS) {
      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      genX(cmd_buffer_emit_state_base_address)(cmd_buffer);

      result = emit_binding_table(cmd_buffer, MESA_SHADER_COMPUTE, &surfaces);
      assert(result == VK_SUCCESS);
   }

   result = emit_samplers(cmd_buffer, MESA_SHADER_COMPUTE, &samplers);
   assert(result == VK_SUCCESS);

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

   const uint32_t slm_size = encode_slm_size(GEN_GEN, prog_data->total_shared);

   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .BindingTablePointer = surfaces.offset,
                          .BindingTableEntryCount = 0,
                          .SamplerStatePointer = samplers.offset,
#if !GEN_IS_HASWELL
                          .ConstantURBEntryReadOffset = 0,
#endif
                          .ConstantURBEntryReadLength =
                             cs_prog_data->push.per_thread.regs,
#if GEN_GEN >= 8 || GEN_IS_HASWELL
                          .CrossThreadConstantDataReadLength =
                             cs_prog_data->push.cross_thread.regs,
#endif
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             cs_prog_data->threads);

   uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
      mid.InterfaceDescriptorTotalLength        = size;
      mid.InterfaceDescriptorDataStartAddress   = state.offset;
   }

   return VK_SUCCESS;
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   MAYBE_UNUSED VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->urb.l3_config);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
       *
       *    "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
       *    the only bits that are changed are scoreboard related: Scoreboard
       *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta.  For
       *    these scoreboard related states, a MEDIA_STATE_FLUSH is
       *    sufficient."
       */
      cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
      genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) {
      struct anv_state push_state =
         anv_cmd_buffer_cs_push_constants(cmd_buffer);

      if (push_state.alloc_size) {
         anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
            curbe.CURBETotalDataLength    = push_state.alloc_size;
            curbe.CURBEDataStartAddress   = push_state.offset;
         }
      }
   }

   cmd_buffer->state.compute_dirty = 0;

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}

static bool
verify_cmd_parser(const struct anv_device *device,
                  int required_version,
                  const char *function)
{
   if (device->instance->physicalDevice.cmd_parser_version < required_version) {
      vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
                "cmd parser version %d is required for %s",
                required_version, function);
      return false;
   } else {
      return true;
   }
}

void genX(CmdDispatch)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);

   if (prog_data->uses_num_work_groups) {
      struct anv_state state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
      uint32_t *sizes = state.map;
      sizes[0] = x;
      sizes[1] = y;
      sizes[2] = z;
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(state);
      cmd_buffer->state.num_workgroups_offset = state.offset;
      cmd_buffer->state.num_workgroups_bo =
         &cmd_buffer->device->dynamic_state_block_pool.bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
      ggw.SIMDSize                     = prog_data->simd_size / 16;
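      /* The SIMD size field encodes 0 = SIMD8, 1 = SIMD16, 2 = SIMD32, so
       * dividing the compiled dispatch width by 16 maps 8/16/32 to 0/1/2.
       */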
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.ThreadGroupIDXDimension      = x;
      ggw.ThreadGroupIDYDimension      = y;
      ggw.ThreadGroupIDZDimension      = z;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
}

#define GPGPU_DISPATCHDIMX 0x2500
#define GPGPU_DISPATCHDIMY 0x2504
#define GPGPU_DISPATCHDIMZ 0x2508

#define MI_PREDICATE_SRC0  0x2400
#define MI_PREDICATE_SRC1  0x2408

void genX(CmdDispatchIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;
   struct anv_batch *batch = &cmd_buffer->batch;

#if GEN_GEN == 7
   /* Linux 4.4 added command parser version 5 which allows the GPGPU
    * indirect dispatch registers to be written.
    */
   if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
      return;
#endif

   if (prog_data->uses_num_work_groups) {
      cmd_buffer->state.num_workgroups_offset = bo_offset;
      cmd_buffer->state.num_workgroups_bo = bo;
   }

   genX(cmd_buffer_flush_compute_state)(cmd_buffer);

   emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
   emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
   emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);

#if GEN_GEN <= 7
   /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
   emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
   emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);

   /* Load compute_dispatch_indirect_x_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);

   /* predicate = (compute_dispatch_indirect_x_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_SET;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_y_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);

   /* predicate |= (compute_dispatch_indirect_y_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* Load compute_dispatch_indirect_z_size into SRC0 */
   emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);

   /* predicate |= (compute_dispatch_indirect_z_size == 0); */
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOAD;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_SRCS_EQUAL;
   }

   /* predicate = !predicate; */
#define COMPARE_FALSE 1
   anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
      mip.LoadOperation    = LOAD_LOADINV;
      mip.CombineOperation = COMBINE_OR;
      mip.CompareOperation = COMPARE_FALSE;
   }
#endif
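
   /* Net effect of the predicate sequence above: the predicated walker
    * below runs only when all three indirect dispatch dimensions are
    * non-zero, i.e. predicate = !(x == 0 || y == 0 || z == 0).
    */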
   anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
      ggw.IndirectParameterEnable      = true;
      ggw.PredicateEnable              = GEN_GEN <= 7;
      ggw.SIMDSize                     = prog_data->simd_size / 16;
      ggw.ThreadDepthCounterMaximum    = 0;
      ggw.ThreadHeightCounterMaximum   = 0;
      ggw.ThreadWidthCounterMaximum    = prog_data->threads - 1;
      ggw.RightExecutionMask           = pipeline->cs_right_mask;
      ggw.BottomExecutionMask          = 0xffffffff;
   }

   anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
}

static void
flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
                                      uint32_t pipeline)
{
#if GEN_GEN >= 8 && GEN_GEN < 10
   /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
    *
    *   Software must clear the COLOR_CALC_STATE Valid field in
    *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
    *   with Pipeline Select set to GPGPU.
    *
    * The internal hardware docs recommend the same workaround for Gen9
    * hardware too.
    */
   if (pipeline == GPGPU)
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
#endif

   /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
    * PIPELINE_SELECT [DevBWR+]":
    *
    *   Project: DEVSNB+
    *
    *   Software must ensure all the write caches are flushed through a
    *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
    *   command to invalidate read only caches prior to programming
    *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.RenderTargetCacheFlushEnable  = true;
      pc.DepthCacheFlushEnable         = true;
      pc.DCFlushEnable                 = true;
      pc.PostSyncOperation             = NoWrite;
      pc.CommandStreamerStallEnable    = true;
   }

   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.TextureCacheInvalidationEnable   = true;
      pc.ConstantCacheInvalidationEnable  = true;
      pc.StateCacheInvalidationEnable     = true;
      pc.InstructionCacheInvalidateEnable = true;
      pc.PostSyncOperation                = NoWrite;
   }
}

void
genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.current_pipeline != _3D) {
      flush_pipeline_before_pipeline_select(cmd_buffer, _3D);

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
#if GEN_GEN >= 9
         ps.MaskBits = 3;
#endif
         ps.PipelineSelection = _3D;
      }

      cmd_buffer->state.current_pipeline = _3D;
   }
}

void
genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.current_pipeline != GPGPU) {
      flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);

      anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
#if GEN_GEN >= 9
         ps.MaskBits = 3;
#endif
         ps.PipelineSelection = GPGPU;
      }

      cmd_buffer->state.current_pipeline = GPGPU;
   }
}

void
genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer)
{
   if (GEN_GEN >= 8)
      return;

   /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER:
    *
    *    "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any
    *    combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
    *    3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
    *    issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
    *    set), followed by a pipelined depth cache flush (PIPE_CONTROL with
    *    Depth Flush Bit set, followed by another pipelined depth stall
    *    (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
    *    guarantee that the pipeline from WM onwards is already flushed (e.g.,
    *    via a preceding MI_FLUSH)."
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthStallEnable = true;
   }
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthCacheFlushEnable = true;
   }
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
      pipe.DepthStallEnable = true;
   }
}

static void
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
   const struct anv_image *image = iview ? iview->image : NULL;
   const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
   const bool has_hiz = image != NULL && anv_image_has_hiz(image);
   const bool has_stencil =
      image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);

   /* FIXME: Implement the PMA stall W/A */
   /* FIXME: Width and Height are wrong */

   genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);

   /* Emit 3DSTATE_DEPTH_BUFFER */
   if (has_depth) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
         db.SurfaceType                   = SURFTYPE_2D;
         db.DepthWriteEnable              = true;
         db.StencilWriteEnable            = has_stencil;

         if (cmd_buffer->state.pass->subpass_count == 1) {
            db.HierarchicalDepthBufferEnable = has_hiz;
         } else {
            anv_finishme("Multiple-subpass HiZ not implemented");
         }

         db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
                                                      &image->depth_surface.isl);

         db.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->depth_surface.offset,
         };
         db.DepthBufferObjectControlState = GENX(MOCS);

         db.SurfacePitch         = image->depth_surface.isl.row_pitch - 1;
         db.Height               = image->extent.height - 1;
         db.Width                = image->extent.width - 1;
         db.LOD                  = iview->isl.base_level;
         db.Depth                = image->array_size - 1; /* FIXME: 3-D */
         db.MinimumArrayElement  = iview->isl.base_array_layer;

#if GEN_GEN >= 8
         db.SurfaceQPitch =
            isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2;
#endif
         db.RenderTargetViewExtent = 1 - 1;
      }
   } else {
      /* Even when no depth buffer is present, the hardware requires that
       * 3DSTATE_DEPTH_BUFFER be programmed correctly.  The Broadwell PRM says:
       *
       *    If a null depth buffer is bound, the driver must instead bind depth as:
       *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
       *       3DSTATE_DEPTH.Width = 1
       *       3DSTATE_DEPTH.Height = 1
       *       3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
       *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
       *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
       *
       * The PRM is wrong, though.  The width and height must be programmed to
       * the actual framebuffer's width and height, even when neither depth
       * buffer nor stencil buffer is present.  Also, D16_UNORM is not allowed
       * to be combined with a stencil buffer so we use D32_FLOAT instead.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
         db.SurfaceType          = SURFTYPE_2D;
         db.SurfaceFormat        = D32_FLOAT;
         db.Width                = fb->width - 1;
         db.Height               = fb->height - 1;
         db.StencilWriteEnable   = has_stencil;
      }
   }

   /* Emit 3DSTATE_HIER_DEPTH_BUFFER */
   if (has_hiz) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb) {
         hdb.HierarchicalDepthBufferObjectControlState = GENX(MOCS);
         hdb.SurfacePitch = image->aux_surface.isl.row_pitch - 1;
         hdb.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->aux_surface.offset,
         };

         /* From the SKL PRM Vol2a:
          *
          *    The interpretation of this field is dependent on Surface Type
          *    as follows:
          *
          *    - SURFTYPE_1D: distance in pixels between array slices
          *    - SURFTYPE_2D/CUBE: distance in rows between array slices
          *    - SURFTYPE_3D: distance in rows between R - slices
          */
         hdb.SurfaceQPitch =
            image->aux_surface.isl.dim == ISL_SURF_DIM_1D ?
               isl_surf_get_array_pitch_el(&image->aux_surface.isl) >> 2 :
               isl_surf_get_array_pitch_el_rows(&image->aux_surface.isl) >> 2;
      }
   } else {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hdb);
   }

   /* Emit 3DSTATE_STENCIL_BUFFER */
   if (has_stencil) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
#if GEN_GEN >= 8 || GEN_IS_HASWELL
         sb.StencilBufferEnable = true;
#endif
         sb.StencilBufferObjectControlState = GENX(MOCS);
         sb.SurfacePitch = image->stencil_surface.isl.row_pitch - 1;
#if GEN_GEN >= 8
         sb.SurfaceQPitch =
            isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2;
#endif
         sb.SurfaceBaseAddress = (struct anv_address) {
            .bo = image->bo,
            .offset = image->offset + image->stencil_surface.offset,
         };
      }
   } else {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
   }

   /* From the IVB PRM Vol2P1, 11.5.5.4 3DSTATE_CLEAR_PARAMS:
    *
    *    3DSTATE_CLEAR_PARAMS must always be programmed along with the other
    *    Depth/Stencil state commands (i.e. 3DSTATE_DEPTH_BUFFER,
    *    3DSTATE_STENCIL_BUFFER, or 3DSTATE_HIER_DEPTH_BUFFER).
    *
    * Testing also shows that some variant of this restriction may exist on
    * HSW+. On BDW+, it is not possible to emit 2 of these packets
    * consecutively when both have DepthClearValueValid set. An analysis of
    * such state programming on SKL showed that the GPU doesn't register the
    * latter packet's clear value.
    */
   anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp) {
      if (has_hiz) {
         cp.DepthClearValueValid = true;
         const uint32_t ds =
            cmd_buffer->state.subpass->depth_stencil_attachment;
         cp.DepthClearValue =
            cmd_buffer->state.attachments[ds].clear_value.depthStencil.depth;
      }
   }
}
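
/* Make the given subpass current: record it in the command buffer state,
 * re-emit the depth/stencil buffers, run any HiZ resolve and fast depth
 * clear the subpass requires, and clear the subpass attachments.
 */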
void
genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
                             struct anv_subpass *subpass)
{
   cmd_buffer->state.subpass = subpass;

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;

   cmd_buffer_emit_depth_stencil(cmd_buffer);
   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_HIZ_RESOLVE);
   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_CLEAR);

   anv_cmd_buffer_clear_subpass(cmd_buffer);
}

void genX(CmdBeginRenderPass)(
    VkCommandBuffer                             commandBuffer,
    const VkRenderPassBeginInfo*                pRenderPassBegin,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
   ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

   cmd_buffer->state.framebuffer = framebuffer;
   cmd_buffer->state.pass = pass;
   cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
   anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);

   genX(flush_pipeline_select_3d)(cmd_buffer);

   genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
}

void genX(CmdNextSubpass)(
    VkCommandBuffer                             commandBuffer,
    VkSubpassContents                           contents)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

   anv_cmd_buffer_resolve_subpass(cmd_buffer);
   genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
}

void genX(CmdEndRenderPass)(
    VkCommandBuffer                             commandBuffer)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);

   genX(cmd_buffer_emit_hz_op)(cmd_buffer, BLORP_HIZ_OP_DEPTH_RESOLVE);
   anv_cmd_buffer_resolve_subpass(cmd_buffer);

#ifndef NDEBUG
   anv_dump_add_framebuffer(cmd_buffer, cmd_buffer->state.framebuffer);
#endif
}
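
/* Query pool slots are laid out as written by CmdBeginQuery/CmdEndQuery
 * below: the depth count at query begin lands at byte 0 of the slot, the
 * depth count at query end at byte 8, and the availability word at byte 16.
 *
 * emit_ps_depth_count() snapshots the pixel-pipeline depth count (samples
 * that passed the depth test) into the given BO using a depth-stalling
 * PIPE_CONTROL post-sync write.
 */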
static void
emit_ps_depth_count(struct anv_cmd_buffer *cmd_buffer,
                    struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType = DAT_PPGTT;
      pc.PostSyncOperation = WritePSDepthCount;
      pc.DepthStallEnable = true;
      pc.Address = (struct anv_address) { bo, offset };

      if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
         pc.CommandStreamerStallEnable = true;
   }
}
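
/* Mark a query slot as available by writing an immediate 1 to its
 * availability word once all prior work has completed.
 */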
static void
emit_query_availability(struct anv_cmd_buffer *cmd_buffer,
                        struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
      pc.DestinationAddressType = DAT_PPGTT;
      pc.PostSyncOperation = WriteImmediateData;
      pc.Address = (struct anv_address) { bo, offset };
      pc.ImmediateData = 1;
   }
}

void genX(CmdBeginQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query,
    VkQueryControlFlags                         flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   /* Workaround: When meta uses the pipeline with the VS disabled, it seems
    * that the pipelining of the depth write breaks. What we see is that
    * samples from the render pass clear leak into the first query
    * immediately after the clear. Doing a pipecontrol with a post-sync
    * operation and DepthStallEnable seems to work around the issue.
    */
   if (cmd_buffer->state.need_query_wa) {
      cmd_buffer->state.need_query_wa = false;
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DepthCacheFlushEnable = true;
         pc.DepthStallEnable = true;
      }
   }

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(cmd_buffer, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot));
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
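
/* Ending a query writes the second depth-count snapshot at byte 8 of the
 * slot and then marks the slot available. The final result (end - begin) is
 * computed when the results are copied out.
 */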
void genX(CmdEndQuery)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);

   switch (pool->type) {
   case VK_QUERY_TYPE_OCCLUSION:
      emit_ps_depth_count(cmd_buffer, &pool->bo,
                          query * sizeof(struct anv_query_pool_slot) + 8);

      emit_query_availability(cmd_buffer, &pool->bo,
                              query * sizeof(struct anv_query_pool_slot) + 16);
      break;

   case VK_QUERY_TYPE_PIPELINE_STATISTICS:
   default:
      unreachable("");
   }
}
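
/* MMIO address of the command streamer's 64-bit timestamp register. For a
 * top-of-pipe timestamp we read it directly with MI_STORE_REGISTER_MEM;
 * every other stage is handled with a PIPE_CONTROL post-sync timestamp
 * write, as the switch below shows.
 */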
#define TIMESTAMP 0x2358

void genX(CmdWriteTimestamp)(
    VkCommandBuffer                             commandBuffer,
    VkPipelineStageFlagBits                     pipelineStage,
    VkQueryPool                                 queryPool,
    uint32_t                                    query)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   uint32_t offset = query * sizeof(struct anv_query_pool_slot);

   assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);

   switch (pipelineStage) {
   case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = TIMESTAMP;
         srm.MemoryAddress = (struct anv_address) { &pool->bo, offset };
      }
      anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = TIMESTAMP + 4;
         srm.MemoryAddress = (struct anv_address) { &pool->bo, offset + 4 };
      }
      break;

   default:
      /* Everything else is bottom-of-pipe */
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.DestinationAddressType = DAT_PPGTT;
         pc.PostSyncOperation = WriteTimestamp;
         pc.Address = (struct anv_address) { &pool->bo, offset };

         if (GEN_GEN == 9 && cmd_buffer->device->info.gt == 4)
            pc.CommandStreamerStallEnable = true;
      }
      break;
   }

   /* Mark the query available; the availability word is at byte 16 of the
    * slot, so index by the full slot offset.
    */
   emit_query_availability(cmd_buffer, &pool->bo, offset + 16);
}

#if GEN_GEN > 7 || GEN_IS_HASWELL
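
/* MI_MATH is only available on Haswell and later. Each instruction in an
 * MI_MATH payload is one DWord: the opcode in bits 31:20, operand 1 in bits
 * 19:10, and operand 2 in bits 9:0, as packed by the alu() macro below.
 */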
#define alu_opcode(v)   __gen_uint((v), 20, 31)
#define alu_operand1(v) __gen_uint((v), 10, 19)
#define alu_operand2(v) __gen_uint((v),  0,  9)
#define alu(opcode, operand1, operand2) \
   alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)

#define OPCODE_NOOP      0x000
#define OPCODE_LOAD      0x080
#define OPCODE_LOADINV   0x480
#define OPCODE_LOAD0     0x081
#define OPCODE_LOAD1     0x481
#define OPCODE_ADD       0x100
#define OPCODE_SUB       0x101
#define OPCODE_AND       0x102
#define OPCODE_OR        0x103
#define OPCODE_XOR       0x104
#define OPCODE_STORE     0x180
#define OPCODE_STOREINV  0x580

#define OPERAND_R0   0x00
#define OPERAND_R1   0x01
#define OPERAND_R2   0x02
#define OPERAND_R3   0x03
#define OPERAND_R4   0x04
#define OPERAND_SRCA 0x20
#define OPERAND_SRCB 0x21
#define OPERAND_ACCU 0x31
#define OPERAND_ZF   0x32
#define OPERAND_CF   0x33

#define CS_GPR(n) (0x2600 + (n) * 8)
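
/* The command streamer's general-purpose registers start at MMIO offset
 * 0x2600 and are 64 bits wide, so consecutive GPRs sit 8 bytes apart.
 * MI_LOAD_REGISTER_MEM and MI_STORE_REGISTER_MEM move 32 bits at a time,
 * which is why the 64-bit helpers below emit two of them each.
 */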
static void
emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
                      struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg;
      lrm.MemoryAddress = (struct anv_address) { bo, offset };
   }
   anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
      lrm.RegisterAddress = reg + 4;
      lrm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
   }
}

static void
store_query_result(struct anv_batch *batch, uint32_t reg,
                   struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
{
   anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
      srm.RegisterAddress = reg;
      srm.MemoryAddress = (struct anv_address) { bo, offset };
   }

   if (flags & VK_QUERY_RESULT_64_BIT) {
      anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
         srm.RegisterAddress = reg + 4;
         srm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
      }
   }
}
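
/* For each query in [firstQuery, firstQuery + queryCount), load the slot
 * into CS GPRs, reduce it to a single value in GPR2 (end - begin for
 * occlusion queries, the raw value for timestamps), and store that to the
 * destination buffer, optionally followed by the availability word.
 */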
void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
   ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
   uint32_t slot_offset, dst_offset;

   if (flags & VK_QUERY_RESULT_WAIT_BIT) {
      anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
         pc.CommandStreamerStallEnable = true;
         pc.StallAtPixelScoreboard = true;
      }
   }

   dst_offset = buffer->offset + destOffset;
   for (uint32_t i = 0; i < queryCount; i++) {
      slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
      switch (pool->type) {
      case VK_QUERY_TYPE_OCCLUSION:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(0), &pool->bo, slot_offset);
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(1), &pool->bo, slot_offset + 8);

         /* FIXME: We need to clamp the result for 32 bit. */
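
         /* Compute end - begin on the command streamer ALU: load the end
          * count (R1) into SRCA and the begin count (R0) into SRCB, subtract
          * (the accumulator gets SRCA - SRCB), then store the accumulator to
          * R2. dw[0] is the MI_MATH header written by anv_batch_emitn().
          */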
         uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
         dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
         dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
         dw[3] = alu(OPCODE_SUB, 0, 0);
         dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
         break;

      case VK_QUERY_TYPE_TIMESTAMP:
         emit_load_alu_reg_u64(&cmd_buffer->batch,
                               CS_GPR(2), &pool->bo, slot_offset);
         break;

      default:
         unreachable("unhandled query type");
      }

      store_query_result(&cmd_buffer->batch,
                         CS_GPR(2), buffer->bo, dst_offset, flags);

      if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
         emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
                               &pool->bo, slot_offset + 16);
         if (flags & VK_QUERY_RESULT_64_BIT)
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 8, flags);
         else
            store_query_result(&cmd_buffer->batch,
                               CS_GPR(0), buffer->bo, dst_offset + 4, flags);
      }

      dst_offset += destStride;
   }
}

#else

void genX(CmdCopyQueryPoolResults)(
    VkCommandBuffer                             commandBuffer,
    VkQueryPool                                 queryPool,
    uint32_t                                    firstQuery,
    uint32_t                                    queryCount,
    VkBuffer                                    destBuffer,
    VkDeviceSize                                destOffset,
    VkDeviceSize                                destStride,
    VkQueryResultFlags                          flags)
{
   anv_finishme("Queries not yet supported on Ivy Bridge");
}

#endif