2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
27 #include "anv_private.h"
29 #include "genxml/gen_macros.h"
30 #include "genxml/genX_pack.h"
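/* Note: this file is compiled once per hardware generation.  gen_macros.h
 * maps the genX()/GENX() wrappers used throughout to generation-prefixed
 * names (gen8_*, GEN8_*, and so on), so the genX_pack.h structures and pack
 * functions below resolve to the layout of whichever gen is being built.
 */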
33 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
35 struct anv_device *device = cmd_buffer->device;
36 struct anv_bo *scratch_bo = NULL;
38 cmd_buffer->state.scratch_size =
39 anv_block_pool_size(&device->scratch_block_pool);
40 if (cmd_buffer->state.scratch_size > 0)
41 scratch_bo = &device->scratch_block_pool.bo;
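   /* The scratch pool size is recorded on the command buffer so that
    * cmd_buffer_flush_state below can notice when a later-bound pipeline
    * needs more scratch than was available here and re-emit
    * STATE_BASE_ADDRESS.  The scratch BO itself becomes the General State
    * Base Address; scratch offsets programmed elsewhere are presumably
    * relative to that base.
    */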
43 /* XXX: Do we need this on more than just BDW? */
45 /* Emit a render target cache flush.
47 * This isn't documented anywhere in the PRM. However, it seems to be
48 * necessary prior to changing the surface state base address. Without
49 * this, we get GPU hangs when using multi-level command buffers which
50 * clear depth, reset state base address, and then go render stuff.
52 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
53 pc.RenderTargetCacheFlushEnable = true;
57 anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
58 sba.GeneralStateBaseAddress = (struct anv_address) { scratch_bo, 0 };
59 sba.GeneralStateMemoryObjectControlState = GENX(MOCS);
60 sba.GeneralStateBaseAddressModifyEnable = true;
62 sba.SurfaceStateBaseAddress =
63 anv_cmd_buffer_surface_base_address(cmd_buffer);
64 sba.SurfaceStateMemoryObjectControlState = GENX(MOCS);
65 sba.SurfaceStateBaseAddressModifyEnable = true;
67 sba.DynamicStateBaseAddress =
68 (struct anv_address) { &device->dynamic_state_block_pool.bo, 0 };
69 sba.DynamicStateMemoryObjectControlState = GENX(MOCS);
70 sba.DynamicStateBaseAddressModifyEnable = true;
72 sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
73 sba.IndirectObjectMemoryObjectControlState = GENX(MOCS);
74 sba.IndirectObjectBaseAddressModifyEnable = true;
76 sba.InstructionBaseAddress =
77 (struct anv_address) { &device->instruction_block_pool.bo, 0 };
78 sba.InstructionMemoryObjectControlState = GENX(MOCS);
79 sba.InstructionBaseAddressModifyEnable = true;
82 /* Broadwell requires that we specify a buffer size for a bunch of
83 * these fields. However, since we will be growing the BOs live, we
84 * just set them all to the maximum.
86 sba.GeneralStateBufferSize = 0xfffff;
87 sba.GeneralStateBufferSizeModifyEnable = true;
88 sba.DynamicStateBufferSize = 0xfffff;
89 sba.DynamicStateBufferSizeModifyEnable = true;
90 sba.IndirectObjectBufferSize = 0xfffff;
91 sba.IndirectObjectBufferSizeModifyEnable = true;
92 sba.InstructionBufferSize = 0xfffff;
93 sba.InstructionBuffersizeModifyEnable = true;
97 /* After re-setting the surface state base address, we have to do some
98 * cache flushing so that the sampler engine will pick up the new
99 * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
100 * Shared Function > 3D Sampler > State > State Caching (page 96):
102 * Coherency with system memory in the state cache, like the texture
103 * cache is handled partially by software. It is expected that the
104 * command stream or shader will issue Cache Flush operation or
105 * Cache_Flush sampler message to ensure that the L1 cache remains
106 * coherent with system memory.
110 * Whenever the value of the Dynamic_State_Base_Addr,
111 * Surface_State_Base_Addr are altered, the L1 state cache must be
112 * invalidated to ensure the new surface or sampler state is fetched
113 * from system memory.
115 * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
116 * which, according to the PIPE_CONTROL instruction documentation in the Broadwell PRM:
119 * Setting this bit is independent of any other bit in this packet.
120 * This bit controls the invalidation of the L1 and L2 state caches
121 * at the top of the pipe i.e. at the parsing time.
123 * Unfortunately, experimentation seems to indicate that state cache
124 * invalidation through a PIPE_CONTROL does nothing whatsoever in
125 * regards to surface state and binding tables. Instead, it seems that
126 * invalidating the texture cache is what is actually needed.
128 * XXX: As far as we have been able to determine through
129 * experimentation, flushing the texture cache appears to be
130 * sufficient. The theory here is that all of the sampling/rendering
131 * units cache the binding table in the texture cache. However, we have
132 * yet to be able to actually confirm this.
134 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
135 pc.TextureCacheInvalidationEnable = true;
139 void genX(CmdPipelineBarrier)(
140 VkCommandBuffer commandBuffer,
141 VkPipelineStageFlags srcStageMask,
142 VkPipelineStageFlags destStageMask,
144 uint32_t memoryBarrierCount,
145 const VkMemoryBarrier* pMemoryBarriers,
146 uint32_t bufferMemoryBarrierCount,
147 const VkBufferMemoryBarrier* pBufferMemoryBarriers,
148 uint32_t imageMemoryBarrierCount,
149 const VkImageMemoryBarrier* pImageMemoryBarriers)
151 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
154 /* XXX: Right now, we're really dumb and just flush whatever categories
155 * the app asks for. One of these days we may make this a bit better
156 * but right now that's all the hardware allows for in most areas.
158 VkAccessFlags src_flags = 0;
159 VkAccessFlags dst_flags = 0;
161 for (uint32_t i = 0; i < memoryBarrierCount; i++) {
162 src_flags |= pMemoryBarriers[i].srcAccessMask;
163 dst_flags |= pMemoryBarriers[i].dstAccessMask;
166 for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
167 src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
168 dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
171 for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
172 src_flags |= pImageMemoryBarriers[i].srcAccessMask;
173 dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
176 /* Mask out the Source access flags we care about */
177 const uint32_t src_mask =
178 VK_ACCESS_SHADER_WRITE_BIT |
179 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT |
180 VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT |
181 VK_ACCESS_TRANSFER_WRITE_BIT;
183 src_flags = src_flags & src_mask;
185 /* Mask out the destination access flags we care about */
186 const uint32_t dst_mask =
187 VK_ACCESS_INDIRECT_COMMAND_READ_BIT |
188 VK_ACCESS_INDEX_READ_BIT |
189 VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT |
190 VK_ACCESS_UNIFORM_READ_BIT |
191 VK_ACCESS_SHADER_READ_BIT |
192 VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
193 VK_ACCESS_TRANSFER_READ_BIT;
195 dst_flags = dst_flags & dst_mask;
197 /* The src flags represent how things were used previously. This is
198 * what we use for doing flushes.
200 struct GENX(PIPE_CONTROL) flush_cmd = {
201 GENX(PIPE_CONTROL_header),
202 .PostSyncOperation = NoWrite,
205 for_each_bit(b, src_flags) {
206 switch ((VkAccessFlagBits)(1 << b)) {
207 case VK_ACCESS_SHADER_WRITE_BIT:
208 flush_cmd.DCFlushEnable = true;
210 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
211 flush_cmd.RenderTargetCacheFlushEnable = true;
213 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
214 flush_cmd.DepthCacheFlushEnable = true;
216 case VK_ACCESS_TRANSFER_WRITE_BIT:
217 flush_cmd.RenderTargetCacheFlushEnable = true;
218 flush_cmd.DepthCacheFlushEnable = true;
221 unreachable("should've masked this out by now");
225 /* If we end up doing two PIPE_CONTROLs, the first (flushing) one also has to
226 * stall and wait for the flushing to finish, so we don't re-dirty the
227 * caches with in-flight rendering after the second PIPE_CONTROL invalidates.
232 flush_cmd.CommandStreamerStallEnable = true;
234 if (src_flags && dst_flags) {
235 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
236 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &flush_cmd);
239 /* The dst flags represent how things will be used in the future. This
240 * is what we use for doing cache invalidations.
242 struct GENX(PIPE_CONTROL) invalidate_cmd = {
243 GENX(PIPE_CONTROL_header),
244 .PostSyncOperation = NoWrite,
247 for_each_bit(b, dst_flags) {
248 switch ((VkAccessFlagBits)(1 << b)) {
249 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
250 case VK_ACCESS_INDEX_READ_BIT:
251 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
252 invalidate_cmd.VFCacheInvalidationEnable = true;
254 case VK_ACCESS_UNIFORM_READ_BIT:
255 invalidate_cmd.ConstantCacheInvalidationEnable = true;
257 case VK_ACCESS_SHADER_READ_BIT:
258 invalidate_cmd.TextureCacheInvalidationEnable = true;
260 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
261 invalidate_cmd.TextureCacheInvalidationEnable = true;
263 case VK_ACCESS_TRANSFER_READ_BIT:
264 invalidate_cmd.TextureCacheInvalidationEnable = true;
267 unreachable("should've masked this out by now");
272 dw = anv_batch_emit_dwords(&cmd_buffer->batch, GENX(PIPE_CONTROL_length));
273 GENX(PIPE_CONTROL_pack)(&cmd_buffer->batch, dw, &invalidate_cmd);
278 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
280 static const uint32_t push_constant_opcodes[] = {
281 [MESA_SHADER_VERTEX] = 21,
282 [MESA_SHADER_TESS_CTRL] = 25, /* HS */
283 [MESA_SHADER_TESS_EVAL] = 26, /* DS */
284 [MESA_SHADER_GEOMETRY] = 22,
285 [MESA_SHADER_FRAGMENT] = 23,
286 [MESA_SHADER_COMPUTE] = 0,
289 VkShaderStageFlags flushed = 0;
291 anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
292 if (stage == MESA_SHADER_COMPUTE)
295 struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);
297 if (state.offset == 0) {
298 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c)
299 c._3DCommandSubOpcode = push_constant_opcodes[stage];
301 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
302 c._3DCommandSubOpcode = push_constant_opcodes[stage];
303 c.ConstantBody = (struct GENX(3DSTATE_CONSTANT_BODY)) {
305 .PointerToConstantBuffer2 = { &cmd_buffer->device->dynamic_state_block_pool.bo, state.offset },
306 .ConstantBuffer2ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
308 .PointerToConstantBuffer0 = { .offset = state.offset },
309 .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
315 flushed |= mesa_to_vk_shader_stage(stage);
318 cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_ALL_GRAPHICS;
324 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
326 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
329 uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;
331 assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
334 /* FIXME (jason): Currently, the config_l3 function causes problems on
335 * Haswell and prior if you have a kernel older than 4.4. In order to
336 * work, it requires a couple of registers be white-listed in the
337 * command parser and they weren't added until 4.4. What we should do
338 * is check the command parser version and make it a no-op if your
339 * command parser is either off or too old. Compute won't work 100%,
340 * but at least 3-D will. In the meantime, I'm going to make this
341 * gen8+ only so that we can get Haswell working again.
343 genX(cmd_buffer_config_l3)(cmd_buffer, pipeline);
346 genX(flush_pipeline_select_3d)(cmd_buffer);
349 const uint32_t num_buffers = __builtin_popcount(vb_emit);
350 const uint32_t num_dwords = 1 + num_buffers * 4;
352 p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
353 GENX(3DSTATE_VERTEX_BUFFERS));
355 for_each_bit(vb, vb_emit) {
356 struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
357 uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
359 struct GENX(VERTEX_BUFFER_STATE) state = {
360 .VertexBufferIndex = vb,
363 .MemoryObjectControlState = GENX(MOCS),
365 .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
366 .InstanceDataStepRate = 1,
367 .VertexBufferMemoryObjectControlState = GENX(MOCS),
370 .AddressModifyEnable = true,
371 .BufferPitch = pipeline->binding_stride[vb],
372 .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
375 .BufferSize = buffer->size - offset
377 .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
381 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
386 cmd_buffer->state.vb_dirty &= ~vb_emit;
388 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
389 /* If somebody compiled a pipeline after starting a command buffer the
390 * scratch bo may have grown since we started this cmd buffer (and
391 * emitted STATE_BASE_ADDRESS). If we're binding that pipeline now,
392 * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
393 if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
394 anv_cmd_buffer_emit_state_base_address(cmd_buffer);
396 anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
398 /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
400 * "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
401 * the next 3DPRIMITIVE command after programming the
402 * 3DSTATE_PUSH_CONSTANT_ALLOC_VS"
404 * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
405 * pipeline setup, we need to dirty push constants.
407 cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
411 if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
412 cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
413 /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
415 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
416 * stall needs to be sent just prior to any 3DSTATE_VS,
417 * 3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
418 * 3DSTATE_BINDING_TABLE_POINTER_VS,
419 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one
420 * PIPE_CONTROL needs to be sent before any combination of VS
421 * associated 3DSTATE."
423 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
424 pc.DepthStallEnable = true;
425 pc.PostSyncOperation = WriteImmediateData;
427 (struct anv_address) { &cmd_buffer->device->workaround_bo, 0 };
432 /* We emit the binding tables and sampler tables first, then emit push
433 * constants and then finally emit binding table and sampler table
434 * pointers. It has to happen in this order, since emitting the binding
435 * tables may change the push constants (in case of storage images). After
436 * emitting push constants, on SKL+ we have to emit the corresponding
437 * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
440 if (cmd_buffer->state.descriptors_dirty)
441 dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
443 if (cmd_buffer->state.push_constants_dirty) {
445 /* On Sky Lake and later, the binding table pointers commands are
446 * what actually flush the changes to push constant state so we need
447 * to dirty them so they get re-emitted below.
449 dirty |= cmd_buffer_flush_push_constants(cmd_buffer);
451 cmd_buffer_flush_push_constants(cmd_buffer);
456 gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
458 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
459 gen8_cmd_buffer_emit_viewport(cmd_buffer);
461 if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
462 gen7_cmd_buffer_emit_scissor(cmd_buffer);
464 genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
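/* The helpers below implement base vertex/instance for draws: the pair of
 * values is written into an 8-byte allocation and bound as vertex buffer 32
 * (the index reserved for it), on the assumption that the vertex shader
 * fetches gl_BaseVertex/gl_BaseInstance style parameters as if they were
 * vertex attributes whenever vs_prog_data reports uses_basevertex or
 * uses_baseinstance.
 */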
468 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
469 struct anv_bo *bo, uint32_t offset)
471 uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
472 GENX(3DSTATE_VERTEX_BUFFERS));
474 GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
475 &(struct GENX(VERTEX_BUFFER_STATE)) {
476 .VertexBufferIndex = 32, /* Reserved for this */
477 .AddressModifyEnable = true,
480 .MemoryObjectControlState = GENX(MOCS),
481 .BufferStartingAddress = { bo, offset },
484 .VertexBufferMemoryObjectControlState = GENX(MOCS),
485 .BufferStartingAddress = { bo, offset },
486 .EndAddress = { bo, offset + 8 },
492 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
493 uint32_t base_vertex, uint32_t base_instance)
495 struct anv_state id_state =
496 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
498 ((uint32_t *)id_state.map)[0] = base_vertex;
499 ((uint32_t *)id_state.map)[1] = base_instance;
501 if (!cmd_buffer->device->info.has_llc)
502 anv_state_clflush(id_state);
504 emit_base_vertex_instance_bo(cmd_buffer,
505 &cmd_buffer->device->dynamic_state_block_pool.bo, id_state.offset);
509 VkCommandBuffer commandBuffer,
510 uint32_t vertexCount,
511 uint32_t instanceCount,
512 uint32_t firstVertex,
513 uint32_t firstInstance)
515 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
516 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
517 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
519 genX(cmd_buffer_flush_state)(cmd_buffer);
521 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
522 emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
524 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
525 prim.VertexAccessType = SEQUENTIAL;
526 prim.PrimitiveTopologyType = pipeline->topology;
527 prim.VertexCountPerInstance = vertexCount;
528 prim.StartVertexLocation = firstVertex;
529 prim.InstanceCount = instanceCount;
530 prim.StartInstanceLocation = firstInstance;
531 prim.BaseVertexLocation = 0;
535 void genX(CmdDrawIndexed)(
536 VkCommandBuffer commandBuffer,
538 uint32_t instanceCount,
540 int32_t vertexOffset,
541 uint32_t firstInstance)
543 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
544 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
545 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
547 genX(cmd_buffer_flush_state)(cmd_buffer);
549 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
550 emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
552 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
553 prim.VertexAccessType = RANDOM;
554 prim.PrimitiveTopologyType = pipeline->topology;
555 prim.VertexCountPerInstance = indexCount;
556 prim.StartVertexLocation = firstIndex;
557 prim.InstanceCount = instanceCount;
558 prim.StartInstanceLocation = firstInstance;
559 prim.BaseVertexLocation = vertexOffset;
563 /* Auto-Draw / Indirect Registers */
564 #define GEN7_3DPRIM_END_OFFSET 0x2420
565 #define GEN7_3DPRIM_START_VERTEX 0x2430
566 #define GEN7_3DPRIM_VERTEX_COUNT 0x2434
567 #define GEN7_3DPRIM_INSTANCE_COUNT 0x2438
568 #define GEN7_3DPRIM_START_INSTANCE 0x243C
569 #define GEN7_3DPRIM_BASE_VERTEX 0x2440
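/* The indirect draw paths below load these registers directly from the
 * application's indirect buffer, which Vulkan lays out as:
 *
 *    VkDrawIndirectCommand           VkDrawIndexedIndirectCommand
 *    +0   vertexCount                +0   indexCount
 *    +4   instanceCount              +4   instanceCount
 *    +8   firstVertex                +8   firstIndex
 *    +12  firstInstance              +12  vertexOffset
 *                                    +16  firstInstance
 */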
572 emit_lrm(struct anv_batch *batch,
573 uint32_t reg, struct anv_bo *bo, uint32_t offset)
575 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
576 lrm.RegisterAddress = reg;
577 lrm.MemoryAddress = (struct anv_address) { bo, offset };
582 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
584 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
585 lri.RegisterOffset = reg;
590 void genX(CmdDrawIndirect)(
591 VkCommandBuffer commandBuffer,
597 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
598 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
599 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
600 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
601 struct anv_bo *bo = buffer->bo;
602 uint32_t bo_offset = buffer->offset + offset;
604 genX(cmd_buffer_flush_state)(cmd_buffer);
606 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
607 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 8);
609 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
610 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
611 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
612 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
613 emit_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);
615 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
616 prim.IndirectParameterEnable = true;
617 prim.VertexAccessType = SEQUENTIAL;
618 prim.PrimitiveTopologyType = pipeline->topology;
622 void genX(CmdDrawIndexedIndirect)(
623 VkCommandBuffer commandBuffer,
629 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
630 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
631 struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
632 const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
633 struct anv_bo *bo = buffer->bo;
634 uint32_t bo_offset = buffer->offset + offset;
636 genX(cmd_buffer_flush_state)(cmd_buffer);
638 /* TODO: We need to stomp base vertex to 0 somehow */
639 if (vs_prog_data->uses_basevertex || vs_prog_data->uses_baseinstance)
640 emit_base_vertex_instance_bo(cmd_buffer, bo, bo_offset + 12);
642 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
643 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
644 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
645 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
646 emit_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);
648 anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
649 prim.IndirectParameterEnable = true;
650 prim.VertexAccessType = RANDOM;
651 prim.PrimitiveTopologyType = pipeline->topology;
658 verify_cmd_parser(const struct anv_device *device,
659 int required_version,
660 const char *function)
662 if (device->instance->physicalDevice.cmd_parser_version < required_version) {
663 vk_errorf(VK_ERROR_FEATURE_NOT_PRESENT,
664 "cmd parser version %d is required for %s",
665 required_version, function);
674 void genX(CmdDispatch)(
675 VkCommandBuffer commandBuffer,
680 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
681 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
682 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
684 if (prog_data->uses_num_work_groups) {
685 struct anv_state state =
686 anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
687 uint32_t *sizes = state.map;
691 if (!cmd_buffer->device->info.has_llc)
692 anv_state_clflush(state);
693 cmd_buffer->state.num_workgroups_offset = state.offset;
694 cmd_buffer->state.num_workgroups_bo =
695 &cmd_buffer->device->dynamic_state_block_pool.bo;
698 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
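   /* GPGPU_WALKER takes an encoded SIMD size (SIMD8/16/32 map to 0/1/2),
    * hence the division of the compiler's dispatch width by 16 below.
    */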
700 anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
701 ggw.SIMDSize = prog_data->simd_size / 16;
702 ggw.ThreadDepthCounterMaximum = 0;
703 ggw.ThreadHeightCounterMaximum = 0;
704 ggw.ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1;
705 ggw.ThreadGroupIDXDimension = x;
706 ggw.ThreadGroupIDYDimension = y;
707 ggw.ThreadGroupIDZDimension = z;
708 ggw.RightExecutionMask = pipeline->cs_right_mask;
709 ggw.BottomExecutionMask = 0xffffffff;
712 anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
715 #define GPGPU_DISPATCHDIMX 0x2500
716 #define GPGPU_DISPATCHDIMY 0x2504
717 #define GPGPU_DISPATCHDIMZ 0x2508
719 #define MI_PREDICATE_SRC0 0x2400
720 #define MI_PREDICATE_SRC1 0x2408
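/* VkDispatchIndirectCommand is three packed uint32_t group counts (x at +0,
 * y at +4, z at +8).  The MI_PREDICATE sequence in CmdDispatchIndirect below
 * ORs together "group count == 0" tests and inverts the result so that the
 * GPGPU_WALKER can be predicated off whenever any dimension is zero (where
 * PredicateEnable is set).
 */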
722 void genX(CmdDispatchIndirect)(
723 VkCommandBuffer commandBuffer,
727 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
728 ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
729 struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
730 const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
731 struct anv_bo *bo = buffer->bo;
732 uint32_t bo_offset = buffer->offset + offset;
733 struct anv_batch *batch = &cmd_buffer->batch;
736 /* Linux 4.4 added command parser version 5 which allows the GPGPU
737 * indirect dispatch registers to be written.
739 if (!verify_cmd_parser(cmd_buffer->device, 5, "vkCmdDispatchIndirect"))
743 if (prog_data->uses_num_work_groups) {
744 cmd_buffer->state.num_workgroups_offset = bo_offset;
745 cmd_buffer->state.num_workgroups_bo = bo;
748 genX(cmd_buffer_flush_compute_state)(cmd_buffer);
750 emit_lrm(batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
751 emit_lrm(batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
752 emit_lrm(batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);
755 /* Clear upper 32-bits of SRC0 and all 64-bits of SRC1 */
756 emit_lri(batch, MI_PREDICATE_SRC0 + 4, 0);
757 emit_lri(batch, MI_PREDICATE_SRC1 + 0, 0);
758 emit_lri(batch, MI_PREDICATE_SRC1 + 4, 0);
760 /* Load compute_dispatch_indirect_x_size into SRC0 */
761 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 0);
763 /* predicate = (compute_dispatch_indirect_x_size == 0); */
764 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
765 mip.LoadOperation = LOAD_LOAD;
766 mip.CombineOperation = COMBINE_SET;
767 mip.CompareOperation = COMPARE_SRCS_EQUAL;
770 /* Load compute_dispatch_indirect_y_size into SRC0 */
771 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 4);
773 /* predicate |= (compute_dispatch_indirect_y_size == 0); */
774 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
775 mip.LoadOperation = LOAD_LOAD;
776 mip.CombineOperation = COMBINE_OR;
777 mip.CompareOperation = COMPARE_SRCS_EQUAL;
780 /* Load compute_dispatch_indirect_z_size into SRC0 */
781 emit_lrm(batch, MI_PREDICATE_SRC0, bo, bo_offset + 8);
783 /* predicate |= (compute_dispatch_indirect_z_size == 0); */
784 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
785 mip.LoadOperation = LOAD_LOAD;
786 mip.CombineOperation = COMBINE_OR;
787 mip.CompareOperation = COMPARE_SRCS_EQUAL;
790 /* predicate = !predicate; */
791 #define COMPARE_FALSE 1
792 anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
793 mip.LoadOperation = LOAD_LOADINV;
794 mip.CombineOperation = COMBINE_OR;
795 mip.CompareOperation = COMPARE_FALSE;
799 anv_batch_emit(batch, GENX(GPGPU_WALKER), ggw) {
800 ggw.IndirectParameterEnable = true;
801 ggw.PredicateEnable = GEN_GEN <= 7;
802 ggw.SIMDSize = prog_data->simd_size / 16;
803 ggw.ThreadDepthCounterMaximum = 0;
804 ggw.ThreadHeightCounterMaximum = 0;
805 ggw.ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1;
806 ggw.RightExecutionMask = pipeline->cs_right_mask;
807 ggw.BottomExecutionMask = 0xffffffff;
810 anv_batch_emit(batch, GENX(MEDIA_STATE_FLUSH), msf);
814 flush_pipeline_before_pipeline_select(struct anv_cmd_buffer *cmd_buffer,
817 #if GEN_GEN >= 8 && GEN_GEN < 10
818 /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
820 * Software must clear the COLOR_CALC_STATE Valid field in
821 * 3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
822 * with Pipeline Select set to GPGPU.
824 * The internal hardware docs recommend the same workaround for Gen9
827 if (pipeline == GPGPU)
828 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
830 /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
831 * PIPELINE_SELECT [DevBWR+]":
835 * Software must ensure all the write caches are flushed through a
836 * stalling PIPE_CONTROL command followed by another PIPE_CONTROL
837 * command to invalidate read only caches prior to programming
838 * MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
840 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
841 pc.RenderTargetCacheFlushEnable = true;
842 pc.DepthCacheFlushEnable = true;
843 pc.DCFlushEnable = true;
844 pc.PostSyncOperation = NoWrite;
845 pc.CommandStreamerStallEnable = true;
848 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
849 pc.TextureCacheInvalidationEnable = true;
850 pc.ConstantCacheInvalidationEnable = true;
851 pc.StateCacheInvalidationEnable = true;
852 pc.InstructionCacheInvalidateEnable = true;
853 pc.PostSyncOperation = NoWrite;
859 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
861 if (cmd_buffer->state.current_pipeline != _3D) {
862 flush_pipeline_before_pipeline_select(cmd_buffer, _3D);
864 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
868 ps.PipelineSelection = _3D;
871 cmd_buffer->state.current_pipeline = _3D;
876 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
878 if (cmd_buffer->state.current_pipeline != GPGPU) {
879 flush_pipeline_before_pipeline_select(cmd_buffer, GPGPU);
881 anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
885 ps.PipelineSelection = GPGPU;
888 cmd_buffer->state.current_pipeline = GPGPU;
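/* A SURFTYPE_NULL surface reads back zero and discards writes.  The helper
 * below builds one sized to the current framebuffer, presumably for binding
 * in place of unused attachments.
 */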
893 genX(cmd_buffer_alloc_null_surface_state)(struct anv_cmd_buffer *cmd_buffer,
894 struct anv_framebuffer *fb)
896 struct anv_state state =
897 anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
899 struct GENX(RENDER_SURFACE_STATE) null_ss = {
900 .SurfaceType = SURFTYPE_NULL,
901 .SurfaceArray = fb->layers > 0,
902 .SurfaceFormat = ISL_FORMAT_R8G8B8A8_UNORM,
906 .TiledSurface = true,
908 .Width = fb->width - 1,
909 .Height = fb->height - 1,
910 .Depth = fb->layers - 1,
911 .RenderTargetViewExtent = fb->layers - 1,
914 GENX(RENDER_SURFACE_STATE_pack)(NULL, state.map, &null_ss);
916 if (!cmd_buffer->device->info.has_llc)
917 anv_state_clflush(state);
923 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
925 struct anv_device *device = cmd_buffer->device;
926 const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
927 const struct anv_image_view *iview =
928 anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
929 const struct anv_image *image = iview ? iview->image : NULL;
930 const bool has_depth = image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
931 const bool has_stencil =
932 image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
934 /* FIXME: Implement the PMA stall W/A */
935 /* FIXME: Width and Height are wrong */
937 /* Emit 3DSTATE_DEPTH_BUFFER */
939 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
940 db.SurfaceType = SURFTYPE_2D;
941 db.DepthWriteEnable = true;
942 db.StencilWriteEnable = has_stencil;
943 db.HierarchicalDepthBufferEnable = false;
945 db.SurfaceFormat = isl_surf_get_depth_format(&device->isl_dev,
946 &image->depth_surface.isl);
948 db.SurfaceBaseAddress = (struct anv_address) {
950 .offset = image->offset + image->depth_surface.offset,
952 db.DepthBufferObjectControlState = GENX(MOCS);
954 db.SurfacePitch = image->depth_surface.isl.row_pitch - 1;
955 db.Height = fb->height - 1;
956 db.Width = fb->width - 1;
959 db.MinimumArrayElement = 0;
963 isl_surf_get_array_pitch_el_rows(&image->depth_surface.isl) >> 2,
965 db.RenderTargetViewExtent = 1 - 1;
968 /* Even when no depth buffer is present, the hardware requires that
969 * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
971 * If a null depth buffer is bound, the driver must instead bind depth as:
972 * 3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
973 * 3DSTATE_DEPTH.Width = 1
974 * 3DSTATE_DEPTH.Height = 1
975 * 3DSTATE_DEPTH.SurfaceFormat = D16_UNORM
976 * 3DSTATE_DEPTH.SurfaceBaseAddress = 0
977 * 3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
978 * 3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
979 * 3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
981 * The PRM is wrong, though. The width and height must be programmed to the
982 * actual framebuffer's width and height, even when neither depth buffer
983 * nor stencil buffer is present. Also, D16_UNORM is not allowed to
984 * be combined with a stencil buffer so we use D32_FLOAT instead.
986 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER), db) {
987 db.SurfaceType = SURFTYPE_2D;
988 db.SurfaceFormat = D32_FLOAT;
989 db.Width = fb->width - 1;
990 db.Height = fb->height - 1;
991 db.StencilWriteEnable = has_stencil;
995 /* Emit 3DSTATE_STENCIL_BUFFER */
997 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb) {
998 #if GEN_GEN >= 8 || GEN_IS_HASWELL
999 sb.StencilBufferEnable = true,
1001 sb.StencilBufferObjectControlState = GENX(MOCS),
1003 /* Stencil buffers have strange pitch. The PRM says:
1005 * The pitch must be set to 2x the value computed based on width,
1006 * as the stencil buffer is stored with two rows interleaved.
1008 sb.SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,
1011 sb.SurfaceQPitch = isl_surf_get_array_pitch_el_rows(&image->stencil_surface.isl) >> 2,
1013 sb.SurfaceBaseAddress = (struct anv_address) {
1015 .offset = image->offset + image->stencil_surface.offset,
1019 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER), sb);
1022 /* Disable hierarchical depth buffers. */
1023 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_HIER_DEPTH_BUFFER), hz);
1025 /* Clear the clear params. */
1026 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CLEAR_PARAMS), cp);
1030 * @see anv_cmd_buffer_set_subpass()
1033 genX(cmd_buffer_set_subpass)(struct anv_cmd_buffer *cmd_buffer,
1034 struct anv_subpass *subpass)
1036 cmd_buffer->state.subpass = subpass;
1038 cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
1040 cmd_buffer_emit_depth_stencil(cmd_buffer);
1043 void genX(CmdBeginRenderPass)(
1044 VkCommandBuffer commandBuffer,
1045 const VkRenderPassBeginInfo* pRenderPassBegin,
1046 VkSubpassContents contents)
1048 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1049 ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
1050 ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
1052 cmd_buffer->state.framebuffer = framebuffer;
1053 cmd_buffer->state.pass = pass;
1054 anv_cmd_state_setup_attachments(cmd_buffer, pRenderPassBegin);
1056 genX(flush_pipeline_select_3d)(cmd_buffer);
1058 const VkRect2D *render_area = &pRenderPassBegin->renderArea;
1060 anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DRAWING_RECTANGLE), r) {
1061 r.ClippedDrawingRectangleYMin = MAX2(render_area->offset.y, 0);
1062 r.ClippedDrawingRectangleXMin = MAX2(render_area->offset.x, 0);
1063 r.ClippedDrawingRectangleYMax =
1064 render_area->offset.y + render_area->extent.height - 1;
1065 r.ClippedDrawingRectangleXMax =
1066 render_area->offset.x + render_area->extent.width - 1;
1067 r.DrawingRectangleOriginY = 0;
1068 r.DrawingRectangleOriginX = 0;
1071 genX(cmd_buffer_set_subpass)(cmd_buffer, pass->subpasses);
1072 anv_cmd_buffer_clear_subpass(cmd_buffer);
1075 void genX(CmdNextSubpass)(
1076 VkCommandBuffer commandBuffer,
1077 VkSubpassContents contents)
1079 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1081 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1083 anv_cmd_buffer_resolve_subpass(cmd_buffer);
1084 genX(cmd_buffer_set_subpass)(cmd_buffer, cmd_buffer->state.subpass + 1);
1085 anv_cmd_buffer_clear_subpass(cmd_buffer);
1088 void genX(CmdEndRenderPass)(
1089 VkCommandBuffer commandBuffer)
1091 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1093 anv_cmd_buffer_resolve_subpass(cmd_buffer);
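/* The query helpers below address each occlusion slot in the pool as a
 * PS_DEPTH_COUNT written at query begin (offset 0), one written at query end
 * (offset 8), and an availability word (offset 16) written once the end
 * value has landed.
 */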
1097 emit_ps_depth_count(struct anv_batch *batch,
1098 struct anv_bo *bo, uint32_t offset)
1100 anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
1101 pc.DestinationAddressType = DAT_PPGTT;
1102 pc.PostSyncOperation = WritePSDepthCount;
1103 pc.DepthStallEnable = true;
1104 pc.Address = (struct anv_address) { bo, offset };
1109 emit_query_availability(struct anv_batch *batch,
1110 struct anv_bo *bo, uint32_t offset)
1112 anv_batch_emit(batch, GENX(PIPE_CONTROL), pc) {
1113 pc.DestinationAddressType = DAT_PPGTT;
1114 pc.PostSyncOperation = WriteImmediateData;
1115 pc.Address = (struct anv_address) { bo, offset };
1116 pc.ImmediateData = 1;
1120 void genX(CmdBeginQuery)(
1121 VkCommandBuffer commandBuffer,
1122 VkQueryPool queryPool,
1124 VkQueryControlFlags flags)
1126 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1127 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1129 /* Workaround: When meta uses the pipeline with the VS disabled, it seems
1130 * that the pipelining of the depth write breaks. What we see is that
1131 * samples from the render pass clear leak into the first query
1132 * immediately after the clear. Doing a pipecontrol with a post-sync
1133 * operation and DepthStallEnable seems to work around the issue.
1135 if (cmd_buffer->state.need_query_wa) {
1136 cmd_buffer->state.need_query_wa = false;
1137 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1138 pc.DepthCacheFlushEnable = true;
1139 pc.DepthStallEnable = true;
1143 switch (pool->type) {
1144 case VK_QUERY_TYPE_OCCLUSION:
1145 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1146 query * sizeof(struct anv_query_pool_slot));
1149 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1155 void genX(CmdEndQuery)(
1156 VkCommandBuffer commandBuffer,
1157 VkQueryPool queryPool,
1160 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1161 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1163 switch (pool->type) {
1164 case VK_QUERY_TYPE_OCCLUSION:
1165 emit_ps_depth_count(&cmd_buffer->batch, &pool->bo,
1166 query * sizeof(struct anv_query_pool_slot) + 8);
1168 emit_query_availability(&cmd_buffer->batch, &pool->bo,
1169 query * sizeof(struct anv_query_pool_slot) + 16);
1172 case VK_QUERY_TYPE_PIPELINE_STATISTICS:
1178 #define TIMESTAMP 0x2358
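/* 0x2358 is the command streamer's TIMESTAMP register.  Top-of-pipe
 * timestamps are captured by storing its two 32-bit halves with
 * MI_STORE_REGISTER_MEM; every other stage falls back to a bottom-of-pipe
 * PIPE_CONTROL with the WriteTimestamp post-sync operation.
 */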
1180 void genX(CmdWriteTimestamp)(
1181 VkCommandBuffer commandBuffer,
1182 VkPipelineStageFlagBits pipelineStage,
1183 VkQueryPool queryPool,
1186 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1187 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1188 uint32_t offset = query * sizeof(struct anv_query_pool_slot);
1190 assert(pool->type == VK_QUERY_TYPE_TIMESTAMP);
1192 switch (pipelineStage) {
1193 case VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT:
1194 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1195 srm.RegisterAddress = TIMESTAMP;
1196 srm.MemoryAddress = (struct anv_address) { &pool->bo, offset };
1198 anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1199 srm.RegisterAddress = TIMESTAMP + 4;
1200 srm.MemoryAddress = (struct anv_address) { &pool->bo, offset + 4 };
1205 /* Everything else is bottom-of-pipe */
1206 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1207 pc.DestinationAddressType = DAT_PPGTT;
1208 pc.PostSyncOperation = WriteTimestamp;
1209 pc.Address = (struct anv_address) { &pool->bo, offset };
1214 emit_query_availability(&cmd_buffer->batch, &pool->bo, offset + 16);
1217 #if GEN_GEN > 7 || GEN_IS_HASWELL
1219 #define alu_opcode(v) __gen_uint((v), 20, 31)
1220 #define alu_operand1(v) __gen_uint((v), 10, 19)
1221 #define alu_operand2(v) __gen_uint((v), 0, 9)
1222 #define alu(opcode, operand1, operand2) \
1223 alu_opcode(opcode) | alu_operand1(operand1) | alu_operand2(operand2)
1225 #define OPCODE_NOOP 0x000
1226 #define OPCODE_LOAD 0x080
1227 #define OPCODE_LOADINV 0x480
1228 #define OPCODE_LOAD0 0x081
1229 #define OPCODE_LOAD1 0x481
1230 #define OPCODE_ADD 0x100
1231 #define OPCODE_SUB 0x101
1232 #define OPCODE_AND 0x102
1233 #define OPCODE_OR 0x103
1234 #define OPCODE_XOR 0x104
1235 #define OPCODE_STORE 0x180
1236 #define OPCODE_STOREINV 0x580
1238 #define OPERAND_R0 0x00
1239 #define OPERAND_R1 0x01
1240 #define OPERAND_R2 0x02
1241 #define OPERAND_R3 0x03
1242 #define OPERAND_R4 0x04
1243 #define OPERAND_SRCA 0x20
1244 #define OPERAND_SRCB 0x21
1245 #define OPERAND_ACCU 0x31
1246 #define OPERAND_ZF 0x32
1247 #define OPERAND_CF 0x33
1249 #define CS_GPR(n) (0x2600 + (n) * 8)
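/* CS_GPR(n) addresses the command streamer general purpose registers that
 * MI_MATH operates on.  The pattern below: load the begin/end counters into
 * GPR0/GPR1 with MI_LOAD_REGISTER_MEM, compute R2 = R1 - R0 through the
 * SRCA/SRCB/ACCU ALU registers, then write the result out with
 * MI_STORE_REGISTER_MEM via store_query_result().
 */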
1252 emit_load_alu_reg_u64(struct anv_batch *batch, uint32_t reg,
1253 struct anv_bo *bo, uint32_t offset)
1255 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
1256 lrm.RegisterAddress = reg;
1257 lrm.MemoryAddress = (struct anv_address) { bo, offset };
1259 anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
1260 lrm.RegisterAddress = reg + 4;
1261 lrm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
1266 store_query_result(struct anv_batch *batch, uint32_t reg,
1267 struct anv_bo *bo, uint32_t offset, VkQueryResultFlags flags)
1269 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1270 srm.RegisterAddress = reg;
1271 srm.MemoryAddress = (struct anv_address) { bo, offset };
1274 if (flags & VK_QUERY_RESULT_64_BIT) {
1275 anv_batch_emit(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
1276 srm.RegisterAddress = reg + 4;
1277 srm.MemoryAddress = (struct anv_address) { bo, offset + 4 };
1282 void genX(CmdCopyQueryPoolResults)(
1283 VkCommandBuffer commandBuffer,
1284 VkQueryPool queryPool,
1285 uint32_t firstQuery,
1286 uint32_t queryCount,
1287 VkBuffer destBuffer,
1288 VkDeviceSize destOffset,
1289 VkDeviceSize destStride,
1290 VkQueryResultFlags flags)
1292 ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1293 ANV_FROM_HANDLE(anv_query_pool, pool, queryPool);
1294 ANV_FROM_HANDLE(anv_buffer, buffer, destBuffer);
1295 uint32_t slot_offset, dst_offset;
1297 if (flags & VK_QUERY_RESULT_WAIT_BIT) {
1298 anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1299 pc.CommandStreamerStallEnable = true;
1300 pc.StallAtPixelScoreboard = true;
1304 dst_offset = buffer->offset + destOffset;
1305 for (uint32_t i = 0; i < queryCount; i++) {
1307 slot_offset = (firstQuery + i) * sizeof(struct anv_query_pool_slot);
1308 switch (pool->type) {
1309 case VK_QUERY_TYPE_OCCLUSION:
1310 emit_load_alu_reg_u64(&cmd_buffer->batch,
1311 CS_GPR(0), &pool->bo, slot_offset);
1312 emit_load_alu_reg_u64(&cmd_buffer->batch,
1313 CS_GPR(1), &pool->bo, slot_offset + 8);
1315 /* FIXME: We need to clamp the result for 32 bit. */
1317 uint32_t *dw = anv_batch_emitn(&cmd_buffer->batch, 5, GENX(MI_MATH));
1318 dw[1] = alu(OPCODE_LOAD, OPERAND_SRCA, OPERAND_R1);
1319 dw[2] = alu(OPCODE_LOAD, OPERAND_SRCB, OPERAND_R0);
1320 dw[3] = alu(OPCODE_SUB, 0, 0);
1321 dw[4] = alu(OPCODE_STORE, OPERAND_R2, OPERAND_ACCU);
1324 case VK_QUERY_TYPE_TIMESTAMP:
1325 emit_load_alu_reg_u64(&cmd_buffer->batch,
1326 CS_GPR(2), &pool->bo, slot_offset);
1330 unreachable("unhandled query type");
1333 store_query_result(&cmd_buffer->batch,
1334 CS_GPR(2), buffer->bo, dst_offset, flags);
1336 if (flags & VK_QUERY_RESULT_WITH_AVAILABILITY_BIT) {
1337 emit_load_alu_reg_u64(&cmd_buffer->batch, CS_GPR(0),
1338 &pool->bo, slot_offset + 16);
1339 if (flags & VK_QUERY_RESULT_64_BIT)
1340 store_query_result(&cmd_buffer->batch,
1341 CS_GPR(0), buffer->bo, dst_offset + 8, flags);
1343 store_query_result(&cmd_buffer->batch,
1344 CS_GPR(0), buffer->bo, dst_offset + 4, flags);
1347 dst_offset += destStride;