/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
#include "vk_format.h"
#include "radv_debug.h"
#include "radv_meta.h"

#include "ac_debug.h"
enum {
	RADV_PREFETCH_VBO_DESCRIPTORS	= (1 << 0),
	RADV_PREFETCH_VS		= (1 << 1),
	RADV_PREFETCH_TCS		= (1 << 2),
	RADV_PREFETCH_TES		= (1 << 3),
	RADV_PREFETCH_GS		= (1 << 4),
	RADV_PREFETCH_PS		= (1 << 5),
	RADV_PREFETCH_SHADERS		= (RADV_PREFETCH_VS |
					   RADV_PREFETCH_TCS |
					   RADV_PREFETCH_TES |
					   RADV_PREFETCH_GS |
					   RADV_PREFETCH_PS)
};
static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears);
const struct radv_dynamic_state default_dynamic_state = {
	.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
	.stencil_compare_mask = { .front = ~0u, .back = ~0u },
	.stencil_write_mask = { .front = ~0u, .back = ~0u },
	.stencil_reference = { .front = 0u, .back = 0u },
};
static void
radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
			const struct radv_dynamic_state *src)
{
	struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
	uint32_t copy_mask = src->mask;
	uint32_t dest_mask = 0;

	/* Make sure to copy the number of viewports/scissors because they can
	 * only be specified at pipeline creation time.
	 */
	dest->viewport.count = src->viewport.count;
	dest->scissor.count = src->scissor.count;
	dest->discard_rectangle.count = src->discard_rectangle.count;

	if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
		if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
			   src->viewport.count * sizeof(VkViewport))) {
			typed_memcpy(dest->viewport.viewports,
				     src->viewport.viewports,
				     src->viewport.count);
			dest_mask |= RADV_DYNAMIC_VIEWPORT;
		}
	}

	if (copy_mask & RADV_DYNAMIC_SCISSOR) {
		if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
			   src->scissor.count * sizeof(VkRect2D))) {
			typed_memcpy(dest->scissor.scissors,
				     src->scissor.scissors, src->scissor.count);
			dest_mask |= RADV_DYNAMIC_SCISSOR;
		}
	}

	if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
		if (dest->line_width != src->line_width) {
			dest->line_width = src->line_width;
			dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
		}
	}

	if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
		if (memcmp(&dest->depth_bias, &src->depth_bias,
			   sizeof(src->depth_bias))) {
			dest->depth_bias = src->depth_bias;
			dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
		}
	}

	if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
		if (memcmp(&dest->blend_constants, &src->blend_constants,
			   sizeof(src->blend_constants))) {
			typed_memcpy(dest->blend_constants,
				     src->blend_constants, 4);
			dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
		}
	}

	if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
		if (memcmp(&dest->depth_bounds, &src->depth_bounds,
			   sizeof(src->depth_bounds))) {
			dest->depth_bounds = src->depth_bounds;
			dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
		}
	}

	if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
		if (memcmp(&dest->stencil_compare_mask,
			   &src->stencil_compare_mask,
			   sizeof(src->stencil_compare_mask))) {
			dest->stencil_compare_mask = src->stencil_compare_mask;
			dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
		}
	}

	if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
		if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
			   sizeof(src->stencil_write_mask))) {
			dest->stencil_write_mask = src->stencil_write_mask;
			dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
		}
	}

	if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
		if (memcmp(&dest->stencil_reference, &src->stencil_reference,
			   sizeof(src->stencil_reference))) {
			dest->stencil_reference = src->stencil_reference;
			dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
		}
	}

	if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
		if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
			   src->discard_rectangle.count * sizeof(VkRect2D))) {
			typed_memcpy(dest->discard_rectangle.rectangles,
				     src->discard_rectangle.rectangles,
				     src->discard_rectangle.count);
			dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
		}
	}

	cmd_buffer->state.dirty |= dest_mask;
}
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
	return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
	       cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}
enum ring_type radv_queue_family_to_ring(int f) {
	switch (f) {
	case RADV_QUEUE_GENERAL:  return RING_GFX;
	case RADV_QUEUE_COMPUTE:  return RING_COMPUTE;
	case RADV_QUEUE_TRANSFER: return RING_DMA;
	default:
		unreachable("Unknown queue family");
	}
}
static VkResult radv_create_cmd_buffer(
	struct radv_device *device,
	struct radv_cmd_pool *pool,
	VkCommandBufferLevel level,
	VkCommandBuffer *pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	unsigned ring;
	cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
		cmd_buffer->queue_family_index = pool->queue_family_index;
	} else {
		/* Init the pool_link so we can safely call list_del when we
		 * destroy the command buffer
		 */
		list_inithead(&cmd_buffer->pool_link);
		cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
	}

	ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

	cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
	if (!cmd_buffer->cs) {
		vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;
}
static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
	list_del(&cmd_buffer->pool_link);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
	cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);

	for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
		free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}
static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	cmd_buffer->push_constant_stages = 0;
	cmd_buffer->scratch_size_needed = 0;
	cmd_buffer->compute_scratch_size_needed = 0;
	cmd_buffer->esgs_ring_size_needed = 0;
	cmd_buffer->gsvs_ring_size_needed = 0;
	cmd_buffer->tess_rings_needed = false;
	cmd_buffer->sample_positions_needed = false;

	if (cmd_buffer->upload.upload_bo)
		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
				   cmd_buffer->upload.upload_bo);
	cmd_buffer->upload.offset = 0;

	cmd_buffer->record_result = VK_SUCCESS;

	for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
		cmd_buffer->descriptors[i].dirty = 0;
		cmd_buffer->descriptors[i].valid = 0;
		cmd_buffer->descriptors[i].push_dirty = false;
	}

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		unsigned num_db = cmd_buffer->device->physical_device->rad_info.num_render_backends;
		unsigned eop_bug_offset;
		void *fence_ptr;

		radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
					     &cmd_buffer->gfx9_fence_offset,
					     &fence_ptr);
		cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;

		/* Allocate a buffer for the EOP bug on GFX9. */
		radv_cmd_buffer_upload_alloc(cmd_buffer, 16 * num_db, 0,
					     &eop_bug_offset, &fence_ptr);
		cmd_buffer->gfx9_eop_bug_va =
			radv_buffer_get_va(cmd_buffer->upload.upload_bo);
		cmd_buffer->gfx9_eop_bug_va += eop_bug_offset;
	}

	cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;

	return cmd_buffer->record_result;
}
static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS|
				       RADEON_FLAG_NO_INTERPROCESS_SHARING |
				       RADEON_FLAG_32BIT);

	if (!bo) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		return false;
	}

	radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		return false;
	}

	return true;
}
bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
			     unsigned size, unsigned alignment,
			     unsigned *out_offset, void **ptr)
{
	uint64_t offset = align(cmd_buffer->upload.offset, alignment);
	if (offset + size > cmd_buffer->upload.size) {
		if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
			return false;
		offset = 0;
	}

	*out_offset = offset;
	*ptr = cmd_buffer->upload.map + offset;
	cmd_buffer->upload.offset = offset + size;
	return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
			    const void *data, unsigned *out_offset)
{
	uint8_t *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
					  out_offset, (void **)&ptr))
		return false;

	memcpy(ptr, data, size);
	return true;
}
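/* Illustrative usage (a sketch, not taken from this file): callers stage
 * transient GPU-visible data through the upload buffer and then point a
 * user SGPR at the returned offset, along these lines:
 *
 *    unsigned offset;
 *    if (radv_cmd_buffer_upload_data(cmd_buffer, size, 256, data, &offset)) {
 *            uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
 *            va += offset;
 *            ... emit va into a shader pointer, as the flush helpers below do ...
 *    }
 */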
static void
radv_emit_write_data_packet(struct radv_cmd_buffer *cmd_buffer, uint64_t va,
			    unsigned count, const uint32_t *data)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;

	radeon_check_space(cmd_buffer->device->ws, cs, 4 + count);

	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit_array(cs, data, count);
}
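/* For reference, the packet emitted above has this dword layout, which is
 * why radeon_check_space() reserves 4 + count dwords:
 *
 *    [0]           PKT3(PKT3_WRITE_DATA, 2 + count, 0)
 *    [1]           DST_SEL / WR_CONFIRM / ENGINE_SEL control word
 *    [2]           destination VA, low 32 bits
 *    [3]           destination VA, high 32 bits
 *    [4..3+count]  payload dwords
 */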
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va;

	va = radv_buffer_get_va(device->trace_bo);
	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
		va += 4;

	++cmd_buffer->state.trace_id;
	radv_emit_write_data_packet(cmd_buffer, va, 1,
				    &cmd_buffer->state.trace_id);

	radeon_check_space(cmd_buffer->device->ws, cs, 2);

	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}
static void
radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
			   enum radv_cmd_flush_bits flags)
{
	if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
		uint32_t *ptr = NULL;
		uint64_t va = 0;

		assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
				RADV_CMD_FLAG_CS_PARTIAL_FLUSH));

		if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
			va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) +
			     cmd_buffer->gfx9_fence_offset;
			ptr = &cmd_buffer->gfx9_fence_idx;
		}

		radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 4);

		/* Force wait for graphics or compute engines to be idle. */
		si_cs_emit_cache_flush(cmd_buffer->cs,
				       cmd_buffer->device->physical_device->rad_info.chip_class,
				       ptr, va,
				       radv_cmd_buffer_uses_mec(cmd_buffer),
				       flags, cmd_buffer->gfx9_eop_bug_va);
	}

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_cmd_buffer_trace_emit(cmd_buffer);
}
static void
radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
		   struct radv_pipeline *pipeline, enum ring_type ring)
{
	struct radv_device *device = cmd_buffer->device;
	uint32_t data[2];
	uint64_t va;

	va = radv_buffer_get_va(device->trace_bo);

	switch (ring) {
	case RING_GFX:
		va += 8;
		break;
	case RING_COMPUTE:
		va += 16;
		break;
	default:
		assert(!"invalid ring type");
	}

	data[0] = (uintptr_t)pipeline;
	data[1] = (uintptr_t)pipeline >> 32;

	radv_emit_write_data_packet(cmd_buffer, va, 2, data);
}
void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
			     VkPipelineBindPoint bind_point,
			     struct radv_descriptor_set *set,
			     unsigned idx)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);

	descriptors_state->sets[idx] = set;

	descriptors_state->valid |= (1u << idx); /* active descriptors */
	descriptors_state->dirty |= (1u << idx);
}
static void
radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
		      VkPipelineBindPoint bind_point)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);
	struct radv_device *device = cmd_buffer->device;
	uint32_t data[MAX_SETS * 2] = {};
	uint64_t va;
	unsigned i;

	va = radv_buffer_get_va(device->trace_bo) + 24;

	for_each_bit(i, descriptors_state->valid) {
		struct radv_descriptor_set *set = descriptors_state->sets[i];
		data[i * 2] = (uintptr_t)set;
		data[i * 2 + 1] = (uintptr_t)set >> 32;
	}

	radv_emit_write_data_packet(cmd_buffer, va, MAX_SETS * 2, data);
}
struct radv_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
		      gl_shader_stage stage, int idx)
{
	struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
	return &shader->info.user_sgprs_locs.shader_data[idx];
}
static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline,
			   gl_shader_stage stage,
			   int idx, uint64_t va)
{
	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
	uint32_t base_reg = pipeline->user_data_0[stage];
	if (loc->sgpr_idx == -1)
		return;

	assert(loc->num_sgprs == (HAVE_32BIT_POINTERS ? 1 : 2));
	assert(!loc->indirect);

	radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
				 base_reg + loc->sgpr_idx * 4, va, false);
}
static void
radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline,
			      struct radv_descriptor_state *descriptors_state,
			      gl_shader_stage stage)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint32_t sh_base = pipeline->user_data_0[stage];
	struct radv_userdata_locations *locs =
		&pipeline->shaders[stage]->info.user_sgprs_locs;
	unsigned mask = locs->descriptor_sets_enabled;

	mask &= descriptors_state->dirty & descriptors_state->valid;

	while (mask) {
		int start, count;

		u_bit_scan_consecutive_range(&mask, &start, &count);

		struct radv_userdata_info *loc = &locs->descriptor_sets[start];
		unsigned sh_offset = sh_base + loc->sgpr_idx * 4;

		radv_emit_shader_pointer_head(cs, sh_offset, count,
					      HAVE_32BIT_POINTERS);
		for (int i = 0; i < count; i++) {
			struct radv_descriptor_set *set =
				descriptors_state->sets[start + i];

			radv_emit_shader_pointer_body(device, cs, set->va,
						      HAVE_32BIT_POINTERS);
		}
	}
}
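/* Example of the scan above (illustrative): if sets 1..3 are enabled,
 * valid and dirty, then mask == 0b1110 and u_bit_scan_consecutive_range()
 * yields start = 1, count = 3 while clearing those bits from mask, so the
 * three set addresses go out as one consecutive shader-pointer write
 * instead of three separate ones.
 */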
static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline)
{
	int num_samples = pipeline->graphics.ms.num_samples;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
		cmd_buffer->sample_positions_needed = true;

	if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

	radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);

	radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

	/* GFX9: Flush DFSM when the AA mode changes. */
	if (cmd_buffer->device->dfsm_allowed) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
}
static void
radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_shader_variant *shader)
{
	if (!shader)
		return;
	uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
	si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
}
static void
radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_pipeline *pipeline,
		      bool vertex_stage_only)
{
	struct radv_cmd_state *state = &cmd_buffer->state;
	uint32_t mask = state->prefetch_L2_mask;

	if (vertex_stage_only) {
		/* Fast prefetch path for starting draws as soon as possible.
		 */
		mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
						  RADV_PREFETCH_VBO_DESCRIPTORS);
	}

	if (mask & RADV_PREFETCH_VS)
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_VERTEX]);

	if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
		si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);

	if (mask & RADV_PREFETCH_TCS)
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_TESS_CTRL]);

	if (mask & RADV_PREFETCH_TES)
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_TESS_EVAL]);

	if (mask & RADV_PREFETCH_GS) {
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_GEOMETRY]);
		radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
	}

	if (mask & RADV_PREFETCH_PS)
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_FRAGMENT]);

	state->prefetch_L2_mask &= ~mask;
}
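/* Illustrative call pattern (a sketch, not part of this file): a draw path
 * can issue
 *
 *    radv_emit_prefetch_L2(cmd_buffer, pipeline, true);   (VS + VBO only)
 *    ... emit the draw ...
 *    radv_emit_prefetch_L2(cmd_buffer, pipeline, false);  (remaining stages)
 *
 * Because each call clears the bits it handled from prefetch_L2_mask, the
 * second call never re-prefetches what the first one already did.
 */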
static void
radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
{
	if (!cmd_buffer->device->physical_device->rbplus_allowed)
		return;

	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;

	unsigned sx_ps_downconvert = 0;
	unsigned sx_blend_opt_epsilon = 0;
	unsigned sx_blend_opt_control = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
			sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
			continue;
		}

		int idx = subpass->color_attachments[i].attachment;
		struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;

		unsigned format = G_028C70_FORMAT(cb->cb_color_info);
		unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
		uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
		uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;

		bool has_alpha, has_rgb;

		/* Set if RGB and A are present. */
		has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);

		if (format == V_028C70_COLOR_8 ||
		    format == V_028C70_COLOR_16 ||
		    format == V_028C70_COLOR_32)
			has_rgb = !has_alpha;
		else
			has_rgb = true;

		/* Check the colormask and export format. */
		if (!(colormask & 0x7))
			has_rgb = false;
		if (!(colormask & 0x8))
			has_alpha = false;

		if (spi_format == V_028714_SPI_SHADER_ZERO) {
			has_rgb = false;
			has_alpha = false;
		}

		/* Disable value checking for disabled channels. */
		if (!has_rgb)
			sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
		if (!has_alpha)
			sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);

		/* Enable down-conversion for 32bpp and smaller formats. */
		switch (format) {
		case V_028C70_COLOR_8:
		case V_028C70_COLOR_8_8:
		case V_028C70_COLOR_8_8_8_8:
			/* For 1 and 2-channel formats, use the superset thereof. */
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_5_6_5:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_1_5_5_5:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_4_4_4_4:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_32:
			if (swap == V_028C70_SWAP_STD &&
			    spi_format == V_028714_SPI_SHADER_32_R)
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
			else if (swap == V_028C70_SWAP_ALT_REV &&
				 spi_format == V_028714_SPI_SHADER_32_AR)
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
			break;

		case V_028C70_COLOR_16:
		case V_028C70_COLOR_16_16:
			/* For 1-channel formats, use the superset thereof. */
			if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
				if (swap == V_028C70_SWAP_STD ||
				    swap == V_028C70_SWAP_STD_REV)
					sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
				else
					sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
			}
			break;

		case V_028C70_COLOR_10_11_11:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_2_10_10_10:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
			}
			break;
		}
	}

	for (unsigned i = subpass->color_count; i < 8; ++i) {
		sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
		sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);
	}

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
	radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
	radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
	radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
}
static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;

	if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
		return;

	radv_update_multisample_state(cmd_buffer, pipeline);

	cmd_buffer->scratch_size_needed =
		MAX2(cmd_buffer->scratch_size_needed,
		     pipeline->max_waves * pipeline->scratch_bytes_per_wave);

	if (!cmd_buffer->state.emitted_pipeline ||
	    cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
	    pipeline->graphics.can_use_guardband)
		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;

	radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);

	for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
		if (!pipeline->shaders[i])
			continue;

		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
				   pipeline->shaders[i]->bo);
	}

	if (radv_pipeline_has_gs(pipeline))
		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
				   pipeline->gs_copy_shader->bo);

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);

	cmd_buffer->state.emitted_pipeline = pipeline;

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
}
static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
	si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
			  cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t count = cmd_buffer->state.dynamic.scissor.count;

	si_write_scissors(cmd_buffer->cs, 0, count,
			  cmd_buffer->state.dynamic.scissor.scissors,
			  cmd_buffer->state.dynamic.viewport.viewports,
			  cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
}
static void
radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
{
	if (!cmd_buffer->state.dynamic.discard_rectangle.count)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
				   cmd_buffer->state.dynamic.discard_rectangle.count * 2);
	for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
		VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
		radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
		radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
					    S_028214_BR_Y(rect.offset.y + rect.extent.height));
	}
}
static void
radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
{
	unsigned width = cmd_buffer->state.dynamic.line_width * 8;

	radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
			       S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
}
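/* Worked example: the register field programmed above is in units of
 * 1/8th of a pixel (hence the "* 8"), so lineWidth = 1.0 becomes 8 and
 * lineWidth = 1.5 becomes 12, clamped to the 12-bit maximum of 0xFFF
 * (511.875 pixels).
 */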
static void
radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
}
static void
radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg_seq(cmd_buffer->cs,
				   R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cmd_buffer->cs,
		    S_028430_STENCILTESTVAL(d->stencil_reference.front) |
		    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
		    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
		    S_028430_STENCILOPVAL(1));
	radeon_emit(cmd_buffer->cs,
		    S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
		    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
		    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
		    S_028434_STENCILOPVAL_BF(1));
}
static void
radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
			       fui(d->depth_bounds.min));
	radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
			       fui(d->depth_bounds.max));
}
static void
radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
	unsigned slope = fui(d->depth_bias.slope * 16.0f);
	unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

	radeon_set_context_reg_seq(cmd_buffer->cs,
				   R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
	radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
	radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
	radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
	radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
	radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
}
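/* Worked example: with depthBiasSlopeFactor = 1.0, the FRONT/BACK SCALE
 * dwords above are fui(16.0f) because of the "* 16.0f" scale, and the
 * constant bias is pre-multiplied by state.offset_scale, which tracks the
 * bound depth attachment (see the offset_scale handling in
 * radv_emit_framebuffer_state() below).
 */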
static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_attachment_info *att,
			 struct radv_image *image,
			 VkImageLayout layout)
{
	bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
	struct radv_color_buffer_info *cb = &att->cb;
	uint32_t cb_color_info = cb->cb_color_info;

	if (!radv_layout_dcc_compressed(image, layout,
					radv_image_queue_family_mask(image,
								     cmd_buffer->queue_family_index,
								     cmd_buffer->queue_family_index))) {
		cb_color_info &= C_028C70_DCC_ENABLE;
	}

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base);
		radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
		radeon_emit(cmd_buffer->cs, cb_color_info);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
		radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
		radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
		radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));

		radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
				       S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
	} else {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base);
		radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
		radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
		radeon_emit(cmd_buffer->cs, cb_color_info);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

		if (is_vi) { /* DCC BASE */
			radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
		}
	}
}
static void
radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
			     struct radv_ds_buffer_info *ds,
			     struct radv_image *image, VkImageLayout layout,
			     bool requires_cond_write)
{
	uint32_t db_z_info = ds->db_z_info;
	uint32_t db_z_info_reg;

	if (!radv_image_is_tc_compat_htile(image))
		return;

	if (!radv_layout_has_htile(image, layout,
				   radv_image_queue_family_mask(image,
								cmd_buffer->queue_family_index,
								cmd_buffer->queue_family_index))) {
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;
	}

	db_z_info &= C_028040_ZRANGE_PRECISION;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		db_z_info_reg = R_028038_DB_Z_INFO;
	} else {
		db_z_info_reg = R_028040_DB_Z_INFO;
	}

	/* When we don't know the last fast clear value we need to emit a
	 * conditional packet, otherwise we can update DB_Z_INFO directly.
	 */
	if (requires_cond_write) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0));

		const uint32_t write_space = 0 << 8;	/* register */
		const uint32_t poll_space = 1 << 4;	/* memory */
		const uint32_t function = 3 << 0;	/* equal to the reference */
		const uint32_t options = write_space | poll_space | function;
		radeon_emit(cmd_buffer->cs, options);

		/* poll address - location of the depth clear value */
		uint64_t va = radv_buffer_get_va(image->bo);
		va += image->offset + image->clear_value_offset;

		/* In presence of stencil format, we have to adjust the base
		 * address because the first value is the stencil clear value.
		 */
		if (vk_format_is_stencil(image->vk_format))
			va += 4;

		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);

		radeon_emit(cmd_buffer->cs, fui(0.0f));		 /* reference value */
		radeon_emit(cmd_buffer->cs, (uint32_t)-1);	 /* comparison mask */
		radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */
		radeon_emit(cmd_buffer->cs, 0u);		 /* write address high */
		radeon_emit(cmd_buffer->cs, db_z_info);
	} else {
		radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
	}
}
static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;
	uint32_t db_stencil_info = ds->db_stencil_info;

	if (!radv_layout_has_htile(image, layout,
				   radv_image_queue_family_mask(image,
								cmd_buffer->queue_family_index,
								cmd_buffer->queue_family_index))) {
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;
		db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
		radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
		radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
		radeon_emit(cmd_buffer->cs, ds->db_depth_size);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
		radeon_emit(cmd_buffer->cs, db_z_info);			/* DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, db_stencil_info);		/* DB_STENCIL_INFO */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base);	/* DB_Z_READ_BASE */
		radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);	/* DB_STENCIL_READ_BASE */
		radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base);	/* DB_Z_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* DB_STENCIL_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
		radeon_emit(cmd_buffer->cs, ds->db_z_info2);
		radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
	} else {
		radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
		radeon_emit(cmd_buffer->cs, ds->db_depth_info);		/* R_02803C_DB_DEPTH_INFO */
		radeon_emit(cmd_buffer->cs, db_z_info);			/* R_028040_DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, db_stencil_info);		/* R_028044_DB_STENCIL_INFO */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base);	/* R_028048_DB_Z_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);	/* R_02804C_DB_STENCIL_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base);	/* R_028050_DB_Z_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* R_028054_DB_STENCIL_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_depth_size);		/* R_028058_DB_DEPTH_SIZE */
		radeon_emit(cmd_buffer->cs, ds->db_depth_slice);	/* R_02805C_DB_DEPTH_SLICE */
	}

	/* Update the ZRANGE_PRECISION value for the TC-compat bug. */
	radv_update_zrange_precision(cmd_buffer, ds, image, layout, true);

	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}
/**
 * Update the fast clear depth/stencil values if the image is bound as a
 * depth/stencil buffer.
 */
static void
radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
				struct radv_image *image,
				VkClearDepthStencilValue ds_clear_value,
				VkImageAspectFlags aspects)
{
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	struct radv_attachment_info *att;
	uint32_t att_idx;

	if (!framebuffer || !subpass)
		return;

	att_idx = subpass->depth_stencil_attachment.attachment;
	if (att_idx == VK_ATTACHMENT_UNUSED)
		return;

	att = &framebuffer->attachments[att_idx];
	if (att->attachment->image != image)
		return;

	radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
	radeon_emit(cs, ds_clear_value.stencil);
	radeon_emit(cs, fui(ds_clear_value.depth));

	/* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
	 * only needed when clearing Z to 0.0.
	 */
	if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
	    ds_clear_value.depth == 0.0) {
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;

		radv_update_zrange_precision(cmd_buffer, &att->ds, image,
					     layout, false);
	}
}
/**
 * Set the clear depth/stencil values to the image's metadata.
 */
static void
radv_set_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image,
			   VkClearDepthStencilValue ds_clear_value,
			   VkImageAspectFlags aspects)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(image->bo);
	unsigned reg_offset = 0, reg_count = 0;

	va += image->offset + image->clear_value_offset;

	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
		++reg_count;
	} else {
		++reg_offset;
		va += 4;
	}
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		++reg_count;

	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
		radeon_emit(cs, ds_clear_value.stencil);
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		radeon_emit(cs, fui(ds_clear_value.depth));
}
/**
 * Update the clear depth/stencil values for this image.
 */
void
radv_update_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_image *image,
			      VkClearDepthStencilValue ds_clear_value,
			      VkImageAspectFlags aspects)
{
	assert(radv_image_has_htile(image));

	radv_set_ds_clear_metadata(cmd_buffer, image, ds_clear_value, aspects);

	radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value,
					aspects);
}
/**
 * Load the clear depth/stencil values from the image's metadata.
 */
static void
radv_load_ds_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
			    struct radv_image *image)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
	uint64_t va = radv_buffer_get_va(image->bo);
	unsigned reg_offset = 0, reg_count = 0;

	va += image->offset + image->clear_value_offset;

	if (!radv_image_has_htile(image))
		return;

	if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
		++reg_count;
	} else {
		++reg_offset;
		va += 4;
	}
	if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
		++reg_count;

	radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			COPY_DATA_DST_SEL(COPY_DATA_REG) |
			(reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
	radeon_emit(cs, 0);

	radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cs, 0);
}
/**
 * With DCC some colors don't require CMASK elimination before being
 * used as a texture. This sets a predicate value to determine if the
 * cmask eliminate is required.
 */
void
radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image,
				  bool value)
{
	uint64_t pred_val = value;
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->dcc_pred_offset;

	assert(radv_image_has_dcc(image));

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, pred_val);
	radeon_emit(cmd_buffer->cs, pred_val >> 32);
}
/**
 * Update the fast clear color values if the image is bound as a color buffer.
 */
static void
radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
				   struct radv_image *image,
				   int cb_idx,
				   uint32_t color_values[2])
{
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	struct radv_attachment_info *att;
	uint32_t att_idx;

	if (!framebuffer || !subpass)
		return;

	att_idx = subpass->color_attachments[cb_idx].attachment;
	if (att_idx == VK_ATTACHMENT_UNUSED)
		return;

	att = &framebuffer->attachments[att_idx];
	if (att->attachment->image != image)
		return;

	radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
	radeon_emit(cs, color_values[0]);
	radeon_emit(cs, color_values[1]);
}
/**
 * Set the clear color values to the image's metadata.
 */
static void
radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_image *image,
			      uint32_t color_values[2])
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(image->bo);

	va += image->offset + image->clear_value_offset;

	assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));

	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, color_values[0]);
	radeon_emit(cs, color_values[1]);
}
/**
 * Update the clear color values for this image.
 */
void
radv_update_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
				 struct radv_image *image,
				 int cb_idx,
				 uint32_t color_values[2])
{
	assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));

	radv_set_color_clear_metadata(cmd_buffer, image, color_values);

	radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
					   color_values);
}
/**
 * Load the clear color values from the image's metadata.
 */
static void
radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_image *image,
			       int cb_idx)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(image->bo);

	va += image->offset + image->clear_value_offset;

	if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
		return;

	uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;

	radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
	radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
			COPY_DATA_DST_SEL(COPY_DATA_REG) |
			COPY_DATA_COUNT_SEL);
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, reg >> 2);
	radeon_emit(cs, 0);

	radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
	radeon_emit(cs, 0);
}
static void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	int i;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;

	/* this may happen for inherited secondary recording */
	if (!framebuffer)
		return;

	for (i = 0; i < 8; ++i) {
		if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
					       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
			continue;
		}

		int idx = subpass->color_attachments[i].attachment;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		VkImageLayout layout = subpass->color_attachments[i].layout;

		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);

		assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
		radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);

		radv_load_color_clear_metadata(cmd_buffer, image, i);
	}

	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		int idx = subpass->depth_stencil_attachment.attachment;
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo);
		MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
										cmd_buffer->queue_family_index,
										cmd_buffer->queue_family_index);
		/* We currently don't support writing decompressed HTILE */
		assert(radv_layout_has_htile(image, layout, queue_mask) ==
		       radv_layout_is_htile_compressed(image, layout, queue_mask));

		radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

		if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
			cmd_buffer->state.offset_scale = att->ds.offset_scale;
		}
		radv_load_ds_clear_metadata(cmd_buffer, image);
	} else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
		else
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);

		radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID));	 /* DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
			       S_028208_BR_X(framebuffer->width) |
			       S_028208_BR_Y(framebuffer->height));

	if (cmd_buffer->device->dfsm_allowed) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
	}

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
}
static void
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	struct radv_cmd_state *state = &cmd_buffer->state;

	if (state->index_type != state->last_index_type) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
			radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
						   2, state->index_type);
		} else {
			radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
			radeon_emit(cs, state->index_type);
		}

		state->last_index_type = state->index_type;
	}

	radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
	radeon_emit(cs, state->index_va);
	radeon_emit(cs, state->index_va >> 32);

	radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
	radeon_emit(cs, state->max_index_count);

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
}
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
	bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	uint32_t pa_sc_mode_cntl_1 =
		pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
	uint32_t db_count_control;

	if (!cmd_buffer->state.active_occlusion_queries) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
			    pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
			    has_perfect_queries) {
				/* Re-enable out-of-order rasterization if the
				 * bound pipeline supports it and if it has
				 * been disabled before starting any perfect
				 * occlusion queries.
				 */
				radeon_set_context_reg(cmd_buffer->cs,
						       R_028A4C_PA_SC_MODE_CNTL_1,
						       pa_sc_mode_cntl_1);
			}
		}
		db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
	} else {
		const struct radv_subpass *subpass = cmd_buffer->state.subpass;
		uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;

		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			db_count_control =
				S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
				S_028004_SAMPLE_RATE(sample_rate) |
				S_028004_ZPASS_ENABLE(1) |
				S_028004_SLICE_EVEN_ENABLE(1) |
				S_028004_SLICE_ODD_ENABLE(1);

			if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
			    pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
			    has_perfect_queries) {
				/* If the bound pipeline has enabled
				 * out-of-order rasterization, we should
				 * disable it before starting any perfect
				 * occlusion queries.
				 */
				pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;

				radeon_set_context_reg(cmd_buffer->cs,
						       R_028A4C_PA_SC_MODE_CNTL_1,
						       pa_sc_mode_cntl_1);
			}
		} else {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(sample_rate);
		}
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}
static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t states = cmd_buffer->state.dirty & cmd_buffer->state.emitted_pipeline->graphics.needed_dynamic_state;

	if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
		radv_emit_viewport(cmd_buffer);

	if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
	    !cmd_buffer->device->physical_device->has_scissor_bug)
		radv_emit_scissor(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
		radv_emit_line_width(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
		radv_emit_blend_constants(cmd_buffer);

	if (states & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
		      RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
		      RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
		radv_emit_stencil(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
		radv_emit_depth_bounds(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)
		radv_emit_depth_bias(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
		radv_emit_discard_rectangle(cmd_buffer);

	cmd_buffer->state.dirty &= ~states;
}
static void
radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
			    VkPipelineBindPoint bind_point)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);
	struct radv_descriptor_set *set = &descriptors_state->push_set.set;
	unsigned bo_offset;

	if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
					 set->mapped_ptr,
					 &bo_offset))
		return;

	set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	set->va += bo_offset;
}
static void
radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
				    VkPipelineBindPoint bind_point)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);
	uint32_t size = MAX_SETS * 2 * 4;
	uint32_t offset;
	void *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
					  256, &offset, &ptr))
		return;

	for (unsigned i = 0; i < MAX_SETS; i++) {
		uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
		uint64_t set_va = 0;
		struct radv_descriptor_set *set = descriptors_state->sets[i];
		if (descriptors_state->valid & (1u << i))
			set_va = set->va;
		uptr[0] = set_va & 0xffffffff;
		uptr[1] = set_va >> 32;
	}

	uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	if (cmd_buffer->state.pipeline) {
		if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

		if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

		if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

		if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

		if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
	}

	if (cmd_buffer->state.compute_pipeline)
		radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
					   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
}
static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
		       VkShaderStageFlags stages)
{
	VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
					 VK_PIPELINE_BIND_POINT_COMPUTE :
					 VK_PIPELINE_BIND_POINT_GRAPHICS;
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);

	if (!descriptors_state->dirty)
		return;

	if (descriptors_state->push_dirty)
		radv_flush_push_descriptors(cmd_buffer, bind_point);

	if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) ||
	    (cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) {
		radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
	}

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs,
							   MAX_SETS * MESA_SHADER_STAGES * 4);

	if (cmd_buffer->state.pipeline) {
		radv_foreach_stage(stage, stages) {
			if (!cmd_buffer->state.pipeline->shaders[stage])
				continue;

			radv_emit_descriptor_pointers(cmd_buffer,
						      cmd_buffer->state.pipeline,
						      descriptors_state, stage);
		}
	}

	if (cmd_buffer->state.compute_pipeline &&
	    (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
		radv_emit_descriptor_pointers(cmd_buffer,
					      cmd_buffer->state.compute_pipeline,
					      descriptors_state,
					      MESA_SHADER_COMPUTE);
	}

	descriptors_state->dirty = 0;
	descriptors_state->push_dirty = false;

	assert(cmd_buffer->cs->cdw <= cdw_max);

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_save_descriptors(cmd_buffer, bind_point);
}
static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
		     VkShaderStageFlags stages)
{
	struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
					 ? cmd_buffer->state.compute_pipeline
					 : cmd_buffer->state.pipeline;
	VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
					 VK_PIPELINE_BIND_POINT_COMPUTE :
					 VK_PIPELINE_BIND_POINT_GRAPHICS;
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);
	struct radv_pipeline_layout *layout = pipeline->layout;
	struct radv_shader_variant *shader, *prev_shader;
	unsigned offset;
	void *ptr;
	uint64_t va;

	stages &= cmd_buffer->push_constant_stages;
	if (!stages ||
	    (!layout->push_constant_size && !layout->dynamic_offset_count))
		return;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
					  16 * layout->dynamic_offset_count,
					  256, &offset, &ptr))
		return;

	memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
	memcpy((char*)ptr + layout->push_constant_size,
	       descriptors_state->dynamic_buffers,
	       16 * layout->dynamic_offset_count);

	va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, MESA_SHADER_STAGES * 4);

	prev_shader = NULL;
	radv_foreach_stage(stage, stages) {
		shader = radv_get_shader(pipeline, stage);

		/* Avoid redundantly emitting the address for merged stages. */
		if (shader && shader != prev_shader) {
			radv_emit_userdata_address(cmd_buffer, pipeline, stage,
						   AC_UD_PUSH_CONSTANTS, va);

			prev_shader = shader;
		}
	}

	cmd_buffer->push_constant_stages &= ~stages;
	assert(cmd_buffer->cs->cdw <= cdw_max);
}
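/* Layout of the upload made above, for reference: the push constant block
 * comes first, immediately followed by one 16-byte descriptor per dynamic
 * buffer. For example, a 32-byte push constant range with two dynamic
 * buffers allocates 32 + 16 * 2 = 64 bytes, and 'va' points at the start
 * of that block.
 */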
static void
radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
			      bool pipeline_is_dirty)
{
	if ((pipeline_is_dirty ||
	    (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
	    cmd_buffer->state.pipeline->vertex_elements.count &&
	    radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
		struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
		unsigned vb_offset;
		void *vb_ptr;
		uint32_t i = 0;
		uint32_t count = velems->count;
		uint64_t va;

		/* allocate some descriptor state for vertex buffers */
		if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
						  &vb_offset, &vb_ptr))
			return;

		for (i = 0; i < count; i++) {
			uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
			uint32_t offset;
			int vb = velems->binding[i];
			struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
			uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];

			va = radv_buffer_get_va(buffer->bo);

			offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
			va += offset + buffer->offset;
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
			if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
				desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
			else
				desc[2] = buffer->size - offset;
			desc[3] = velems->rsrc_word3[i];
		}

		va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
		va += vb_offset;

		radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
					   AC_UD_VS_VERTEX_BUFFERS, va);

		cmd_buffer->state.vb_va = va;
		cmd_buffer->state.vb_size = count * 16;
		cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
	}
	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
}
static void
radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
	radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
	radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
	radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
}
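/* IA_MULTI_VGT_PARAM and the primitive-restart registers are cached in
 * radv_cmd_state and only re-emitted when a draw actually changes them;
 * redundant SET packets would otherwise trigger needless context rolls.
 */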
static void
radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
			 bool instanced_draw, bool indirect_draw,
			 uint32_t draw_vertex_count)
{
	struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
	struct radv_cmd_state *state = &cmd_buffer->state;
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint32_t ia_multi_vgt_param;
	int32_t primitive_reset_en;

	/* Draw state. */
	ia_multi_vgt_param =
		si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
					  indirect_draw, draw_vertex_count);

	if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
		if (info->chip_class >= GFX9) {
			radeon_set_uconfig_reg_idx(cs,
						   R_030960_IA_MULTI_VGT_PARAM,
						   4, ia_multi_vgt_param);
		} else if (info->chip_class >= CIK) {
			radeon_set_context_reg_idx(cs,
						   R_028AA8_IA_MULTI_VGT_PARAM,
						   1, ia_multi_vgt_param);
		} else {
			radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
					       ia_multi_vgt_param);
		}
		state->last_ia_multi_vgt_param = ia_multi_vgt_param;
	}

	/* Primitive restart. */
	primitive_reset_en =
		indexed_draw && state->pipeline->graphics.prim_restart_enable;

	if (primitive_reset_en != state->last_primitive_reset_en) {
		state->last_primitive_reset_en = primitive_reset_en;
		if (info->chip_class >= GFX9) {
			radeon_set_uconfig_reg(cs,
					       R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
					       primitive_reset_en);
		} else {
			radeon_set_context_reg(cs,
					       R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
					       primitive_reset_en);
		}
	}

	if (primitive_reset_en) {
		uint32_t primitive_reset_index =
			state->index_type ? 0xffffffffu : 0xffffu;

		if (primitive_reset_index != state->last_primitive_reset_index) {
			radeon_set_context_reg(cs,
					       R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
					       primitive_reset_index);
			state->last_primitive_reset_index = primitive_reset_index;
		}
	}
}
static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
			     VkPipelineStageFlags src_stage_mask)
{
	if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
			      VK_PIPELINE_STAGE_TRANSFER_BIT |
			      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
			      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
	}

	if (src_stage_mask & (VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
			      VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
			      VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
			      VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
			      VK_PIPELINE_STAGE_TRANSFER_BIT |
			      VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
			      VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
			      VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
	} else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
				     VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
				     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT |
				     VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
				     VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
				     VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT)) {
		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
	}
}
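/* Example: a barrier whose srcStageMask is only
 * VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT requests just CS_PARTIAL_FLUSH
 * above, while VK_PIPELINE_STAGE_ALL_COMMANDS_BIT requests both CS and PS
 * partial flushes (the VS case is skipped because a PS flush already
 * covers the earlier geometry stages).
 */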
static enum radv_cmd_flush_bits
radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
		      VkAccessFlags src_flags,
		      struct radv_image *image)
{
	bool flush_CB_meta = true, flush_DB_meta = true;
	enum radv_cmd_flush_bits flush_bits = 0;
	uint32_t b;

	if (image) {
		if (!radv_image_has_CB_metadata(image))
			flush_CB_meta = false;
		if (!radv_image_has_htile(image))
			flush_DB_meta = false;
	}

	for_each_bit(b, src_flags) {
		switch ((VkAccessFlagBits)(1 << b)) {
		case VK_ACCESS_SHADER_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
			break;
		case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
			if (flush_CB_meta)
				flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
			break;
		case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
			if (flush_DB_meta)
				flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
			break;
		case VK_ACCESS_TRANSFER_WRITE_BIT:
			flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
				      RADV_CMD_FLAG_FLUSH_AND_INV_DB |
				      RADV_CMD_FLAG_INV_GLOBAL_L2;

			if (flush_CB_meta)
				flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
			if (flush_DB_meta)
				flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
			break;
		default:
			break;
		}
	}
	return flush_bits;
}
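/* radv_src_access_flush above decides what a write must flush out of the
 * caches; radv_dst_access_flush below decides what a subsequent read must
 * invalidate. E.g. a transfer write followed by a shader read flushes
 * CB/DB and invalidates L2 on the source side, and invalidates VMEM L1
 * and L2 on the destination side.
 */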
static enum radv_cmd_flush_bits
radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
		      VkAccessFlags dst_flags,
		      struct radv_image *image)
{
	bool flush_CB_meta = true, flush_DB_meta = true;
	enum radv_cmd_flush_bits flush_bits = 0;
	bool flush_CB = true, flush_DB = true;
	uint32_t b;

	if (image) {
		if (!(image->usage & VK_IMAGE_USAGE_STORAGE_BIT)) {
			flush_CB = false;
			flush_DB = false;
		}

		if (!radv_image_has_CB_metadata(image))
			flush_CB_meta = false;
		if (!radv_image_has_htile(image))
			flush_DB_meta = false;
	}

	for_each_bit(b, dst_flags) {
		switch ((VkAccessFlagBits)(1 << b)) {
		case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
		case VK_ACCESS_INDEX_READ_BIT:
			break;
		case VK_ACCESS_UNIFORM_READ_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
			break;
		case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
		case VK_ACCESS_SHADER_READ_BIT:
		case VK_ACCESS_TRANSFER_READ_BIT:
		case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
			flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
				      RADV_CMD_FLAG_INV_GLOBAL_L2;
			break;
		case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
			if (flush_CB)
				flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
			if (flush_CB_meta)
				flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
			break;
		case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
			if (flush_DB)
				flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
			if (flush_DB_meta)
				flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
			break;
		default:
			break;
		}
	}
	return flush_bits;
}
void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer,
			  const struct radv_subpass_barrier *barrier)
{
	cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
							      NULL);
	radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
	cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
							      NULL);
}
static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
						 struct radv_subpass_attachment att)
{
	unsigned idx = att.attachment;
	struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
	VkImageSubresourceRange range;
	range.aspectMask = 0;
	range.baseMipLevel = view->base_mip;
	range.levelCount = 1;
	range.baseArrayLayer = view->base_layer;
	range.layerCount = cmd_buffer->state.framebuffer->layers;

	radv_handle_image_transition(cmd_buffer,
				     view->image,
				     cmd_buffer->state.attachments[idx].current_layout,
				     att.layout, 0, 0, &range,
				     cmd_buffer->state.attachments[idx].pending_clear_aspects);

	cmd_buffer->state.attachments[idx].current_layout = att.layout;
}
void
radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
			    const struct radv_subpass *subpass, bool transitions)
{
	if (transitions) {
		radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);

		for (unsigned i = 0; i < subpass->color_count; ++i) {
			if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
				radv_handle_subpass_image_transition(cmd_buffer,
								     subpass->color_attachments[i]);
		}

		for (unsigned i = 0; i < subpass->input_count; ++i) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->input_attachments[i]);
		}

		if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
			radv_handle_subpass_image_transition(cmd_buffer,
							     subpass->depth_stencil_attachment);
		}
	}

	cmd_buffer->state.subpass = subpass;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
}
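/* Clear state is derived from the render pass load ops: an attachment
 * with VK_ATTACHMENT_LOAD_OP_CLEAR gets the matching aspect recorded in
 * pending_clear_aspects and its clear value taken from pClearValues.
 * Note the depth/stencil case below: when depth is cleared and the
 * stencil load op is DONT_CARE, stencil is cleared as well; this is legal
 * because DONT_CARE leaves the contents undefined, and clearing both
 * aspects together presumably keeps the clear on a faster path.
 */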
static VkResult
radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
				 struct radv_render_pass *pass,
				 const VkRenderPassBeginInfo *info)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	if (pass->attachment_count == 0) {
		state->attachments = NULL;
		return VK_SUCCESS;
	}

	state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
				      pass->attachment_count *
				      sizeof(state->attachments[0]),
				      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (state->attachments == NULL) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
		return cmd_buffer->record_result;
	}

	for (uint32_t i = 0; i < pass->attachment_count; ++i) {
		struct radv_render_pass_attachment *att = &pass->attachments[i];
		VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
		VkImageAspectFlags clear_aspects = 0;

		if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
			/* color attachment */
			if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
			}
		} else {
			/* depthstencil attachment */
			if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
			    att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
				if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
				    att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
					clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
			}
			if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
			    att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
				clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
			}
		}

		state->attachments[i].pending_clear_aspects = clear_aspects;
		state->attachments[i].cleared_views = 0;
		if (clear_aspects && info) {
			assert(info->clearValueCount > i);
			state->attachments[i].clear_value = info->pClearValues[i];
		}

		state->attachments[i].current_layout = att->initial_layout;
	}

	return VK_SUCCESS;
}
VkResult radv_AllocateCommandBuffers(
	VkDevice _device,
	const VkCommandBufferAllocateInfo *pAllocateInfo,
	VkCommandBuffer *pCommandBuffers)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);

	VkResult result = VK_SUCCESS;
	uint32_t i;

	for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {

		if (!list_empty(&pool->free_cmd_buffers)) {
			struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);

			list_del(&cmd_buffer->pool_link);
			list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);

			result = radv_reset_cmd_buffer(cmd_buffer);
			cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
			cmd_buffer->level = pAllocateInfo->level;

			pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
		} else {
			result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
							&pCommandBuffers[i]);
		}
		if (result != VK_SUCCESS)
			break;
	}

	if (result != VK_SUCCESS) {
		radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
					i, pCommandBuffers);

		/* From the Vulkan 1.0.66 spec:
		 *
		 * "vkAllocateCommandBuffers can be used to create multiple
		 *  command buffers. If the creation of any of those command
		 *  buffers fails, the implementation must destroy all
		 *  successfully created command buffer objects from this
		 *  command, set all entries of the pCommandBuffers array to
		 *  NULL and return the error."
		 */
		memset(pCommandBuffers, 0,
		       sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
	}

	return result;
}
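/* Typical application usage (hypothetical handles):
 *
 *    VkCommandBufferAllocateInfo alloc_info = {
 *        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
 *        .commandPool = pool,
 *        .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
 *        .commandBufferCount = 2,
 *    };
 *    vkAllocateCommandBuffers(device, &alloc_info, cmd_buffers);
 *
 * Command buffers are recycled from free_cmd_buffers when possible; on
 * failure everything allocated so far is freed and the output array is
 * zeroed, as the spec quote above requires.
 */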
void radv_FreeCommandBuffers(
	VkDevice device,
	VkCommandPool commandPool,
	uint32_t commandBufferCount,
	const VkCommandBuffer *pCommandBuffers)
{
	for (uint32_t i = 0; i < commandBufferCount; i++) {
		RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);

		if (cmd_buffer) {
			if (cmd_buffer->pool) {
				list_del(&cmd_buffer->pool_link);
				list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
			} else
				radv_cmd_buffer_destroy(cmd_buffer);
		}
	}
}
VkResult radv_ResetCommandBuffer(
	VkCommandBuffer commandBuffer,
	VkCommandBufferResetFlags flags)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	return radv_reset_cmd_buffer(cmd_buffer);
}
static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	if (device->gfx_init) {
		uint64_t va = radv_buffer_get_va(device->gfx_init);

		radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init);
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);
		radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
	} else
		si_init_config(cmd_buffer);
}
VkResult radv_BeginCommandBuffer(
	VkCommandBuffer commandBuffer,
	const VkCommandBufferBeginInfo *pBeginInfo)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	VkResult result = VK_SUCCESS;

	if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
		/* If the command buffer has already been reset with
		 * vkResetCommandBuffer, there is no need to do it again.
		 */
		result = radv_reset_cmd_buffer(cmd_buffer);
		if (result != VK_SUCCESS)
			return result;
	}

	memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
	cmd_buffer->state.last_primitive_reset_en = -1;
	cmd_buffer->state.last_index_type = -1;
	cmd_buffer->state.last_num_instances = -1;
	cmd_buffer->state.last_vertex_offset = -1;
	cmd_buffer->state.last_first_instance = -1;
	cmd_buffer->state.predication_type = -1;
	cmd_buffer->usage_flags = pBeginInfo->flags;

	/* Set up the initial configuration in the command buffer. */
	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
		switch (cmd_buffer->queue_family_index) {
		case RADV_QUEUE_GENERAL:
			emit_gfx_buffer_state(cmd_buffer);
			break;
		case RADV_QUEUE_COMPUTE:
			si_init_compute(cmd_buffer);
			break;
		case RADV_QUEUE_TRANSFER:
		default:
			break;
		}
	}

	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY &&
	    (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT)) {
		assert(pBeginInfo->pInheritanceInfo);
		cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
		cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);

		struct radv_subpass *subpass =
			&cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];

		result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
		if (result != VK_SUCCESS)
			return result;

		radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
	}

	if (unlikely(cmd_buffer->device->trace_bo)) {
		struct radv_device *device = cmd_buffer->device;

		radv_cs_add_buffer(device->ws, cmd_buffer->cs,
				   device->trace_bo);

		radv_cmd_buffer_trace_emit(cmd_buffer);
	}

	cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;

	return result;
}
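/* A secondary command buffer that continues a render pass must carry
 * inheritance info, e.g. (hypothetical handles):
 *
 *    VkCommandBufferInheritanceInfo inherit = {
 *        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO,
 *        .renderPass = pass,
 *        .subpass = 0,
 *        .framebuffer = fb,
 *    };
 *    VkCommandBufferBeginInfo begin = {
 *        .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
 *        .flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
 *        .pInheritanceInfo = &inherit,
 *    };
 *
 * which is the path handled by the RENDER_PASS_CONTINUE_BIT block above.
 */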
void radv_CmdBindVertexBuffers(
	VkCommandBuffer commandBuffer,
	uint32_t firstBinding,
	uint32_t bindingCount,
	const VkBuffer* pBuffers,
	const VkDeviceSize* pOffsets)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
	bool changed = false;

	/* We have to defer setting up vertex buffers since we need the
	 * buffer stride from the pipeline. */

	assert(firstBinding + bindingCount <= MAX_VBS);
	for (uint32_t i = 0; i < bindingCount; i++) {
		uint32_t idx = firstBinding + i;

		if (!changed &&
		    (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
		     vb[idx].offset != pOffsets[i])) {
			changed = true;
		}

		vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
		vb[idx].offset = pOffsets[i];

		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
				   vb[idx].buffer->bo);
	}

	if (!changed) {
		/* No state changes. */
		return;
	}

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
}
void radv_CmdBindIndexBuffer(
	VkCommandBuffer commandBuffer,
	VkBuffer buffer,
	VkDeviceSize offset,
	VkIndexType indexType)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);

	if (cmd_buffer->state.index_buffer == index_buffer &&
	    cmd_buffer->state.index_offset == offset &&
	    cmd_buffer->state.index_type == indexType) {
		/* No state changes. */
		return;
	}

	cmd_buffer->state.index_buffer = index_buffer;
	cmd_buffer->state.index_offset = offset;
	cmd_buffer->state.index_type = indexType; /* vk matches hw */
	cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
	cmd_buffer->state.index_va += index_buffer->offset + offset;

	int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
	cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo);
}
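/* index_size_shift above relies on VK_INDEX_TYPE_UINT16 == 0 and
 * VK_INDEX_TYPE_UINT32 == 1: a 100-byte range of 16-bit indices yields
 * 100 >> 1 = 50 indices, while 32-bit indices yield 100 >> 2 = 25.
 */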
static void
radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
			 VkPipelineBindPoint bind_point,
			 struct radv_descriptor_set *set, unsigned idx)
{
	struct radeon_winsys *ws = cmd_buffer->device->ws;

	radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);

	assert(set);
	assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));

	if (!cmd_buffer->device->use_global_bo_list) {
		for (unsigned j = 0; j < set->layout->buffer_count; ++j)
			if (set->descriptors[j])
				radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j]);
	}

	if (set->bo)
		radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo);
}
void radv_CmdBindDescriptorSets(
	VkCommandBuffer commandBuffer,
	VkPipelineBindPoint pipelineBindPoint,
	VkPipelineLayout _layout,
	uint32_t firstSet,
	uint32_t descriptorSetCount,
	const VkDescriptorSet* pDescriptorSets,
	uint32_t dynamicOffsetCount,
	const uint32_t* pDynamicOffsets)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
	unsigned dyn_idx = 0;

	const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);

	for (unsigned i = 0; i < descriptorSetCount; ++i) {
		unsigned idx = i + firstSet;
		RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
		radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);

		for (unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
			unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
			uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
			assert(dyn_idx < dynamicOffsetCount);

			struct radv_descriptor_range *range = set->dynamic_descriptors + j;
			uint64_t va = range->va + pDynamicOffsets[dyn_idx];
			dst[0] = va;
			dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
			dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
			dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
				 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
				 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
			cmd_buffer->push_constant_stages |=
				set->layout->dynamic_shader_stages;
		}
	}
}
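/* Each dynamic buffer becomes a 4-dword V# descriptor: dwords 0-1 hold
 * the 48-bit VA (descriptor base plus pDynamicOffsets[i]), dword 2 the
 * range (or ~0u under RADV_DEBUG_NO_DYNAMIC_BOUNDS to disable bounds
 * checking), and dword 3 the swizzle/format word built above.
 */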
static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
					  struct radv_descriptor_set *set,
					  struct radv_descriptor_set_layout *layout,
					  VkPipelineBindPoint bind_point)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);
	set->size = layout->size;
	set->layout = layout;

	if (descriptors_state->push_set.capacity < set->size) {
		size_t new_size = MAX2(set->size, 1024);
		new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
		new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);

		free(set->mapped_ptr);
		set->mapped_ptr = malloc(new_size);

		if (!set->mapped_ptr) {
			descriptors_state->push_set.capacity = 0;
			cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
			return false;
		}

		descriptors_state->push_set.capacity = new_size;
	}

	return true;
}
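/* The push-set backing store grows geometrically: at least 1024 bytes, at
 * least double the previous capacity, and capped at 96 bytes per
 * descriptor times MAX_PUSH_DESCRIPTORS, so repeated push-descriptor
 * updates do not reallocate on every call.
 */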
void radv_meta_push_descriptor_set(
	struct radv_cmd_buffer* cmd_buffer,
	VkPipelineBindPoint pipelineBindPoint,
	VkPipelineLayout _layout,
	uint32_t set,
	uint32_t descriptorWriteCount,
	const VkWriteDescriptorSet* pDescriptorWrites)
{
	RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
	struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
	unsigned bo_offset;

	assert(set == 0);
	assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);

	push_set->size = layout->set[set].layout->size;
	push_set->layout = layout->set[set].layout;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
					  &bo_offset,
					  (void**) &push_set->mapped_ptr))
		return;

	push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	push_set->va += bo_offset;

	radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
				    radv_descriptor_set_to_handle(push_set),
				    descriptorWriteCount, pDescriptorWrites, 0, NULL);

	radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
}
void radv_CmdPushDescriptorSetKHR(
	VkCommandBuffer commandBuffer,
	VkPipelineBindPoint pipelineBindPoint,
	VkPipelineLayout _layout,
	uint32_t set,
	uint32_t descriptorWriteCount,
	const VkWriteDescriptorSet* pDescriptorWrites)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
	struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;

	assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);

	if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
					   layout->set[set].layout,
					   pipelineBindPoint))
		return;

	radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
				    radv_descriptor_set_to_handle(push_set),
				    descriptorWriteCount, pDescriptorWrites, 0, NULL);

	radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
	descriptors_state->push_dirty = true;
}
void radv_CmdPushDescriptorSetWithTemplateKHR(
	VkCommandBuffer commandBuffer,
	VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
	VkPipelineLayout _layout,
	uint32_t set,
	const void* pData)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
	RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, templ->bind_point);
	struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;

	assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);

	if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
					   layout->set[set].layout,
					   templ->bind_point))
		return;

	radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
						 descriptorUpdateTemplate, pData);

	radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
	descriptors_state->push_dirty = true;
}
void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
			   VkPipelineLayout layout,
			   VkShaderStageFlags stageFlags,
			   uint32_t offset,
			   uint32_t size,
			   const void* pValues)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	memcpy(cmd_buffer->push_constants + offset, pValues, size);
	cmd_buffer->push_constant_stages |= stageFlags;
}
VkResult radv_EndCommandBuffer(
	VkCommandBuffer commandBuffer)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
			cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
		si_emit_cache_flush(cmd_buffer);
	}

	/* Make sure CP DMA is idle at the end of IBs because the kernel
	 * doesn't wait for it.
	 */
	si_cp_dma_wait_for_idle(cmd_buffer);

	vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);

	if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
		return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);

	cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;

	return cmd_buffer->record_result;
}
static void
radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;

	if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
		return;

	cmd_buffer->state.emitted_compute_pipeline = pipeline;

	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
	radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);

	cmd_buffer->compute_scratch_size_needed =
		MAX2(cmd_buffer->compute_scratch_size_needed,
		     pipeline->max_waves * pipeline->scratch_bytes_per_wave);

	radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
			   pipeline->shaders[MESA_SHADER_COMPUTE]->bo);

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
}
static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
					    VkPipelineBindPoint bind_point)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);

	descriptors_state->dirty |= descriptors_state->valid;
}
void radv_CmdBindPipeline(
	VkCommandBuffer commandBuffer,
	VkPipelineBindPoint pipelineBindPoint,
	VkPipeline _pipeline)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);

	switch (pipelineBindPoint) {
	case VK_PIPELINE_BIND_POINT_COMPUTE:
		if (cmd_buffer->state.compute_pipeline == pipeline)
			return;
		radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);

		cmd_buffer->state.compute_pipeline = pipeline;
		cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
		break;
	case VK_PIPELINE_BIND_POINT_GRAPHICS:
		if (cmd_buffer->state.pipeline == pipeline)
			return;
		radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);

		cmd_buffer->state.pipeline = pipeline;
		if (!pipeline)
			break;

		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
		cmd_buffer->push_constant_stages |= pipeline->active_stages;

		/* The new vertex shader might not have the same user regs. */
		cmd_buffer->state.last_first_instance = -1;
		cmd_buffer->state.last_vertex_offset = -1;

		/* Prefetch all pipeline shaders at first draw time. */
		cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;

		radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);

		if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
			cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
		if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
			cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;

		if (radv_pipeline_has_tess(pipeline))
			cmd_buffer->tess_rings_needed = true;
		break;
	default:
		assert(!"invalid bind point");
		break;
	}
}
void radv_CmdSetViewport(
	VkCommandBuffer commandBuffer,
	uint32_t firstViewport,
	uint32_t viewportCount,
	const VkViewport* pViewports)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_cmd_state *state = &cmd_buffer->state;
	MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;

	assert(firstViewport < MAX_VIEWPORTS);
	assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);

	memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
	       viewportCount * sizeof(*pViewports));

	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
}
void radv_CmdSetScissor(
	VkCommandBuffer commandBuffer,
	uint32_t firstScissor,
	uint32_t scissorCount,
	const VkRect2D* pScissors)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_cmd_state *state = &cmd_buffer->state;
	MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;

	assert(firstScissor < MAX_SCISSORS);
	assert(total_count >= 1 && total_count <= MAX_SCISSORS);

	memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
	       scissorCount * sizeof(*pScissors));

	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
}
void radv_CmdSetLineWidth(
	VkCommandBuffer commandBuffer,
	float lineWidth)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	cmd_buffer->state.dynamic.line_width = lineWidth;
	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
}
void radv_CmdSetDepthBias(
	VkCommandBuffer commandBuffer,
	float depthBiasConstantFactor,
	float depthBiasClamp,
	float depthBiasSlopeFactor)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
	cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
	cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
}
void radv_CmdSetBlendConstants(
	VkCommandBuffer commandBuffer,
	const float blendConstants[4])
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	memcpy(cmd_buffer->state.dynamic.blend_constants,
	       blendConstants, sizeof(float) * 4);

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
}
void radv_CmdSetDepthBounds(
	VkCommandBuffer commandBuffer,
	float minDepthBounds,
	float maxDepthBounds)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
	cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
}
void radv_CmdSetStencilCompareMask(
	VkCommandBuffer commandBuffer,
	VkStencilFaceFlags faceMask,
	uint32_t compareMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
}
void radv_CmdSetStencilWriteMask(
	VkCommandBuffer commandBuffer,
	VkStencilFaceFlags faceMask,
	uint32_t writeMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
}
void radv_CmdSetStencilReference(
	VkCommandBuffer commandBuffer,
	VkStencilFaceFlags faceMask,
	uint32_t reference)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
		cmd_buffer->state.dynamic.stencil_reference.front = reference;
	if (faceMask & VK_STENCIL_FACE_BACK_BIT)
		cmd_buffer->state.dynamic.stencil_reference.back = reference;

	cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
}
void radv_CmdSetDiscardRectangleEXT(
	VkCommandBuffer commandBuffer,
	uint32_t firstDiscardRectangle,
	uint32_t discardRectangleCount,
	const VkRect2D* pDiscardRectangles)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_cmd_state *state = &cmd_buffer->state;
	MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;

	assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
	assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);

	typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
		     pDiscardRectangles, discardRectangleCount);

	state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
}
void radv_CmdExecuteCommands(
	VkCommandBuffer commandBuffer,
	uint32_t commandBufferCount,
	const VkCommandBuffer* pCmdBuffers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);

	assert(commandBufferCount > 0);

	/* Emit pending flushes on primary prior to executing secondary. */
	si_emit_cache_flush(primary);

	for (uint32_t i = 0; i < commandBufferCount; i++) {
		RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);

		primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
						    secondary->scratch_size_needed);
		primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
							    secondary->compute_scratch_size_needed);

		if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
			primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
		if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
			primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
		if (secondary->tess_rings_needed)
			primary->tess_rings_needed = true;
		if (secondary->sample_positions_needed)
			primary->sample_positions_needed = true;

		primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);

		/* When the secondary command buffer is compute only we don't
		 * need to re-emit the current graphics pipeline.
		 */
		if (secondary->state.emitted_pipeline) {
			primary->state.emitted_pipeline =
				secondary->state.emitted_pipeline;
		}

		/* When the secondary command buffer is graphics only we don't
		 * need to re-emit the current compute pipeline.
		 */
		if (secondary->state.emitted_compute_pipeline) {
			primary->state.emitted_compute_pipeline =
				secondary->state.emitted_compute_pipeline;
		}

		/* Only re-emit the draw packets when needed. */
		if (secondary->state.last_primitive_reset_en != -1) {
			primary->state.last_primitive_reset_en =
				secondary->state.last_primitive_reset_en;
		}

		if (secondary->state.last_primitive_reset_index) {
			primary->state.last_primitive_reset_index =
				secondary->state.last_primitive_reset_index;
		}

		if (secondary->state.last_ia_multi_vgt_param) {
			primary->state.last_ia_multi_vgt_param =
				secondary->state.last_ia_multi_vgt_param;
		}

		primary->state.last_first_instance = secondary->state.last_first_instance;
		primary->state.last_num_instances = secondary->state.last_num_instances;
		primary->state.last_vertex_offset = secondary->state.last_vertex_offset;

		if (secondary->state.last_index_type != -1) {
			primary->state.last_index_type =
				secondary->state.last_index_type;
		}
	}

	/* After executing commands from secondary buffers we have to dirty
	 * some states.
	 */
	primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
				RADV_CMD_DIRTY_INDEX_BUFFER |
				RADV_CMD_DIRTY_DYNAMIC_ALL;
	radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
	radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
}
VkResult radv_CreateCommandPool(
	VkDevice _device,
	const VkCommandPoolCreateInfo* pCreateInfo,
	const VkAllocationCallbacks* pAllocator,
	VkCommandPool* pCmdPool)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	struct radv_cmd_pool *pool;

	pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
			 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (pool == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	if (pAllocator)
		pool->alloc = *pAllocator;
	else
		pool->alloc = device->alloc;

	list_inithead(&pool->cmd_buffers);
	list_inithead(&pool->free_cmd_buffers);

	pool->queue_family_index = pCreateInfo->queueFamilyIndex;

	*pCmdPool = radv_cmd_pool_to_handle(pool);

	return VK_SUCCESS;
}
void radv_DestroyCommandPool(
	VkDevice _device,
	VkCommandPool commandPool,
	const VkAllocationCallbacks* pAllocator)
{
	RADV_FROM_HANDLE(radv_device, device, _device);
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

	if (!pool)
		return;

	list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
				 &pool->cmd_buffers, pool_link) {
		radv_cmd_buffer_destroy(cmd_buffer);
	}

	list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
				 &pool->free_cmd_buffers, pool_link) {
		radv_cmd_buffer_destroy(cmd_buffer);
	}

	vk_free2(&device->alloc, pAllocator, pool);
}
VkResult radv_ResetCommandPool(
	VkDevice device,
	VkCommandPool commandPool,
	VkCommandPoolResetFlags flags)
{
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
	VkResult result;

	list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
			    &pool->cmd_buffers, pool_link) {
		result = radv_reset_cmd_buffer(cmd_buffer);
		if (result != VK_SUCCESS)
			return result;
	}

	return VK_SUCCESS;
}
void radv_TrimCommandPool(
	VkDevice device,
	VkCommandPool commandPool,
	VkCommandPoolTrimFlagsKHR flags)
{
	RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);

	if (!pool)
		return;

	list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
				 &pool->free_cmd_buffers, pool_link) {
		radv_cmd_buffer_destroy(cmd_buffer);
	}
}
void radv_CmdBeginRenderPass(
	VkCommandBuffer commandBuffer,
	const VkRenderPassBeginInfo* pRenderPassBegin,
	VkSubpassContents contents)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
	RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, 2048);
	MAYBE_UNUSED VkResult result;

	cmd_buffer->state.framebuffer = framebuffer;
	cmd_buffer->state.pass = pass;
	cmd_buffer->state.render_area = pRenderPassBegin->renderArea;

	result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
	if (result != VK_SUCCESS)
		return;

	radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
	assert(cmd_buffer->cs->cdw <= cdw_max);

	radv_cmd_buffer_clear_subpass(cmd_buffer);
}
void radv_CmdBeginRenderPass2KHR(
	VkCommandBuffer commandBuffer,
	const VkRenderPassBeginInfo* pRenderPassBeginInfo,
	const VkSubpassBeginInfoKHR* pSubpassBeginInfo)
{
	radv_CmdBeginRenderPass(commandBuffer, pRenderPassBeginInfo,
				pSubpassBeginInfo->contents);
}
void radv_CmdNextSubpass(
	VkCommandBuffer commandBuffer,
	VkSubpassContents contents)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	radv_cmd_buffer_resolve_subpass(cmd_buffer);

	radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
			   2048);

	radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
	radv_cmd_buffer_clear_subpass(cmd_buffer);
}
void radv_CmdNextSubpass2KHR(
	VkCommandBuffer commandBuffer,
	const VkSubpassBeginInfoKHR* pSubpassBeginInfo,
	const VkSubpassEndInfoKHR* pSubpassEndInfo)
{
	radv_CmdNextSubpass(commandBuffer, pSubpassBeginInfo->contents);
}
static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
		if (!radv_get_shader(pipeline, stage))
			continue;

		struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
		if (loc->sgpr_idx == -1)
			continue;
		uint32_t base_reg = pipeline->user_data_0[stage];
		radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
	}
	if (pipeline->gs_copy_shader) {
		struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
		if (loc->sgpr_idx != -1) {
			uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
			radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
		}
	}
}
static void
radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
			 uint32_t vertex_count)
{
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
	radeon_emit(cmd_buffer->cs, vertex_count);
	radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
		                    S_0287F0_USE_OPAQUE(0));
}
static void
radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
				 uint64_t index_va,
				 uint32_t index_count)
{
	radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, cmd_buffer->state.predicating));
	radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
	radeon_emit(cmd_buffer->cs, index_va);
	radeon_emit(cmd_buffer->cs, index_va >> 32);
	radeon_emit(cmd_buffer->cs, index_count);
	radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
}
static void
radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
				  bool indexed,
				  uint32_t draw_count,
				  uint64_t count_va,
				  uint32_t stride)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
				      : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
	bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
	uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
	bool predicating = cmd_buffer->state.predicating;
	assert(base_reg);

	/* Just reset draw state for vertex data. */
	cmd_buffer->state.last_first_instance = -1;
	cmd_buffer->state.last_num_instances = -1;
	cmd_buffer->state.last_vertex_offset = -1;

	if (draw_count == 1 && !count_va && !draw_id_enable) {
		radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
				     PKT3_DRAW_INDIRECT, 3, predicating));
		radeon_emit(cs, 0);
		radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
		radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
		radeon_emit(cs, di_src_sel);
	} else {
		radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
				     PKT3_DRAW_INDIRECT_MULTI,
				     8, predicating));
		radeon_emit(cs, 0);
		radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
		radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
		radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
			    S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
			    S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
		radeon_emit(cs, draw_count); /* count */
		radeon_emit(cs, count_va); /* count_addr */
		radeon_emit(cs, count_va >> 32);
		radeon_emit(cs, stride); /* stride */
		radeon_emit(cs, di_src_sel);
	}
}
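/* The single-draw fast path (DRAW_INDIRECT/DRAW_INDEX_INDIRECT) is only
 * usable with exactly one draw, no count buffer and no gl_DrawID user;
 * everything else takes the MULTI variant, whose extra dwords carry the
 * draw count, the optional count-buffer address and the per-draw stride.
 */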
struct radv_draw_info {
	/**
	 * Number of vertices.
	 */
	uint32_t count;

	/**
	 * Index of the first vertex.
	 */
	int32_t vertex_offset;

	/**
	 * First instance id.
	 */
	uint32_t first_instance;

	/**
	 * Number of instances.
	 */
	uint32_t instance_count;

	/**
	 * First index (indexed draws only).
	 */
	uint32_t first_index;

	/**
	 * Whether it's an indexed draw.
	 */
	bool indexed;

	/**
	 * Indirect draw parameters resource.
	 */
	struct radv_buffer *indirect;
	uint64_t indirect_offset;
	uint32_t stride;

	/**
	 * Draw count parameters resource.
	 */
	struct radv_buffer *count_buffer;
	uint64_t count_buffer_offset;
};
static void
radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
		       const struct radv_draw_info *info)
{
	struct radv_cmd_state *state = &cmd_buffer->state;
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	struct radeon_cmdbuf *cs = cmd_buffer->cs;

	if (info->indirect) {
		uint64_t va = radv_buffer_get_va(info->indirect->bo);
		uint64_t count_va = 0;

		va += info->indirect->offset + info->indirect_offset;

		radv_cs_add_buffer(ws, cs, info->indirect->bo);

		radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
		radeon_emit(cs, 1);
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);

		if (info->count_buffer) {
			count_va = radv_buffer_get_va(info->count_buffer->bo);
			count_va += info->count_buffer->offset +
				    info->count_buffer_offset;

			radv_cs_add_buffer(ws, cs, info->count_buffer->bo);
		}

		if (!state->subpass->view_mask) {
			radv_cs_emit_indirect_draw_packet(cmd_buffer,
							  info->indexed,
							  info->count,
							  count_va,
							  info->stride);
		} else {
			unsigned i;
			for_each_bit(i, state->subpass->view_mask) {
				radv_emit_view_index(cmd_buffer, i);

				radv_cs_emit_indirect_draw_packet(cmd_buffer,
								  info->indexed,
								  info->count,
								  count_va,
								  info->stride);
			}
		}
	} else {
		assert(state->pipeline->graphics.vtx_base_sgpr);

		if (info->vertex_offset != state->last_vertex_offset ||
		    info->first_instance != state->last_first_instance) {
			radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
					      state->pipeline->graphics.vtx_emit_num);

			radeon_emit(cs, info->vertex_offset);
			radeon_emit(cs, info->first_instance);
			if (state->pipeline->graphics.vtx_emit_num == 3)
				radeon_emit(cs, 0);
			state->last_first_instance = info->first_instance;
			state->last_vertex_offset = info->vertex_offset;
		}

		if (state->last_num_instances != info->instance_count) {
			radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
			radeon_emit(cs, info->instance_count);
			state->last_num_instances = info->instance_count;
		}

		if (info->indexed) {
			int index_size = state->index_type ? 4 : 2;
			uint64_t index_va;

			index_va = state->index_va;
			index_va += info->first_index * index_size;

			if (!state->subpass->view_mask) {
				radv_cs_emit_draw_indexed_packet(cmd_buffer,
								 index_va,
								 info->count);
			} else {
				unsigned i;
				for_each_bit(i, state->subpass->view_mask) {
					radv_emit_view_index(cmd_buffer, i);

					radv_cs_emit_draw_indexed_packet(cmd_buffer,
									 index_va,
									 info->count);
				}
			}
		} else {
			if (!state->subpass->view_mask) {
				radv_cs_emit_draw_packet(cmd_buffer, info->count);
			} else {
				unsigned i;
				for_each_bit(i, state->subpass->view_mask) {
					radv_emit_view_index(cmd_buffer, i);

					radv_cs_emit_draw_packet(cmd_buffer,
								 info->count);
				}
			}
		}
	}
}
/*
 * Vega and raven have a bug which triggers if there are multiple context
 * register contexts active at the same time with different scissor values.
 *
 * There are two possible workarounds:
 * 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
 *    there is only ever 1 active set of scissor values at the same time.
 *
 * 2) Whenever the hardware switches contexts we have to set the scissor
 *    registers again even if it is a noop. That way the new context gets
 *    the correct scissor values.
 *
 * This implements option 2. radv_need_late_scissor_emission needs to
 * return true on affected HW if radv_emit_all_graphics_states sets
 * any context registers.
 */
static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
					    bool indexed_draw)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	if (!cmd_buffer->device->physical_device->has_scissor_bug)
		return false;

	uint32_t used_states = cmd_buffer->state.pipeline->graphics.needed_dynamic_state | ~RADV_CMD_DIRTY_DYNAMIC_ALL;

	/* Index & vertex buffers don't change context regs, and the pipeline is handled later. */
	used_states &= ~(RADV_CMD_DIRTY_INDEX_BUFFER | RADV_CMD_DIRTY_VERTEX_BUFFER | RADV_CMD_DIRTY_PIPELINE);

	/* Assume all state changes except these two can imply context rolls. */
	if (cmd_buffer->state.dirty & used_states)
		return true;

	if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
		return true;

	if (indexed_draw && state->pipeline->graphics.prim_restart_enable &&
	    (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
		return true;

	return false;
}
static void
radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
			      const struct radv_draw_info *info)
{
	bool late_scissor_emission = radv_need_late_scissor_emission(cmd_buffer, info->indexed);

	if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
	    cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
		radv_emit_rbplus_state(cmd_buffer);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
		radv_emit_graphics_pipeline(cmd_buffer);

	if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
		radv_emit_framebuffer_state(cmd_buffer);

	if (info->indexed) {
		if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
			radv_emit_index_buffer(cmd_buffer);
	} else {
		/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
		 * so the state must be re-emitted before the next indexed
		 * draw.
		 */
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			cmd_buffer->state.last_index_type = -1;
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
		}
	}

	radv_cmd_buffer_flush_dynamic_state(cmd_buffer);

	radv_emit_draw_registers(cmd_buffer, info->indexed,
				 info->instance_count > 1, info->indirect,
				 info->indirect ? 0 : info->count);

	if (late_scissor_emission)
		radv_emit_scissor(cmd_buffer);
}
static void
radv_draw(struct radv_cmd_buffer *cmd_buffer,
	  const struct radv_draw_info *info)
{
	bool has_prefetch =
		cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
	bool pipeline_is_dirty =
		(cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
		cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;

	MAYBE_UNUSED unsigned cdw_max =
		radeon_check_space(cmd_buffer->device->ws,
				   cmd_buffer->cs, 4096);

	/* Use optimal packet order based on whether we need to sync the
	 * pipeline.
	 */
	if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					    RADV_CMD_FLAG_FLUSH_AND_INV_DB |
					    RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
					    RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
		/* If we have to wait for idle, set all states first, so that
		 * all SET packets are processed in parallel with previous draw
		 * calls. Then upload descriptors, set shader pointers, and
		 * draw, and prefetch at the end. This ensures that the time
		 * the CUs are idle is very short. (there are only SET_SH
		 * packets between the wait and the draw)
		 */
		radv_emit_all_graphics_states(cmd_buffer, info);
		si_emit_cache_flush(cmd_buffer);
		/* <-- CUs are idle here --> */

		radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);

		radv_emit_draw_packets(cmd_buffer, info);
		/* <-- CUs are busy here --> */

		/* Start prefetches after the draw has been started. Both will
		 * run in parallel, but starting the draw first is more
		 * important.
		 */
		if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
			radv_emit_prefetch_L2(cmd_buffer,
					      cmd_buffer->state.pipeline, false);
		}
	} else {
		/* If we don't wait for idle, start prefetches first, then set
		 * states, and draw at the end.
		 */
		si_emit_cache_flush(cmd_buffer);

		if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
			/* Only prefetch the vertex shader and VBO descriptors
			 * in order to start the draw as soon as possible.
			 */
			radv_emit_prefetch_L2(cmd_buffer,
					      cmd_buffer->state.pipeline, true);
		}

		radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);

		radv_emit_all_graphics_states(cmd_buffer, info);
		radv_emit_draw_packets(cmd_buffer, info);

		/* Prefetch the remaining shaders after the draw has been
		 * started.
		 */
		if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
			radv_emit_prefetch_L2(cmd_buffer,
					      cmd_buffer->state.pipeline, false);
		}
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
	radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
}
void radv_CmdDraw(
	VkCommandBuffer commandBuffer,
	uint32_t vertexCount,
	uint32_t instanceCount,
	uint32_t firstVertex,
	uint32_t firstInstance)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_draw_info info = {};

	info.count = vertexCount;
	info.instance_count = instanceCount;
	info.first_instance = firstInstance;
	info.vertex_offset = firstVertex;

	radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndexed(
	VkCommandBuffer commandBuffer,
	uint32_t indexCount,
	uint32_t instanceCount,
	uint32_t firstIndex,
	int32_t vertexOffset,
	uint32_t firstInstance)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_draw_info info = {};

	info.indexed = true;
	info.count = indexCount;
	info.instance_count = instanceCount;
	info.first_index = firstIndex;
	info.vertex_offset = vertexOffset;
	info.first_instance = firstInstance;

	radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndirect(
	VkCommandBuffer commandBuffer,
	VkBuffer _buffer,
	VkDeviceSize offset,
	uint32_t drawCount,
	uint32_t stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	struct radv_draw_info info = {};

	info.count = drawCount;
	info.indirect = buffer;
	info.indirect_offset = offset;
	info.stride = stride;

	radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndexedIndirect(
	VkCommandBuffer commandBuffer,
	VkBuffer _buffer,
	VkDeviceSize offset,
	uint32_t drawCount,
	uint32_t stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	struct radv_draw_info info = {};

	info.indexed = true;
	info.count = drawCount;
	info.indirect = buffer;
	info.indirect_offset = offset;
	info.stride = stride;

	radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndirectCountAMD(
	VkCommandBuffer commandBuffer,
	VkBuffer _buffer,
	VkDeviceSize offset,
	VkBuffer _countBuffer,
	VkDeviceSize countBufferOffset,
	uint32_t maxDrawCount,
	uint32_t stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
	struct radv_draw_info info = {};

	info.count = maxDrawCount;
	info.indirect = buffer;
	info.indirect_offset = offset;
	info.count_buffer = count_buffer;
	info.count_buffer_offset = countBufferOffset;
	info.stride = stride;

	radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndexedIndirectCountAMD(
	VkCommandBuffer commandBuffer,
	VkBuffer _buffer,
	VkDeviceSize offset,
	VkBuffer _countBuffer,
	VkDeviceSize countBufferOffset,
	uint32_t maxDrawCount,
	uint32_t stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
	struct radv_draw_info info = {};

	info.indexed = true;
	info.count = maxDrawCount;
	info.indirect = buffer;
	info.indirect_offset = offset;
	info.count_buffer = count_buffer;
	info.count_buffer_offset = countBufferOffset;
	info.stride = stride;

	radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndirectCountKHR(
	VkCommandBuffer commandBuffer,
	VkBuffer _buffer,
	VkDeviceSize offset,
	VkBuffer _countBuffer,
	VkDeviceSize countBufferOffset,
	uint32_t maxDrawCount,
	uint32_t stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
	struct radv_draw_info info = {};

	info.count = maxDrawCount;
	info.indirect = buffer;
	info.indirect_offset = offset;
	info.count_buffer = count_buffer;
	info.count_buffer_offset = countBufferOffset;
	info.stride = stride;

	radv_draw(cmd_buffer, &info);
}
void radv_CmdDrawIndexedIndirectCountKHR(
	VkCommandBuffer commandBuffer,
	VkBuffer _buffer,
	VkDeviceSize offset,
	VkBuffer _countBuffer,
	VkDeviceSize countBufferOffset,
	uint32_t maxDrawCount,
	uint32_t stride)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
	struct radv_draw_info info = {};

	info.indexed = true;
	info.count = maxDrawCount;
	info.indirect = buffer;
	info.indirect_offset = offset;
	info.count_buffer = count_buffer;
	info.count_buffer_offset = countBufferOffset;
	info.stride = stride;

	radv_draw(cmd_buffer, &info);
}
struct radv_dispatch_info {
	/**
	 * Determine the layout of the grid (in block units) to be used.
	 */
	uint32_t blocks[3];

	/**
	 * A starting offset for the grid. If unaligned is set, the offset
	 * must still be aligned.
	 */
	uint32_t offsets[3];

	/**
	 * Whether it's an unaligned compute dispatch.
	 */
	bool unaligned;

	/**
	 * Indirect compute parameters resource.
	 */
	struct radv_buffer *indirect;
	uint64_t indirect_offset;
};
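/* Worked example for the unaligned path below: dispatching 10x1x1 threads
 * with an 8x1x1 block size gives remainder[0] = 10 + 8 - align(10, 8) =
 * 10 + 8 - 16 = 2 threads in the final, partial group, and blocks[0]
 * rounds up to 2 groups, with PARTIAL_TG_EN set in the dispatch initiator.
 */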

static void
radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
			   const struct radv_dispatch_info *info)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
	struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
	unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
	struct radeon_winsys *ws = cmd_buffer->device->ws;
	bool predicating = cmd_buffer->state.predicating;
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	struct radv_userdata_info *loc;

	loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
				    AC_UD_CS_GRID_SIZE);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);

	if (info->indirect) {
		uint64_t va = radv_buffer_get_va(info->indirect->bo);

		va += info->indirect->offset + info->indirect_offset;

		radv_cs_add_buffer(ws, cs, info->indirect->bo);

		if (loc->sgpr_idx != -1) {
			for (unsigned i = 0; i < 3; ++i) {
				radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
				radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
						COPY_DATA_DST_SEL(COPY_DATA_REG));
				radeon_emit(cs, (va + 4 * i));
				radeon_emit(cs, (va + 4 * i) >> 32);
				radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
						 + loc->sgpr_idx * 4) >> 2) + i);
				radeon_emit(cs, 0);
			}
		}
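
		/* Two cases below: on the compute ring (MEC), the
		 * DISPATCH_INDIRECT packet accepts the 64-bit address of the
		 * arguments directly, while the GFX ring variant only takes
		 * an offset, so the address is first programmed as a base
		 * via SET_BASE and the dispatch uses offset 0.
		 */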
		if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
			radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, predicating) |
					PKT3_SHADER_TYPE_S(1));
			radeon_emit(cs, va);
			radeon_emit(cs, va >> 32);
			radeon_emit(cs, dispatch_initiator);
		} else {
			radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
					PKT3_SHADER_TYPE_S(1));
			radeon_emit(cs, 1);
			radeon_emit(cs, va);
			radeon_emit(cs, va >> 32);

			radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, predicating) |
					PKT3_SHADER_TYPE_S(1));
			radeon_emit(cs, 0);
			radeon_emit(cs, dispatch_initiator);
		}
	} else {
		unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
		unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };

		if (info->unaligned) {
			unsigned *cs_block_size = compute_shader->info.cs.block_size;
			unsigned remainder[3];

			/* If aligned, these should be an entire block size,
			 * not 0.
			 */
			remainder[0] = blocks[0] + cs_block_size[0] -
				       align_u32_npot(blocks[0], cs_block_size[0]);
			remainder[1] = blocks[1] + cs_block_size[1] -
				       align_u32_npot(blocks[1], cs_block_size[1]);
			remainder[2] = blocks[2] + cs_block_size[2] -
				       align_u32_npot(blocks[2], cs_block_size[2]);
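
			/* Worked example (illustrative): with blocks[0] = 7
			 * threads and cs_block_size[0] = 4,
			 * align_u32_npot(7, 4) = 8, so remainder[0] =
			 * 7 + 4 - 8 = 3 threads in the last, partial group.
			 * For an aligned count such as blocks[0] = 8,
			 * remainder[0] = 8 + 4 - 8 = 4, i.e. a full block
			 * rather than 0, as required above.
			 */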

			blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
			blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
			blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);

			for (unsigned i = 0; i < 3; ++i) {
				assert(offsets[i] % cs_block_size[i] == 0);
				offsets[i] /= cs_block_size[i];
			}

			radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
			radeon_emit(cs,
				    S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
				    S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
			radeon_emit(cs,
				    S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
				    S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
			radeon_emit(cs,
				    S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
				    S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));

			dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
		}

		if (loc->sgpr_idx != -1) {
			assert(!loc->indirect);
			assert(loc->num_sgprs == 3);

			radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
						  loc->sgpr_idx * 4, 3);
			radeon_emit(cs, blocks[0]);
			radeon_emit(cs, blocks[1]);
			radeon_emit(cs, blocks[2]);
		}

		if (offsets[0] || offsets[1] || offsets[2]) {
			radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
			radeon_emit(cs, offsets[0]);
			radeon_emit(cs, offsets[1]);
			radeon_emit(cs, offsets[2]);

			/* The blocks in the packet are not counts but end values. */
			for (unsigned i = 0; i < 3; ++i)
				blocks[i] += offsets[i];
		} else {
			dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
		}

		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, predicating) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, blocks[0]);
		radeon_emit(cs, blocks[1]);
		radeon_emit(cs, blocks[2]);
		radeon_emit(cs, dispatch_initiator);
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void
radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
	radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
	radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
}

static void
radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
	      const struct radv_dispatch_info *info)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
	bool has_prefetch =
		cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
	bool pipeline_is_dirty = pipeline &&
				 pipeline != cmd_buffer->state.emitted_compute_pipeline;

	if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					    RADV_CMD_FLAG_FLUSH_AND_INV_DB |
					    RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
					    RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
		/* If we have to wait for idle, set all states first, so that
		 * all SET packets are processed in parallel with previous
		 * draw calls. Then upload descriptors, set shader pointers,
		 * dispatch, and prefetch at the end. This ensures that the
		 * time the CUs are idle is as short as possible (there are
		 * only SET_SH packets between the wait and the dispatch).
		 */
		radv_emit_compute_pipeline(cmd_buffer);
		si_emit_cache_flush(cmd_buffer);
		/* <-- CUs are idle here --> */

		radv_upload_compute_shader_descriptors(cmd_buffer);

		radv_emit_dispatch_packets(cmd_buffer, info);
		/* <-- CUs are busy here --> */

		/* Start prefetches after the dispatch has been started. Both
		 * will run in parallel, but starting the dispatch first is
		 * more important.
		 */
		if (has_prefetch && pipeline_is_dirty) {
			radv_emit_shader_prefetch(cmd_buffer,
						  pipeline->shaders[MESA_SHADER_COMPUTE]);
		}
	} else {
		/* If we don't wait for idle, start prefetches first, then set
		 * states, and dispatch at the end.
		 */
		si_emit_cache_flush(cmd_buffer);

		if (has_prefetch && pipeline_is_dirty) {
			radv_emit_shader_prefetch(cmd_buffer,
						  pipeline->shaders[MESA_SHADER_COMPUTE]);
		}

		radv_upload_compute_shader_descriptors(cmd_buffer);

		radv_emit_compute_pipeline(cmd_buffer);
		radv_emit_dispatch_packets(cmd_buffer, info);
	}

	radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
}

void radv_CmdDispatchBase(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    base_x,
	uint32_t                                    base_y,
	uint32_t                                    base_z,
	uint32_t                                    x,
	uint32_t                                    y,
	uint32_t                                    z)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_dispatch_info info = {};

	info.blocks[0] = x;
	info.blocks[1] = y;
	info.blocks[2] = z;

	info.offsets[0] = base_x;
	info.offsets[1] = base_y;
	info.offsets[2] = base_z;
	radv_dispatch(cmd_buffer, &info);
}

void radv_CmdDispatch(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    x,
	uint32_t                                    y,
	uint32_t                                    z)
{
	radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void radv_CmdDispatchIndirect(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    _buffer,
	VkDeviceSize                                offset)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	struct radv_dispatch_info info = {};

	info.indirect = buffer;
	info.indirect_offset = offset;

	radv_dispatch(cmd_buffer, &info);
}
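
/* radv_unaligned_dispatch() below is not a Vulkan entry point: it is
 * used internally by the driver (e.g. for meta operations) when the
 * total thread count is not necessarily a multiple of the shader's
 * workgroup size, so x/y/z here are thread counts, not block counts.
 */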
void radv_unaligned_dispatch(
	struct radv_cmd_buffer                      *cmd_buffer,
	uint32_t                                    x,
	uint32_t                                    y,
	uint32_t                                    z)
{
	struct radv_dispatch_info info = {};

	info.blocks[0] = x;
	info.blocks[1] = y;
	info.blocks[2] = z;
	info.unaligned = 1;

	radv_dispatch(cmd_buffer, &info);
}

void radv_CmdEndRenderPass(
	VkCommandBuffer                             commandBuffer)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);

	radv_cmd_buffer_resolve_subpass(cmd_buffer);

	for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
		VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
		radv_handle_subpass_image_transition(cmd_buffer,
				      (struct radv_subpass_attachment){i, layout});
	}

	vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);

	cmd_buffer->state.pass = NULL;
	cmd_buffer->state.subpass = NULL;
	cmd_buffer->state.attachments = NULL;
	cmd_buffer->state.framebuffer = NULL;
}

void radv_CmdEndRenderPass2KHR(
	VkCommandBuffer                             commandBuffer,
	const VkSubpassEndInfoKHR*                  pSubpassEndInfo)
{
	radv_CmdEndRenderPass(commandBuffer);
}

/*
 * For HTILE we have the following interesting clear words:
 * 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
 * 0xfffc000f: Uncompressed, full depth range, for depth only HTILE.
 * 0xfffffff0: Clear depth to 1.0
 * 0x00000000: Clear depth to 0.0
 */
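
/* The two "uncompressed" words differ because the HTILE encoding is
 * not the same for depth-only and depth+stencil surfaces: with
 * stencil present, part of each HTILE word holds stencil metadata.
 * See radv_handle_depth_image_transition() below for where each word
 * is selected.
 */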
static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image,
				  const VkImageSubresourceRange *range,
				  uint32_t clear_word)
{
	assert(range->baseMipLevel == 0);
	assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
	unsigned layer_count = radv_get_layerCount(image, range);
	uint64_t size = image->surface.htile_slice_size * layer_count;
	VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
	uint64_t offset = image->offset + image->htile_offset +
			  image->surface.htile_slice_size * range->baseArrayLayer;
	struct radv_cmd_state *state = &cmd_buffer->state;
	VkClearDepthStencilValue value = {};

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

	state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
					      size, clear_word);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

	if (vk_format_is_stencil(image->vk_format))
		aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;

	radv_set_ds_clear_metadata(cmd_buffer, image, value, aspects);
}

static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       unsigned src_queue_mask,
					       unsigned dst_queue_mask,
					       const VkImageSubresourceRange *range,
					       VkImageAspectFlags pending_clears)
{
	if (!radv_image_has_htile(image))
		return;

	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
	    radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
		/* TODO: merge with the clear if applicable */
		radv_initialize_htile(cmd_buffer, image, range, 0);
	} else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
		   radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
		uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
		radv_initialize_htile(cmd_buffer, image, range, clear_value);
	} else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
		   !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
		VkImageSubresourceRange local_range = *range;
		local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		local_range.baseMipLevel = 0;
		local_range.levelCount = 1;

		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
						RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

		radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);

		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
						RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
	}
}
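
/* Example: a barrier taking a depth image from
 * VK_IMAGE_LAYOUT_UNDEFINED to a layout for which
 * radv_layout_has_htile() returns true hits the first branch above
 * and initializes HTILE to 0, while leaving a compressed layout for
 * an uncompressed one (per radv_layout_is_htile_compressed())
 * triggers the in-place depth decompress.
 */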

static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image, uint32_t value)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}

void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image, uint32_t value)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}

/**
 * Initialize DCC/FMASK/CMASK metadata for a color image.
 */
static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
					   struct radv_image *image,
					   VkImageLayout src_layout,
					   VkImageLayout dst_layout,
					   unsigned src_queue_mask,
					   unsigned dst_queue_mask)
{
	if (radv_image_has_cmask(image)) {
		uint32_t value = 0xffffffffu; /* Fully expanded mode. */

		/* TODO: clarify this. */
		if (radv_image_has_fmask(image)) {
			value = 0xccccccccu;
		}

		radv_initialise_cmask(cmd_buffer, image, value);
	}

	if (radv_image_has_dcc(image)) {
		uint32_t value = 0xffffffffu; /* Fully expanded mode. */
		bool need_decompress_pass = false;

		if (radv_layout_dcc_compressed(image, dst_layout,
					       dst_queue_mask)) {
			value = 0x20202020u;
			need_decompress_pass = true;
		}

		radv_initialize_dcc(cmd_buffer, image, value);

		radv_set_dcc_need_cmask_elim_pred(cmd_buffer, image,
						  need_decompress_pass);
	}

	if (radv_image_has_cmask(image) || radv_image_has_dcc(image)) {
		uint32_t color_values[2] = {};
		radv_set_color_clear_metadata(cmd_buffer, image, color_values);
	}
}

/**
 * Handle color image transitions for DCC/FMASK/CMASK.
 */
static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       unsigned src_queue_mask,
					       unsigned dst_queue_mask,
					       const VkImageSubresourceRange *range)
{
	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
		radv_init_color_image_metadata(cmd_buffer, image,
					       src_layout, dst_layout,
					       src_queue_mask, dst_queue_mask);
		return;
	}

	if (radv_image_has_dcc(image)) {
		if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
			radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
		} else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
			   !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
			radv_decompress_dcc(cmd_buffer, image, range);
		} else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
			   !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
			radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
		}
	} else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
		if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
		    !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
			radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
		}
	}
}
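
/* Example: transitioning an image out of a fast-clearable layout (per
 * radv_layout_can_fast_clear()) flushes the fast-clear values in place
 * above, so that reads in the new layout see the actual clear color
 * rather than unresolved clear metadata.
 */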

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears)
{
	if (image->exclusive && src_family != dst_family) {
		/* This is an acquire or a release operation and there will be
		 * a corresponding release/acquire. Do the transition in the
		 * most flexible queue. */

		assert(src_family == cmd_buffer->queue_family_index ||
		       dst_family == cmd_buffer->queue_family_index);

		if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
			return;

		if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
		    (src_family == RADV_QUEUE_GENERAL ||
		     dst_family == RADV_QUEUE_GENERAL))
			return;
	}

	unsigned src_queue_mask =
		radv_image_queue_family_mask(image, src_family,
					     cmd_buffer->queue_family_index);
	unsigned dst_queue_mask =
		radv_image_queue_family_mask(image, dst_family,
					     cmd_buffer->queue_family_index);

	if (vk_format_is_depth(image->vk_format)) {
		radv_handle_depth_image_transition(cmd_buffer, image,
						   src_layout, dst_layout,
						   src_queue_mask, dst_queue_mask,
						   range, pending_clears);
	} else {
		radv_handle_color_image_transition(cmd_buffer, image,
						   src_layout, dst_layout,
						   src_queue_mask, dst_queue_mask,
						   range);
	}
}
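
/* Example: for a VK_SHARING_MODE_EXCLUSIVE image moving from
 * RADV_QUEUE_GENERAL to RADV_QUEUE_COMPUTE, the application records a
 * release barrier on the general queue and an acquire barrier on the
 * compute queue; the early returns above make only the general (most
 * flexible) queue perform the actual transition.
 */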

struct radv_barrier_info {
	uint32_t eventCount;
	const VkEvent *pEvents;
	VkPipelineStageFlags srcStageMask;
};

static void
radv_barrier(struct radv_cmd_buffer *cmd_buffer,
	     uint32_t memoryBarrierCount,
	     const VkMemoryBarrier *pMemoryBarriers,
	     uint32_t bufferMemoryBarrierCount,
	     const VkBufferMemoryBarrier *pBufferMemoryBarriers,
	     uint32_t imageMemoryBarrierCount,
	     const VkImageMemoryBarrier *pImageMemoryBarriers,
	     const struct radv_barrier_info *info)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	enum radv_cmd_flush_bits src_flush_bits = 0;
	enum radv_cmd_flush_bits dst_flush_bits = 0;

	for (unsigned i = 0; i < info->eventCount; ++i) {
		RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
		uint64_t va = radv_buffer_get_va(event->bo);

		radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);

		MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);

		si_emit_wait_fence(cs, va, 1, 0xffffffff);
		assert(cmd_buffer->cs->cdw <= cdw_max);
	}
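
	/* Each VkEvent is backed by a small buffer object: write_event()
	 * below sets it to 1 on vkCmdSetEvent() and 0 on vkCmdResetEvent(),
	 * so waiting is simply polling that memory for the value 1 via the
	 * wait emitted by si_emit_wait_fence() above.
	 */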

	for (uint32_t i = 0; i < memoryBarrierCount; i++) {
		src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
							NULL);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
							NULL);
	}

	for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
		src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
							NULL);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
							NULL);
	}

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);

		src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
							image);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
							image);
	}

	radv_stage_flush(cmd_buffer, info->srcStageMask);
	cmd_buffer->state.flush_bits |= src_flush_bits;

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
					     &pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	/* Make sure CP DMA is idle because the driver might have performed a
	 * DMA operation for copying or filling buffers/images.
	 */
	si_cp_dma_wait_for_idle(cmd_buffer);

	cmd_buffer->state.flush_bits |= dst_flush_bits;
}

void radv_CmdPipelineBarrier(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlags                        srcStageMask,
	VkPipelineStageFlags                        destStageMask,
	VkDependencyFlags                           dependencyFlags,
	uint32_t                                    memoryBarrierCount,
	const VkMemoryBarrier*                      pMemoryBarriers,
	uint32_t                                    bufferMemoryBarrierCount,
	const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
	uint32_t                                    imageMemoryBarrierCount,
	const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_barrier_info info;

	info.eventCount = 0;
	info.pEvents = NULL;
	info.srcStageMask = srcStageMask;

	radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
		     bufferMemoryBarrierCount, pBufferMemoryBarriers,
		     imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}
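
/* Illustrative usage (application side; cmd and image are application
 * handles): a typical barrier that reaches
 * radv_handle_image_transition() above:
 *
 *	VkImageMemoryBarrier barrier = {
 *		.sType = VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
 *		.srcAccessMask = VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
 *		.dstAccessMask = VK_ACCESS_SHADER_READ_BIT,
 *		.oldLayout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,
 *		.newLayout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
 *		.srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
 *		.dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED,
 *		.image = image,
 *		.subresourceRange = { VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1 },
 *	};
 *	vkCmdPipelineBarrier(cmd, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
 *			     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, 0,
 *			     0, NULL, 0, NULL, 1, &barrier);
 */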

static void write_event(struct radv_cmd_buffer *cmd_buffer,
			struct radv_event *event,
			VkPipelineStageFlags stageMask,
			unsigned value)
{
	struct radeon_cmdbuf *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(event->bo);

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);

	/* Flags that only require a top-of-pipe event. */
	VkPipelineStageFlags top_of_pipe_flags =
		VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;

	/* Flags that only require a post-index-fetch event. */
	VkPipelineStageFlags post_index_fetch_flags =
		top_of_pipe_flags |
		VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
		VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
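
	/* For example, a stageMask of only
	 * VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT falls into the
	 * post-index-fetch bucket and can be signalled with an ME write,
	 * while any shader or transfer stage (e.g.
	 * VK_PIPELINE_STAGE_TRANSFER_BIT) falls through to the
	 * bottom-of-pipe EOP event below.
	 */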

	/* Make sure CP DMA is idle because the driver might have performed a
	 * DMA operation for copying or filling buffers/images.
	 */
	si_cp_dma_wait_for_idle(cmd_buffer);

	/* TODO: Emit EOS events for syncing PS/CS stages. */

	if (!(stageMask & ~top_of_pipe_flags)) {
		/* Just need to sync the PFP engine. */
		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				S_370_WR_CONFIRM(1) |
				S_370_ENGINE_SEL(V_370_PFP));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		radeon_emit(cs, value);
	} else if (!(stageMask & ~post_index_fetch_flags)) {
		/* Sync ME because PFP reads index and indirect buffers. */
		radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
		radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
				S_370_WR_CONFIRM(1) |
				S_370_ENGINE_SEL(V_370_ME));
		radeon_emit(cs, va);
		radeon_emit(cs, va >> 32);
		radeon_emit(cs, value);
	} else {
		/* Otherwise, sync all prior GPU work using an EOP event. */
		si_cs_emit_write_event_eop(cs,
					   cmd_buffer->device->physical_device->rad_info.chip_class,
					   radv_cmd_buffer_uses_mec(cmd_buffer),
					   V_028A90_BOTTOM_OF_PIPE_TS, 0,
					   EOP_DATA_SEL_VALUE_32BIT, va, 2, value,
					   cmd_buffer->gfx9_eop_bug_va);
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
		      VkEvent _event,
		      VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 1);
}

void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
			VkEvent _event,
			VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 0);
}

void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
			uint32_t eventCount,
			const VkEvent* pEvents,
			VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask,
			uint32_t memoryBarrierCount,
			const VkMemoryBarrier* pMemoryBarriers,
			uint32_t bufferMemoryBarrierCount,
			const VkBufferMemoryBarrier* pBufferMemoryBarriers,
			uint32_t imageMemoryBarrierCount,
			const VkImageMemoryBarrier* pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_barrier_info info;

	info.eventCount = eventCount;
	info.pEvents = pEvents;
	info.srcStageMask = 0;

	radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
		     bufferMemoryBarrierCount, pBufferMemoryBarriers,
		     imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}

void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
			   uint32_t deviceMask)
{
	/* No-op */
}

/* VK_EXT_conditional_rendering */
void radv_CmdBeginConditionalRenderingEXT(
	VkCommandBuffer                             commandBuffer,
	const VkConditionalRenderingBeginInfoEXT*   pConditionalRenderingBegin)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, pConditionalRenderingBegin->buffer);
	bool inverted;
	uint64_t va;

	va = radv_buffer_get_va(buffer->bo) + pConditionalRenderingBegin->offset;

	inverted = pConditionalRenderingBegin->flags & VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;

	/* Enable predication for this command buffer. */
	si_emit_set_predication_state(cmd_buffer, inverted, va);
	cmd_buffer->state.predicating = true;

	/* Store conditional rendering user info. */
	cmd_buffer->state.predication_type = inverted;
	cmd_buffer->state.predication_va = va;
}

void radv_CmdEndConditionalRenderingEXT(
	VkCommandBuffer                             commandBuffer)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	/* Disable predication for this command buffer. */
	si_emit_set_predication_state(cmd_buffer, false, 0);
	cmd_buffer->state.predicating = false;

	/* Reset conditional rendering user info. */
	cmd_buffer->state.predication_type = -1;
	cmd_buffer->state.predication_va = 0;
}
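
/* Illustrative usage (application side; cmd is an application command
 * buffer and result_buffer a buffer whose first 32-bit word holds the
 * predicate):
 *
 *	VkConditionalRenderingBeginInfoEXT begin = {
 *		.sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
 *		.buffer = result_buffer,
 *		.offset = 0,
 *		.flags = 0,
 *	};
 *	vkCmdBeginConditionalRenderingEXT(cmd, &begin);
 *	vkCmdDraw(cmd, 3, 1, 0, 0);
 *	vkCmdEndConditionalRenderingEXT(cmd);
 *
 * Draws recorded between begin/end execute only if the predicate is
 * non-zero (inverted with VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT).
 */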