/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
#include "vk_format.h"
#include "radv_debug.h"
#include "radv_meta.h"

#include "ac_debug.h"

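/* Bits tracked in cmd_buffer->state.prefetch_L2_mask: each flag names a
 * shader stage (or the vertex-buffer descriptor array) whose data still
 * needs to be prefetched into L2 before it is first used by a draw.
 */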
enum {
	RADV_PREFETCH_VBO_DESCRIPTORS	= (1 << 0),
	RADV_PREFETCH_VS		= (1 << 1),
	RADV_PREFETCH_TCS		= (1 << 2),
	RADV_PREFETCH_TES		= (1 << 3),
	RADV_PREFETCH_GS		= (1 << 4),
	RADV_PREFETCH_PS		= (1 << 5),
	RADV_PREFETCH_SHADERS		= (RADV_PREFETCH_VS |
					   RADV_PREFETCH_TCS |
					   RADV_PREFETCH_TES |
					   RADV_PREFETCH_GS |
					   RADV_PREFETCH_PS)
};

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
	.viewport = {
		.count = 0,
	},
	.scissor = {
		.count = 0,
	},
	.line_width = 1.0f,
	.depth_bias = {
		.bias = 0.0f,
		.clamp = 0.0f,
		.slope = 0.0f,
	},
	.blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
	.depth_bounds = {
		.min = 0.0f,
		.max = 1.0f,
	},
	.stencil_compare_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_write_mask = {
		.front = ~0u,
		.back = ~0u,
	},
	.stencil_reference = {
		.front = 0u,
		.back = 0u,
	},
};

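/* Copy the dynamic state from 'src' into the command buffer state, and mark
 * as dirty only the fields that actually changed, so unchanged registers are
 * not re-emitted.
 */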
static void
radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
			const struct radv_dynamic_state *src)
{
	struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
	uint32_t copy_mask = src->mask;
	uint32_t dest_mask = 0;

	/* Make sure to copy the number of viewports/scissors because they can
	 * only be specified at pipeline creation time.
	 */
	dest->viewport.count = src->viewport.count;
	dest->scissor.count = src->scissor.count;
	dest->discard_rectangle.count = src->discard_rectangle.count;

	if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
		if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
			   src->viewport.count * sizeof(VkViewport))) {
			typed_memcpy(dest->viewport.viewports,
				     src->viewport.viewports,
				     src->viewport.count);
			dest_mask |= RADV_DYNAMIC_VIEWPORT;
		}
	}

	if (copy_mask & RADV_DYNAMIC_SCISSOR) {
		if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
			   src->scissor.count * sizeof(VkRect2D))) {
			typed_memcpy(dest->scissor.scissors,
				     src->scissor.scissors, src->scissor.count);
			dest_mask |= RADV_DYNAMIC_SCISSOR;
		}
	}

	if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
		if (dest->line_width != src->line_width) {
			dest->line_width = src->line_width;
			dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
		}
	}

	if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
		if (memcmp(&dest->depth_bias, &src->depth_bias,
			   sizeof(src->depth_bias))) {
			dest->depth_bias = src->depth_bias;
			dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
		}
	}

	if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
		if (memcmp(&dest->blend_constants, &src->blend_constants,
			   sizeof(src->blend_constants))) {
			typed_memcpy(dest->blend_constants,
				     src->blend_constants, 4);
			dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
		}
	}

	if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
		if (memcmp(&dest->depth_bounds, &src->depth_bounds,
			   sizeof(src->depth_bounds))) {
			dest->depth_bounds = src->depth_bounds;
			dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
		}
	}

	if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
		if (memcmp(&dest->stencil_compare_mask,
			   &src->stencil_compare_mask,
			   sizeof(src->stencil_compare_mask))) {
			dest->stencil_compare_mask = src->stencil_compare_mask;
			dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
		}
	}

	if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
		if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
			   sizeof(src->stencil_write_mask))) {
			dest->stencil_write_mask = src->stencil_write_mask;
			dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
		}
	}

	if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
		if (memcmp(&dest->stencil_reference, &src->stencil_reference,
			   sizeof(src->stencil_reference))) {
			dest->stencil_reference = src->stencil_reference;
			dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
		}
	}

	if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
		if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
			   src->discard_rectangle.count * sizeof(VkRect2D))) {
			typed_memcpy(dest->discard_rectangle.rectangles,
				     src->discard_rectangle.rectangles,
				     src->discard_rectangle.count);
			dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
		}
	}

	cmd_buffer->state.dirty |= dest_mask;
}

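/* On CIK+ the compute queue is executed by the MEC micro-engine rather than
 * the graphics ME; a few packets are encoded differently there, so several
 * emit paths need to know which engine will process the stream.
 */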
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
	return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
	       cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
	switch (f) {
	case RADV_QUEUE_GENERAL:
		return RING_GFX;
	case RADV_QUEUE_COMPUTE:
		return RING_COMPUTE;
	case RADV_QUEUE_TRANSFER:
		return RING_DMA;
	default:
		unreachable("Unknown queue family");
	}
}

static VkResult radv_create_cmd_buffer(
	struct radv_device *			device,
	struct radv_cmd_pool *			pool,
	VkCommandBufferLevel			level,
	VkCommandBuffer*			pCommandBuffer)
{
	struct radv_cmd_buffer *cmd_buffer;
	enum ring_type ring;

	cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
			       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
	if (cmd_buffer == NULL)
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);

	cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
	cmd_buffer->device = device;
	cmd_buffer->pool = pool;
	cmd_buffer->level = level;

	if (pool) {
		list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
		cmd_buffer->queue_family_index = pool->queue_family_index;
	} else {
		/* Init the pool_link so we can safely call list_del when we destroy
		 * the command buffer
		 */
		list_inithead(&cmd_buffer->pool_link);
		cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
	}

	ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

	cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
	if (!cmd_buffer->cs) {
		vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
		return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
	}

	*pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

	list_inithead(&cmd_buffer->upload.list);

	return VK_SUCCESS;
}

static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
	list_del(&cmd_buffer->pool_link);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	if (cmd_buffer->upload.upload_bo)
		cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
	cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);

	for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
		free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

	vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

	list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
				 &cmd_buffer->upload.list, list) {
		cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
		list_del(&up->list);
		free(up);
	}

	cmd_buffer->push_constant_stages = 0;
	cmd_buffer->scratch_size_needed = 0;
	cmd_buffer->compute_scratch_size_needed = 0;
	cmd_buffer->esgs_ring_size_needed = 0;
	cmd_buffer->gsvs_ring_size_needed = 0;
	cmd_buffer->tess_rings_needed = false;
	cmd_buffer->sample_positions_needed = false;

	if (cmd_buffer->upload.upload_bo)
		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
				   cmd_buffer->upload.upload_bo, 8);
	cmd_buffer->upload.offset = 0;

	cmd_buffer->record_result = VK_SUCCESS;

	cmd_buffer->ring_offsets_idx = -1;

	for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
		cmd_buffer->descriptors[i].dirty = 0;
		cmd_buffer->descriptors[i].valid = 0;
		cmd_buffer->descriptors[i].push_dirty = false;
	}

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		void *fence_ptr;
		radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
					     &cmd_buffer->gfx9_fence_offset,
					     &fence_ptr);
		cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
	}

	cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;

	return cmd_buffer->record_result;
}

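/* Grow the per-command-buffer upload buffer. The old BO (if any) is kept on
 * upload.list until the command buffer is reset or destroyed, since packets
 * already emitted may still reference it.
 */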
static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
				  uint64_t min_needed)
{
	uint64_t new_size;
	struct radeon_winsys_bo *bo;
	struct radv_cmd_buffer_upload *upload;
	struct radv_device *device = cmd_buffer->device;

	new_size = MAX2(min_needed, 16 * 1024);
	new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

	bo = device->ws->buffer_create(device->ws,
				       new_size, 4096,
				       RADEON_DOMAIN_GTT,
				       RADEON_FLAG_CPU_ACCESS|
				       RADEON_FLAG_NO_INTERPROCESS_SHARING |
				       RADEON_FLAG_32BIT);

	if (!bo) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		return false;
	}

	radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo, 8);
	if (cmd_buffer->upload.upload_bo) {
		upload = malloc(sizeof(*upload));

		if (!upload) {
			cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
			device->ws->buffer_destroy(bo);
			return false;
		}

		memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
		list_add(&upload->list, &cmd_buffer->upload.list);
	}

	cmd_buffer->upload.upload_bo = bo;
	cmd_buffer->upload.size = new_size;
	cmd_buffer->upload.offset = 0;
	cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

	if (!cmd_buffer->upload.map) {
		cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
		return false;
	}

	return true;
}

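/* Suballocate 'size' bytes from the upload buffer, growing it on demand.
 * Returns false on failure; the error is recorded on the command buffer by
 * the resize path.
 */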
bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
			     unsigned size,
			     unsigned alignment,
			     unsigned *out_offset,
			     void **ptr)
{
	uint64_t offset = align(cmd_buffer->upload.offset, alignment);
	if (offset + size > cmd_buffer->upload.size) {
		if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
			return false;
		offset = 0;
	}

	*out_offset = offset;
	*ptr = cmd_buffer->upload.map + offset;

	cmd_buffer->upload.offset = offset + size;
	return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
			    unsigned size, unsigned alignment,
			    const void *data, unsigned *out_offset)
{
	uint8_t *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
					  out_offset, (void **)&ptr))
		return false;

	memcpy(ptr, data, size);

	return true;
}

static void
radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
			    unsigned count, const uint32_t *data)
{
	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_ME));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit_array(cs, data, count);
}

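/* Write an incrementing trace id into the trace BO and a matching NOP-encoded
 * trace point into the IB; the GPU-hang debugging path uses these to see how
 * far a submission progressed.
 */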
void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va;

	va = radv_buffer_get_va(device->trace_bo);
	if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
		va += 4;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);

	++cmd_buffer->state.trace_id;
	radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
	radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
	radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}

static void
radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
			   enum radv_cmd_flush_bits flags)
{
	if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
		uint32_t *ptr = NULL;
		uint64_t va = 0;

		assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
				RADV_CMD_FLAG_CS_PARTIAL_FLUSH));

		if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
			va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) +
			     cmd_buffer->gfx9_fence_offset;
			ptr = &cmd_buffer->gfx9_fence_idx;
		}

		/* Force wait for graphics or compute engines to be idle. */
		si_cs_emit_cache_flush(cmd_buffer->cs,
				       cmd_buffer->device->physical_device->rad_info.chip_class,
				       ptr, va,
				       radv_cmd_buffer_uses_mec(cmd_buffer),
				       flags);
	}

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_cmd_buffer_trace_emit(cmd_buffer);
}

static void
radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
		   struct radv_pipeline *pipeline, enum ring_type ring)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint32_t data[2];
	uint64_t va;

	va = radv_buffer_get_va(device->trace_bo);

	switch (ring) {
	case RING_GFX:
		va += 8;
		break;
	case RING_COMPUTE:
		va += 16;
		break;
	default:
		assert(!"invalid ring type");
	}

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
							   cmd_buffer->cs, 6);

	data[0] = (uintptr_t)pipeline;
	data[1] = (uintptr_t)pipeline >> 32;

	radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, 2, data);
}

void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
			     VkPipelineBindPoint bind_point,
			     struct radv_descriptor_set *set,
			     unsigned idx)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);

	descriptors_state->sets[idx] = set;
	if (set)
		descriptors_state->valid |= (1u << idx);
	else
		descriptors_state->valid &= ~(1u << idx);
	descriptors_state->dirty |= (1u << idx);
}

static void
radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
		      VkPipelineBindPoint bind_point)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint32_t data[MAX_SETS * 2] = {};
	uint64_t va;
	unsigned i;
	va = radv_buffer_get_va(device->trace_bo) + 24;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
							   cmd_buffer->cs, 4 + MAX_SETS * 2);

	for_each_bit(i, descriptors_state->valid) {
		struct radv_descriptor_set *set = descriptors_state->sets[i];
		data[i * 2] = (uintptr_t)set;
		data[i * 2 + 1] = (uintptr_t)set >> 32;
	}

	radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
	radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
}

struct radv_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
		      gl_shader_stage stage,
		      int idx)
{
	struct radv_shader_variant *shader = radv_get_shader(pipeline, stage);
	return &shader->info.user_sgprs_locs.shader_data[idx];
}

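/* Emit a pointer into the user SGPR that the compiler assigned to user-data
 * slot 'idx' for this stage; no-op if the slot was not allocated.
 */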
static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_pipeline *pipeline,
			   gl_shader_stage stage,
			   int idx, uint64_t va)
{
	struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
	uint32_t base_reg = pipeline->user_data_0[stage];
	if (loc->sgpr_idx == -1)
		return;

	assert(loc->num_sgprs == (HAVE_32BIT_POINTERS ? 1 : 2));
	assert(!loc->indirect);

	radv_emit_shader_pointer(cmd_buffer->device, cmd_buffer->cs,
				 base_reg + loc->sgpr_idx * 4, va, false);
}

static void
radv_emit_descriptor_pointers(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline,
			      struct radv_descriptor_state *descriptors_state,
			      gl_shader_stage stage)
{
	struct radv_device *device = cmd_buffer->device;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint32_t sh_base = pipeline->user_data_0[stage];
	struct radv_userdata_locations *locs =
		&pipeline->shaders[stage]->info.user_sgprs_locs;
	unsigned mask;

	mask = descriptors_state->dirty & descriptors_state->valid;

	for (int i = 0; i < MAX_SETS; i++) {
		struct radv_userdata_info *loc = &locs->descriptor_sets[i];
		if (loc->sgpr_idx != -1 && !loc->indirect)
			continue;
		mask &= ~(1 << i);
	}

	while (mask) {
		int start, count;

		u_bit_scan_consecutive_range(&mask, &start, &count);

		struct radv_userdata_info *loc = &locs->descriptor_sets[start];
		unsigned sh_offset = sh_base + loc->sgpr_idx * 4;

		radv_emit_shader_pointer_head(cs, sh_offset, count,
					      HAVE_32BIT_POINTERS);
		for (int i = 0; i < count; i++) {
			struct radv_descriptor_set *set =
				descriptors_state->sets[start + i];

			radv_emit_shader_pointer_body(device, cs, set->va,
						      HAVE_32BIT_POINTERS);
		}
	}
}

static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_pipeline *pipeline)
{
	int num_samples = pipeline->graphics.ms.num_samples;
	struct radv_multisample_state *ms = &pipeline->graphics.ms;
	struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

	if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
		cmd_buffer->sample_positions_needed = true;

	if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
	radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

	radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);

	radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

	/* GFX9: Flush DFSM when the AA mode changes. */
	if (cmd_buffer->device->dfsm_allowed) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
	}
}

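/* Prime the L2 cache with a shader binary via CP DMA, so the CUs are less
 * likely to stall on instruction fetch when the next draw starts.
 */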
static void
radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_shader_variant *shader)
{
	uint64_t va;

	if (!shader)
		return;

	va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

	si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
}

static void
radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_pipeline *pipeline,
		      bool vertex_stage_only)
{
	struct radv_cmd_state *state = &cmd_buffer->state;
	uint32_t mask = state->prefetch_L2_mask;

	if (vertex_stage_only) {
		/* Fast prefetch path for starting draws as soon as possible.
		 */
		mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
						  RADV_PREFETCH_VBO_DESCRIPTORS);
	}

	if (mask & RADV_PREFETCH_VS)
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_VERTEX]);

	if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
		si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);

	if (mask & RADV_PREFETCH_TCS)
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_TESS_CTRL]);

	if (mask & RADV_PREFETCH_TES)
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_TESS_EVAL]);

	if (mask & RADV_PREFETCH_GS) {
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_GEOMETRY]);
		radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
	}

	if (mask & RADV_PREFETCH_PS)
		radv_emit_shader_prefetch(cmd_buffer,
					  pipeline->shaders[MESA_SHADER_FRAGMENT]);

	state->prefetch_L2_mask &= ~mask;
}

static void
radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
{
	if (!cmd_buffer->device->physical_device->rbplus_allowed)
		return;

	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;

	unsigned sx_ps_downconvert = 0;
	unsigned sx_blend_opt_epsilon = 0;
	unsigned sx_blend_opt_control = 0;

	for (unsigned i = 0; i < subpass->color_count; ++i) {
		if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
			continue;

		int idx = subpass->color_attachments[i].attachment;
		struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;

		unsigned format = G_028C70_FORMAT(cb->cb_color_info);
		unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
		uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
		uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;

		bool has_alpha, has_rgb;

		/* Set if RGB and A are present. */
		has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);

		if (format == V_028C70_COLOR_8 ||
		    format == V_028C70_COLOR_16 ||
		    format == V_028C70_COLOR_32)
			has_rgb = !has_alpha;
		else
			has_rgb = true;

		/* Check the colormask and export format. */
		if (!(colormask & 0x7))
			has_rgb = false;
		if (!(colormask & 0x8))
			has_alpha = false;

		if (spi_format == V_028714_SPI_SHADER_ZERO) {
			has_rgb = false;
			has_alpha = false;
		}

		/* Disable value checking for disabled channels. */
		if (!has_rgb)
			sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
		if (!has_alpha)
			sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);

		/* Enable down-conversion for 32bpp and smaller formats. */
		switch (format) {
		case V_028C70_COLOR_8:
		case V_028C70_COLOR_8_8:
		case V_028C70_COLOR_8_8_8_8:
			/* For 1 and 2-channel formats, use the superset thereof. */
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_5_6_5:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_1_5_5_5:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_4_4_4_4:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_32:
			if (swap == V_028C70_SWAP_STD &&
			    spi_format == V_028714_SPI_SHADER_32_R)
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
			else if (swap == V_028C70_SWAP_ALT_REV &&
				 spi_format == V_028714_SPI_SHADER_32_AR)
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
			break;

		case V_028C70_COLOR_16:
		case V_028C70_COLOR_16_16:
			/* For 1-channel formats, use the superset thereof. */
			if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
			    spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
				if (swap == V_028C70_SWAP_STD ||
				    swap == V_028C70_SWAP_STD_REV)
					sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
				else
					sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
			}
			break;

		case V_028C70_COLOR_10_11_11:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
			}
			break;

		case V_028C70_COLOR_2_10_10_10:
			if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
				sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
				sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
			}
			break;
		}
	}

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
	radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
	radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
	radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
}

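/* Emit the pre-built register state of the bound graphics pipeline and track
 * its shader BOs in the CS; skipped entirely when the pipeline is unchanged.
 */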
static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;

	if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
		return;

	radv_update_multisample_state(cmd_buffer, pipeline);

	cmd_buffer->scratch_size_needed =
		MAX2(cmd_buffer->scratch_size_needed,
		     pipeline->max_waves * pipeline->scratch_bytes_per_wave);

	if (!cmd_buffer->state.emitted_pipeline ||
	    cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
	    pipeline->graphics.can_use_guardband)
		cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;

	radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);

	for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
		if (!pipeline->shaders[i])
			continue;

		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
				   pipeline->shaders[i]->bo, 8);
	}

	if (radv_pipeline_has_gs(pipeline))
		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
				   pipeline->gs_copy_shader->bo, 8);

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);

	cmd_buffer->state.emitted_pipeline = pipeline;

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
	si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
			  cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t count = cmd_buffer->state.dynamic.scissor.count;

	si_write_scissors(cmd_buffer->cs, 0, count,
			  cmd_buffer->state.dynamic.scissor.scissors,
			  cmd_buffer->state.dynamic.viewport.viewports,
			  cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
}

static void
radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
{
	if (!cmd_buffer->state.dynamic.discard_rectangle.count)
		return;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
				   cmd_buffer->state.dynamic.discard_rectangle.count * 2);
	for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
		VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
		radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
		radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
					    S_028214_BR_Y(rect.offset.y + rect.extent.height));
	}
}

static void
radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
{
	unsigned width = cmd_buffer->state.dynamic.line_width * 8;

	radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
			       S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
}

static void
radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
	radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
}

static void
radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg_seq(cmd_buffer->cs,
				   R_028430_DB_STENCILREFMASK, 2);
	radeon_emit(cmd_buffer->cs,
		    S_028430_STENCILTESTVAL(d->stencil_reference.front) |
		    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
		    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
		    S_028430_STENCILOPVAL(1));
	radeon_emit(cmd_buffer->cs,
		    S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
		    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
		    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
		    S_028434_STENCILOPVAL_BF(1));
}

static void
radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

	radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
			       fui(d->depth_bounds.min));
	radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
			       fui(d->depth_bounds.max));
}

static void
radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
{
	struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
	unsigned slope = fui(d->depth_bias.slope * 16.0f);
	unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

	radeon_set_context_reg_seq(cmd_buffer->cs,
				   R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
	radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
	radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
	radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
	radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
	radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
}

static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
			 int index,
			 struct radv_attachment_info *att,
			 struct radv_image *image,
			 VkImageLayout layout)
{
	bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
	struct radv_color_buffer_info *cb = &att->cb;
	uint32_t cb_color_info = cb->cb_color_info;

	if (!radv_layout_dcc_compressed(image, layout,
					radv_image_queue_family_mask(image,
								     cmd_buffer->queue_family_index,
								     cmd_buffer->queue_family_index))) {
		cb_color_info &= C_028C70_DCC_ENABLE;
	}

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base);
		radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
		radeon_emit(cmd_buffer->cs, cb_color_info);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
		radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
		radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
		radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));

		radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
				       S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
	} else {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
		radeon_emit(cmd_buffer->cs, cb->cb_color_base);
		radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
		radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
		radeon_emit(cmd_buffer->cs, cb->cb_color_view);
		radeon_emit(cmd_buffer->cs, cb_color_info);
		radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
		radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
		radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

		if (is_vi) { /* DCC BASE */
			radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
		}
	}
}

static void
radv_update_zrange_precision(struct radv_cmd_buffer *cmd_buffer,
			     struct radv_ds_buffer_info *ds,
			     struct radv_image *image, VkImageLayout layout,
			     bool requires_cond_write)
{
	uint32_t db_z_info = ds->db_z_info;
	uint32_t db_z_info_reg;

	if (!radv_image_is_tc_compat_htile(image))
		return;

	if (!radv_layout_has_htile(image, layout,
				   radv_image_queue_family_mask(image,
								cmd_buffer->queue_family_index,
								cmd_buffer->queue_family_index))) {
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;
	}

	db_z_info &= C_028040_ZRANGE_PRECISION;

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		db_z_info_reg = R_028038_DB_Z_INFO;
	} else {
		db_z_info_reg = R_028040_DB_Z_INFO;
	}

	/* When we don't know the last fast clear value we need to emit a
	 * conditional packet, otherwise we can update DB_Z_INFO directly.
	 */
	if (requires_cond_write) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_COND_WRITE, 7, 0));

		const uint32_t write_space = 0 << 8;	/* register */
		const uint32_t poll_space = 1 << 4;	/* memory */
		const uint32_t function = 3 << 0;	/* equal to the reference */
		const uint32_t options = write_space | poll_space | function;
		radeon_emit(cmd_buffer->cs, options);

		/* poll address - location of the depth clear value */
		uint64_t va = radv_buffer_get_va(image->bo);
		va += image->offset + image->clear_value_offset;

		/* In presence of stencil format, we have to adjust the base
		 * address because the first value is the stencil clear value.
		 */
		if (vk_format_is_stencil(image->vk_format))
			va += 4;

		radeon_emit(cmd_buffer->cs, va);
		radeon_emit(cmd_buffer->cs, va >> 32);

		radeon_emit(cmd_buffer->cs, fui(0.0f));		 /* reference value */
		radeon_emit(cmd_buffer->cs, (uint32_t)-1);	 /* comparison mask */
		radeon_emit(cmd_buffer->cs, db_z_info_reg >> 2); /* write address low */
		radeon_emit(cmd_buffer->cs, 0u);		 /* write address high */
		radeon_emit(cmd_buffer->cs, db_z_info);
	} else {
		radeon_set_context_reg(cmd_buffer->cs, db_z_info_reg, db_z_info);
	}
}

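/* Emit the depth/stencil surface registers for the given attachment,
 * disabling HTILE access when the image layout does not allow compressed
 * reads/writes.
 */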
static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
		      struct radv_ds_buffer_info *ds,
		      struct radv_image *image,
		      VkImageLayout layout)
{
	uint32_t db_z_info = ds->db_z_info;
	uint32_t db_stencil_info = ds->db_stencil_info;

	if (!radv_layout_has_htile(image, layout,
				   radv_image_queue_family_mask(image,
								cmd_buffer->queue_family_index,
								cmd_buffer->queue_family_index))) {
		db_z_info &= C_028040_TILE_SURFACE_ENABLE;
		db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
	radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);

	if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
		radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
		radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
		radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
		radeon_emit(cmd_buffer->cs, ds->db_depth_size);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
		radeon_emit(cmd_buffer->cs, db_z_info);			/* DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, db_stencil_info);		/* DB_STENCIL_INFO */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base);	/* DB_Z_READ_BASE */
		radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32)); /* DB_Z_READ_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);	/* DB_STENCIL_READ_BASE */
		radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32)); /* DB_STENCIL_READ_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base);	/* DB_Z_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32)); /* DB_Z_WRITE_BASE_HI */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* DB_STENCIL_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */

		radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
		radeon_emit(cmd_buffer->cs, ds->db_z_info2);
		radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
	} else {
		radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

		radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
		radeon_emit(cmd_buffer->cs, ds->db_depth_info);		/* R_02803C_DB_DEPTH_INFO */
		radeon_emit(cmd_buffer->cs, db_z_info);			/* R_028040_DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, db_stencil_info);		/* R_028044_DB_STENCIL_INFO */
		radeon_emit(cmd_buffer->cs, ds->db_z_read_base);	/* R_028048_DB_Z_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);	/* R_02804C_DB_STENCIL_READ_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_z_write_base);	/* R_028050_DB_Z_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);	/* R_028054_DB_STENCIL_WRITE_BASE */
		radeon_emit(cmd_buffer->cs, ds->db_depth_size);		/* R_028058_DB_DEPTH_SIZE */
		radeon_emit(cmd_buffer->cs, ds->db_depth_slice);	/* R_02805C_DB_DEPTH_SLICE */
	}

	/* Update the ZRANGE_PRECISION value for the TC-compat bug. */
	radv_update_zrange_precision(cmd_buffer, ds, image, layout, true);

	radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
			       ds->pa_su_poly_offset_db_fmt_cntl);
}

/**
 * Update the fast clear depth/stencil values if the image is bound as a
 * depth/stencil buffer.
 */
static void
radv_update_bound_fast_clear_ds(struct radv_cmd_buffer *cmd_buffer,
				struct radv_image *image,
				VkClearDepthStencilValue ds_clear_value)
{
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	struct radv_attachment_info *att;
	uint32_t att_idx;

	if (!framebuffer || !subpass)
		return;

	att_idx = subpass->depth_stencil_attachment.attachment;
	if (att_idx == VK_ATTACHMENT_UNUSED)
		return;

	att = &framebuffer->attachments[att_idx];
	if (att->attachment->image != image)
		return;

	radeon_set_context_reg_seq(cs, R_028028_DB_STENCIL_CLEAR, 2);
	radeon_emit(cs, ds_clear_value.stencil);
	radeon_emit(cs, fui(ds_clear_value.depth));
}

static void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			  struct radv_image *image,
			  VkClearDepthStencilValue ds_clear_value,
			  VkImageAspectFlags aspects)
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	assert(radv_image_has_htile(image));

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
	radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

	radv_update_bound_fast_clear_ds(cmd_buffer, image, ds_clear_value);

	/* Update the ZRANGE_PRECISION value for the TC-compat bug. This is
	 * only needed when clearing Z to 0.0.
	 */
	if ((aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
	    ds_clear_value.depth == 0.0) {
		struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
		const struct radv_subpass *subpass = cmd_buffer->state.subpass;

		if (!framebuffer || !subpass)
			return;

		if (subpass->depth_stencil_attachment.attachment == VK_ATTACHMENT_UNUSED)
			return;

		int idx = subpass->depth_stencil_attachment.attachment;
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];

		/* Only needed if the image is currently bound as the depth
		 * surface.
		 */
		if (att->attachment->image != image)
			return;

		radv_update_zrange_precision(cmd_buffer, &att->ds, image,
					     layout, false);
	}
}

static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
			   struct radv_image *image)
{
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->clear_value_offset;

	if (!radv_image_has_htile(image))
		return;

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
		    COPY_DATA_DST_SEL(COPY_DATA_REG) |
		    COPY_DATA_COUNT_SEL);
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR >> 2);
	radeon_emit(cmd_buffer->cs, 0);

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
	radeon_emit(cmd_buffer->cs, 0);
}

/**
 * With DCC some colors don't require CMASK elimination before being
 * used as a texture. This sets a predicate value to determine if the
 * cmask eliminate is required.
 */
void
radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image,
				  bool value)
{
	uint64_t pred_val = value;
	uint64_t va = radv_buffer_get_va(image->bo);
	va += image->offset + image->dcc_pred_offset;

	assert(radv_image_has_dcc(image));

	radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cmd_buffer->cs, va);
	radeon_emit(cmd_buffer->cs, va >> 32);
	radeon_emit(cmd_buffer->cs, pred_val);
	radeon_emit(cmd_buffer->cs, pred_val >> 32);
}

/**
 * Update the fast clear color values if the image is bound as a color buffer.
 */
static void
radv_update_bound_fast_clear_color(struct radv_cmd_buffer *cmd_buffer,
				   struct radv_image *image,
				   int cb_idx,
				   uint32_t color_values[2])
{
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	struct radv_attachment_info *att;
	uint32_t att_idx;

	if (!framebuffer || !subpass)
		return;

	att_idx = subpass->color_attachments[cb_idx].attachment;
	if (att_idx == VK_ATTACHMENT_UNUSED)
		return;

	att = &framebuffer->attachments[att_idx];
	if (att->attachment->image != image)
		return;

	radeon_set_context_reg_seq(cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c, 2);
	radeon_emit(cs, color_values[0]);
	radeon_emit(cs, color_values[1]);
}

/**
 * Set the clear color values to the image's metadata.
 */
static void
radv_set_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
			      struct radv_image *image,
			      int cb_idx,
			      uint32_t color_values[2])
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(image->bo);

	va += image->offset + image->clear_value_offset;

	assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));

	radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 4, 0));
	radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
		    S_370_WR_CONFIRM(1) |
		    S_370_ENGINE_SEL(V_370_PFP));
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, color_values[0]);
	radeon_emit(cs, color_values[1]);

	radv_update_bound_fast_clear_color(cmd_buffer, image, cb_idx,
					   color_values);
}

/**
 * Load the clear color values from the image's metadata.
 */
static void
radv_load_color_clear_metadata(struct radv_cmd_buffer *cmd_buffer,
			       struct radv_image *image,
			       int cb_idx)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(image->bo);

	va += image->offset + image->clear_value_offset;

	if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
		return;

	uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + cb_idx * 0x3c;

	radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
	radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
		    COPY_DATA_DST_SEL(COPY_DATA_REG) |
		    COPY_DATA_COUNT_SEL);
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
	radeon_emit(cs, reg >> 2);
	radeon_emit(cs, 0);

	radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
	radeon_emit(cs, 0);
}

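/* Emit color and depth/stencil surface state for every attachment of the
 * current subpass, plus the window scissor covering the framebuffer.
 */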
static void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
	int i;
	struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
	const struct radv_subpass *subpass = cmd_buffer->state.subpass;

	/* this may happen for inherited secondary recording */
	if (!framebuffer)
		return;

	for (i = 0; i < 8; ++i) {
		if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
			radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
					       S_028C70_FORMAT(V_028C70_COLOR_INVALID));
			continue;
		}

		int idx = subpass->color_attachments[i].attachment;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		VkImageLayout layout = subpass->color_attachments[i].layout;

		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);

		assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
		radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);

		radv_load_color_clear_metadata(cmd_buffer, image, i);
	}

	if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
		int idx = subpass->depth_stencil_attachment.attachment;
		VkImageLayout layout = subpass->depth_stencil_attachment.layout;
		struct radv_attachment_info *att = &framebuffer->attachments[idx];
		struct radv_image *image = att->attachment->image;
		radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
		MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
										cmd_buffer->queue_family_index,
										cmd_buffer->queue_family_index);
		/* We currently don't support writing decompressed HTILE */
		assert(radv_layout_has_htile(image, layout, queue_mask) ==
		       radv_layout_is_htile_compressed(image, layout, queue_mask));

		radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

		if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
			cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
			cmd_buffer->state.offset_scale = att->ds.offset_scale;
		}
		radv_load_depth_clear_regs(cmd_buffer, image);
	} else {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
		else
			radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);

		radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID));	 /* DB_Z_INFO */
		radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
	}
	radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
			       S_028208_BR_X(framebuffer->width) |
			       S_028208_BR_Y(framebuffer->height));

	if (cmd_buffer->device->dfsm_allowed) {
		radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
		radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
	}

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
}

static void
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	struct radv_cmd_state *state = &cmd_buffer->state;

	if (state->index_type != state->last_index_type) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
			radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
						   2, state->index_type);
		} else {
			radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
			radeon_emit(cs, state->index_type);
		}

		state->last_index_type = state->index_type;
	}

	radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
	radeon_emit(cs, state->index_va);
	radeon_emit(cs, state->index_va >> 32);

	radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
	radeon_emit(cs, state->max_index_count);

	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
}

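/* Program DB_COUNT_CONTROL for occlusion queries. Out-of-order rasterization
 * is temporarily disabled while perfect (precise) occlusion queries are
 * active, and re-enabled once they end.
 */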
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
	bool has_perfect_queries = cmd_buffer->state.perfect_occlusion_queries_enabled;
	struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
	uint32_t pa_sc_mode_cntl_1 =
		pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
	uint32_t db_count_control;

	if (!cmd_buffer->state.active_occlusion_queries) {
		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
			    pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
			    has_perfect_queries) {
				/* Re-enable out-of-order rasterization if the
				 * bound pipeline supports it and if it has
				 * been disabled before starting any perfect
				 * occlusion queries.
				 */
				radeon_set_context_reg(cmd_buffer->cs,
						       R_028A4C_PA_SC_MODE_CNTL_1,
						       pa_sc_mode_cntl_1);
			}
			db_count_control = 0;
		} else {
			db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
		}
	} else {
		const struct radv_subpass *subpass = cmd_buffer->state.subpass;
		uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;

		if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
			db_count_control =
				S_028004_PERFECT_ZPASS_COUNTS(has_perfect_queries) |
				S_028004_SAMPLE_RATE(sample_rate) |
				S_028004_ZPASS_ENABLE(1) |
				S_028004_SLICE_EVEN_ENABLE(1) |
				S_028004_SLICE_ODD_ENABLE(1);

			if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
			    pipeline->graphics.disable_out_of_order_rast_for_occlusion &&
			    has_perfect_queries) {
				/* If the bound pipeline has enabled
				 * out-of-order rasterization, we should
				 * disable it before starting any perfect
				 * occlusion queries.
				 */
				pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;

				radeon_set_context_reg(cmd_buffer->cs,
						       R_028A4C_PA_SC_MODE_CNTL_1,
						       pa_sc_mode_cntl_1);
			}
		} else {
			db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
				S_028004_SAMPLE_RATE(sample_rate);
		}
	}

	radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}

static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
	uint32_t states = cmd_buffer->state.dirty & cmd_buffer->state.emitted_pipeline->graphics.needed_dynamic_state;

	if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
		radv_emit_viewport(cmd_buffer);

	if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT) &&
	    !cmd_buffer->device->physical_device->has_scissor_bug)
		radv_emit_scissor(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
		radv_emit_line_width(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
		radv_emit_blend_constants(cmd_buffer);

	if (states & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
		      RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
		      RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
		radv_emit_stencil(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
		radv_emit_depth_bounds(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)
		radv_emit_depth_bias(cmd_buffer);

	if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
		radv_emit_discard_rectangle(cmd_buffer);

	cmd_buffer->state.dirty &= ~states;
}

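/* Push descriptors live in CPU memory until flush time; copy them into the
 * upload buffer here so the GPU can address them.
 */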
static void
radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
			    VkPipelineBindPoint bind_point)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);
	struct radv_descriptor_set *set = &descriptors_state->push_set.set;
	unsigned bo_offset;

	if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
					 set->mapped_ptr,
					 &bo_offset))
		return;

	set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	set->va += bo_offset;
}

static void
radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
				    VkPipelineBindPoint bind_point)
{
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);
	uint32_t size = MAX_SETS * 2 * 4;
	uint32_t offset;
	void *ptr;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
					  256, &offset, &ptr))
		return;

	for (unsigned i = 0; i < MAX_SETS; i++) {
		uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
		uint64_t set_va = 0;
		struct radv_descriptor_set *set = descriptors_state->sets[i];
		if (descriptors_state->valid & (1u << i))
			set_va = set->va;
		uptr[0] = set_va & 0xffffffff;
		uptr[1] = set_va >> 32;
	}

	uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	if (cmd_buffer->state.pipeline) {
		if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

		if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

		if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

		if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

		if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
			radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
						   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
	}

	if (cmd_buffer->state.compute_pipeline)
		radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
					   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
}

static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
		       VkShaderStageFlags stages)
{
	VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
					 VK_PIPELINE_BIND_POINT_COMPUTE :
					 VK_PIPELINE_BIND_POINT_GRAPHICS;
	struct radv_descriptor_state *descriptors_state =
		radv_get_descriptors_state(cmd_buffer, bind_point);

	if (!descriptors_state->dirty)
		return;

	if (descriptors_state->push_dirty)
		radv_flush_push_descriptors(cmd_buffer, bind_point);

	if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) ||
	    (cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) {
		radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
	}

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs,
							   MAX_SETS * MESA_SHADER_STAGES * 4);

	if (cmd_buffer->state.pipeline) {
		radv_foreach_stage(stage, stages) {
			if (!cmd_buffer->state.pipeline->shaders[stage])
				continue;

			radv_emit_descriptor_pointers(cmd_buffer,
						      cmd_buffer->state.pipeline,
						      descriptors_state, stage);
		}
	}

	if (cmd_buffer->state.compute_pipeline &&
	    (stages & VK_SHADER_STAGE_COMPUTE_BIT)) {
		radv_emit_descriptor_pointers(cmd_buffer,
					      cmd_buffer->state.compute_pipeline,
					      descriptors_state,
					      MESA_SHADER_COMPUTE);
	}

	descriptors_state->dirty = 0;
	descriptors_state->push_dirty = false;

	if (unlikely(cmd_buffer->device->trace_bo))
		radv_save_descriptors(cmd_buffer, bind_point);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
		     VkShaderStageFlags stages)
{
	struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
					 ? cmd_buffer->state.compute_pipeline
					 : cmd_buffer->state.pipeline;
	struct radv_pipeline_layout *layout = pipeline->layout;
	struct radv_shader_variant *shader, *prev_shader;
	unsigned offset;
	void *ptr;
	uint64_t va;

	stages &= cmd_buffer->push_constant_stages;
	if (!stages ||
	    (!layout->push_constant_size && !layout->dynamic_offset_count))
		return;

	if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
					  16 * layout->dynamic_offset_count,
					  256, &offset, &ptr))
		return;

	memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
	memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
	       16 * layout->dynamic_offset_count);

	va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
	va += offset;

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
							   cmd_buffer->cs, MESA_SHADER_STAGES * 4);

	prev_shader = NULL;
	radv_foreach_stage(stage, stages) {
		shader = radv_get_shader(pipeline, stage);

		/* Avoid redundantly emitting the address for merged stages. */
		if (shader && shader != prev_shader) {
			radv_emit_userdata_address(cmd_buffer, pipeline, stage,
						   AC_UD_PUSH_CONSTANTS, va);

			prev_shader = shader;
		}
	}

	cmd_buffer->push_constant_stages &= ~stages;
	assert(cmd_buffer->cs->cdw <= cdw_max);
}

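/* (Re)build the vertex buffer descriptor array in the upload buffer when
 * either the pipeline's vertex layout or a bound vertex buffer changed.
 */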
static void
radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
			      bool pipeline_is_dirty)
{
	if ((pipeline_is_dirty ||
	     (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
	    cmd_buffer->state.pipeline->vertex_elements.count &&
	    radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.has_vertex_buffers) {
		struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
		unsigned vb_offset;
		void *vb_ptr;
		uint32_t i = 0;
		uint32_t count = velems->count;
		uint64_t va;

		/* allocate some descriptor state for vertex buffers */
		if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
						  &vb_offset, &vb_ptr))
			return;

		for (i = 0; i < count; i++) {
			uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
			uint32_t offset;
			int vb = velems->binding[i];
			struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
			uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];

			va = radv_buffer_get_va(buffer->bo);

			offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
			va += offset + buffer->offset;
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
			if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
				desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
			else
				desc[2] = buffer->size - offset;
			desc[3] = velems->rsrc_word3[i];
		}

		va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
		va += vb_offset;

		radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
					   AC_UD_VS_VERTEX_BUFFERS, va);

		cmd_buffer->state.vb_va = va;
		cmd_buffer->state.vb_size = count * 16;
		cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
	}
	cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
}

static void
radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
	radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
	radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
	radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
}

1830 radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
1831 bool instanced_draw, bool indirect_draw,
1832 uint32_t draw_vertex_count)
1834 struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
1835 struct radv_cmd_state *state = &cmd_buffer->state;
1836 struct radeon_winsys_cs *cs = cmd_buffer->cs;
1837 uint32_t ia_multi_vgt_param;
1838 int32_t primitive_reset_en;
1841 ia_multi_vgt_param =
1842 si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
1843 indirect_draw, draw_vertex_count);
1845 if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
1846 if (info->chip_class >= GFX9) {
1847 radeon_set_uconfig_reg_idx(cs,
1848 R_030960_IA_MULTI_VGT_PARAM,
1849 4, ia_multi_vgt_param);
1850 } else if (info->chip_class >= CIK) {
1851 radeon_set_context_reg_idx(cs,
1852 R_028AA8_IA_MULTI_VGT_PARAM,
1853 1, ia_multi_vgt_param);
1855 radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
1856 ia_multi_vgt_param);
1858 state->last_ia_multi_vgt_param = ia_multi_vgt_param;
1861 /* Primitive restart. */
1862 primitive_reset_en =
1863 indexed_draw && state->pipeline->graphics.prim_restart_enable;
1865 if (primitive_reset_en != state->last_primitive_reset_en) {
1866 state->last_primitive_reset_en = primitive_reset_en;
1867 if (info->chip_class >= GFX9) {
1868 radeon_set_uconfig_reg(cs,
1869 R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
1870 primitive_reset_en);
1872 radeon_set_context_reg(cs,
1873 R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
1874 primitive_reset_en);
1878 if (primitive_reset_en) {
1879 uint32_t primitive_reset_index =
1880 state->index_type ? 0xffffffffu : 0xffffu;
1882 if (primitive_reset_index != state->last_primitive_reset_index) {
1883 radeon_set_context_reg(cs,
1884 R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
1885 primitive_reset_index);
1886 state->last_primitive_reset_index = primitive_reset_index;
1891 static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
1892 VkPipelineStageFlags src_stage_mask)
1894 if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
1895 VK_PIPELINE_STAGE_TRANSFER_BIT |
1896 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
1897 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
1898 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
1901 if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
1902 VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
1903 VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
1904 VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
1905 VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
1906 VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
1907 VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
1908 VK_PIPELINE_STAGE_TRANSFER_BIT |
1909 VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
1910 VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
1911 VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
1912 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
1913 } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
1914 VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
1915 VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
1916 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
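/* For example, a barrier whose srcStageMask is only
 * VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT results in a
 * PS_PARTIAL_FLUSH, while VK_PIPELINE_STAGE_VERTEX_SHADER_BIT alone
 * gets away with the cheaper VS_PARTIAL_FLUSH.
 */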
1920 static enum radv_cmd_flush_bits
1921 radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
1922 VkAccessFlags src_flags)
1924 enum radv_cmd_flush_bits flush_bits = 0;
1926 for_each_bit(b, src_flags) {
1927 switch ((VkAccessFlagBits)(1 << b)) {
1928 case VK_ACCESS_SHADER_WRITE_BIT:
1929 flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
1931 case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
1932 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1933 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1935 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
1936 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1937 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1939 case VK_ACCESS_TRANSFER_WRITE_BIT:
1940 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1941 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
1942 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1943 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
1944 RADV_CMD_FLAG_INV_GLOBAL_L2;
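/* Transfer writes may be implemented as CB/DB blits or as copies
 * going through L2, so conservatively flush and invalidate all of
 * them.
 */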
1953 static enum radv_cmd_flush_bits
1954 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
1955 VkAccessFlags dst_flags,
1956 struct radv_image *image)
1958 enum radv_cmd_flush_bits flush_bits = 0;
1960 for_each_bit(b, dst_flags) {
1961 switch ((VkAccessFlagBits)(1 << b)) {
1962 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
1963 case VK_ACCESS_INDEX_READ_BIT:
1965 case VK_ACCESS_UNIFORM_READ_BIT:
1966 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
1968 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
1969 case VK_ACCESS_SHADER_READ_BIT:
1970 case VK_ACCESS_TRANSFER_READ_BIT:
1971 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
1972 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
1973 RADV_CMD_FLAG_INV_GLOBAL_L2;
1975 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
/* TODO: change this check to `image &&` once the image gets
 * passed through from the subpass. */
1978 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1979 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1980 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1982 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
1983 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1984 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1985 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1994 static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
1996 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
1997 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
1998 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
2002 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
2003 VkAttachmentReference att)
2005 unsigned idx = att.attachment;
2006 struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
2007 VkImageSubresourceRange range;
2008 range.aspectMask = 0;
2009 range.baseMipLevel = view->base_mip;
2010 range.levelCount = 1;
2011 range.baseArrayLayer = view->base_layer;
2012 range.layerCount = cmd_buffer->state.framebuffer->layers;
2014 radv_handle_image_transition(cmd_buffer,
2016 cmd_buffer->state.attachments[idx].current_layout,
2017 att.layout, 0, 0, &range,
2018 cmd_buffer->state.attachments[idx].pending_clear_aspects);
2020 cmd_buffer->state.attachments[idx].current_layout = att.layout;
2026 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
2027 const struct radv_subpass *subpass, bool transitions)
2030 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
2032 for (unsigned i = 0; i < subpass->color_count; ++i) {
2033 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
2034 radv_handle_subpass_image_transition(cmd_buffer,
2035 subpass->color_attachments[i]);
2038 for (unsigned i = 0; i < subpass->input_count; ++i) {
2039 radv_handle_subpass_image_transition(cmd_buffer,
2040 subpass->input_attachments[i]);
2043 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
2044 radv_handle_subpass_image_transition(cmd_buffer,
2045 subpass->depth_stencil_attachment);
2049 cmd_buffer->state.subpass = subpass;
2051 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
2055 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
2056 struct radv_render_pass *pass,
2057 const VkRenderPassBeginInfo *info)
2059 struct radv_cmd_state *state = &cmd_buffer->state;
2061 if (pass->attachment_count == 0) {
2062 state->attachments = NULL;
2066 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
2067 pass->attachment_count *
2068 sizeof(state->attachments[0]),
2069 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2070 if (state->attachments == NULL) {
2071 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2072 return cmd_buffer->record_result;
2075 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
2076 struct radv_render_pass_attachment *att = &pass->attachments[i];
2077 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
2078 VkImageAspectFlags clear_aspects = 0;
2080 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
2081 /* color attachment */
2082 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2083 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
2086 /* depthstencil attachment */
2087 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
2088 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2089 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
2090 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2091 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
2092 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2094 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
2095 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
2096 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
2100 state->attachments[i].pending_clear_aspects = clear_aspects;
2101 state->attachments[i].cleared_views = 0;
2102 if (clear_aspects && info) {
2103 assert(info->clearValueCount > i);
2104 state->attachments[i].clear_value = info->pClearValues[i];
2107 state->attachments[i].current_layout = att->initial_layout;
2113 VkResult radv_AllocateCommandBuffers(
2115 const VkCommandBufferAllocateInfo *pAllocateInfo,
2116 VkCommandBuffer *pCommandBuffers)
2118 RADV_FROM_HANDLE(radv_device, device, _device);
2119 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
2121 VkResult result = VK_SUCCESS;
2124 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
2126 if (!list_empty(&pool->free_cmd_buffers)) {
2127 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
2129 list_del(&cmd_buffer->pool_link);
2130 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
2132 result = radv_reset_cmd_buffer(cmd_buffer);
2133 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
2134 cmd_buffer->level = pAllocateInfo->level;
2136 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
2138 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
2139 &pCommandBuffers[i]);
2141 if (result != VK_SUCCESS)
2145 if (result != VK_SUCCESS) {
2146 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
2147 i, pCommandBuffers);
2149 /* From the Vulkan 1.0.66 spec:
2151 * "vkAllocateCommandBuffers can be used to create multiple
2152 * command buffers. If the creation of any of those command
2153 * buffers fails, the implementation must destroy all
2154 * successfully created command buffer objects from this
2155 * command, set all entries of the pCommandBuffers array to
2156 * NULL and return the error."
2158 memset(pCommandBuffers, 0,
2159 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
2165 void radv_FreeCommandBuffers(
2167 VkCommandPool commandPool,
2168 uint32_t commandBufferCount,
2169 const VkCommandBuffer *pCommandBuffers)
2171 for (uint32_t i = 0; i < commandBufferCount; i++) {
2172 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2175 if (cmd_buffer->pool) {
2176 list_del(&cmd_buffer->pool_link);
2177 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
2179 radv_cmd_buffer_destroy(cmd_buffer);
2185 VkResult radv_ResetCommandBuffer(
2186 VkCommandBuffer commandBuffer,
2187 VkCommandBufferResetFlags flags)
2189 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2190 return radv_reset_cmd_buffer(cmd_buffer);
2193 static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
2195 struct radv_device *device = cmd_buffer->device;
2196 if (device->gfx_init) {
2197 uint64_t va = radv_buffer_get_va(device->gfx_init);
2198 radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init, 8);
2199 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
2200 radeon_emit(cmd_buffer->cs, va);
2201 radeon_emit(cmd_buffer->cs, va >> 32);
2202 radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
2204 si_init_config(cmd_buffer);
2207 VkResult radv_BeginCommandBuffer(
2208 VkCommandBuffer commandBuffer,
2209 const VkCommandBufferBeginInfo *pBeginInfo)
2211 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2212 VkResult result = VK_SUCCESS;
2214 if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
/* If the command buffer has already been reset with
 * vkResetCommandBuffer, there is no need to do it again.
 */
2218 result = radv_reset_cmd_buffer(cmd_buffer);
2219 if (result != VK_SUCCESS)
2223 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2224 cmd_buffer->state.last_primitive_reset_en = -1;
2225 cmd_buffer->state.last_index_type = -1;
2226 cmd_buffer->state.last_num_instances = -1;
2227 cmd_buffer->state.last_vertex_offset = -1;
2228 cmd_buffer->state.last_first_instance = -1;
2229 cmd_buffer->usage_flags = pBeginInfo->flags;
/* Set up the initial configuration for the command buffer. */
2232 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2233 switch (cmd_buffer->queue_family_index) {
2234 case RADV_QUEUE_GENERAL:
2235 emit_gfx_buffer_state(cmd_buffer);
2237 case RADV_QUEUE_COMPUTE:
2238 si_init_compute(cmd_buffer);
2240 case RADV_QUEUE_TRANSFER:
2246 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2247 assert(pBeginInfo->pInheritanceInfo);
2248 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
2249 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2251 struct radv_subpass *subpass =
2252 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2254 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
2255 if (result != VK_SUCCESS)
2258 radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
2261 if (unlikely(cmd_buffer->device->trace_bo))
2262 radv_cmd_buffer_trace_emit(cmd_buffer);
2264 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
2269 void radv_CmdBindVertexBuffers(
2270 VkCommandBuffer commandBuffer,
2271 uint32_t firstBinding,
2272 uint32_t bindingCount,
2273 const VkBuffer* pBuffers,
2274 const VkDeviceSize* pOffsets)
2276 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2277 struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
2278 bool changed = false;
/* We have to defer setting up the vertex buffers since we need the
 * buffer strides from the pipeline. */
2283 assert(firstBinding + bindingCount <= MAX_VBS);
2284 for (uint32_t i = 0; i < bindingCount; i++) {
2285 uint32_t idx = firstBinding + i;
2288 (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
2289 vb[idx].offset != pOffsets[i])) {
2293 vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
2294 vb[idx].offset = pOffsets[i];
2296 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
2297 vb[idx].buffer->bo, 8);
2301 /* No state changes. */
2305 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
2308 void radv_CmdBindIndexBuffer(
2309 VkCommandBuffer commandBuffer,
2311 VkDeviceSize offset,
2312 VkIndexType indexType)
2314 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2315 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
2317 if (cmd_buffer->state.index_buffer == index_buffer &&
2318 cmd_buffer->state.index_offset == offset &&
2319 cmd_buffer->state.index_type == indexType) {
2320 /* No state changes. */
2324 cmd_buffer->state.index_buffer = index_buffer;
2325 cmd_buffer->state.index_offset = offset;
cmd_buffer->state.index_type = indexType; /* VkIndexType matches the hardware encoding */
2327 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
2328 cmd_buffer->state.index_va += index_buffer->offset + offset;
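/* VK_INDEX_TYPE_UINT16 is 0 and VK_INDEX_TYPE_UINT32 is 1, so the
 * index size in bytes is 2 << index_type, i.e. a shift of 1 or 2.
 */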
2330 int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
2331 cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
2332 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
2333 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo, 8);
2338 radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2339 VkPipelineBindPoint bind_point,
2340 struct radv_descriptor_set *set, unsigned idx)
2342 struct radeon_winsys *ws = cmd_buffer->device->ws;
2344 radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
2348 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
2350 if (!cmd_buffer->device->use_global_bo_list) {
2351 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
2352 if (set->descriptors[j])
2353 radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7);
2357 radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8);
2360 void radv_CmdBindDescriptorSets(
2361 VkCommandBuffer commandBuffer,
2362 VkPipelineBindPoint pipelineBindPoint,
2363 VkPipelineLayout _layout,
2365 uint32_t descriptorSetCount,
2366 const VkDescriptorSet* pDescriptorSets,
2367 uint32_t dynamicOffsetCount,
2368 const uint32_t* pDynamicOffsets)
2370 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2371 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2372 unsigned dyn_idx = 0;
2374 const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
2376 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2377 unsigned idx = i + firstSet;
2378 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
2379 radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
2381 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2382 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
2383 uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
2384 assert(dyn_idx < dynamicOffsetCount);
2386 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
2387 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
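/* Build a 4-dword buffer descriptor: base address, high address bits,
 * the bound range (or ~0 when bounds checking is disabled for
 * debugging) and a 32-bit resource format word, which is what the
 * shader expects for a dynamic uniform/storage buffer.
 */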
2389 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2390 dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
2391 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2392 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2393 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2394 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2395 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2396 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2397 cmd_buffer->push_constant_stages |=
2398 set->layout->dynamic_shader_stages;
2403 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2404 struct radv_descriptor_set *set,
2405 struct radv_descriptor_set_layout *layout,
2406 VkPipelineBindPoint bind_point)
2408 struct radv_descriptor_state *descriptors_state =
2409 radv_get_descriptors_state(cmd_buffer, bind_point);
2410 set->size = layout->size;
2411 set->layout = layout;
2413 if (descriptors_state->push_set.capacity < set->size) {
2414 size_t new_size = MAX2(set->size, 1024);
2415 new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
2416 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
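/* Growth example: the first push of any set allocates at least 1024
 * bytes; later pushes at least double the capacity, capped at
 * 96 * MAX_PUSH_DESCRIPTORS bytes (96 presumably being the largest
 * single descriptor size).
 */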
2418 free(set->mapped_ptr);
2419 set->mapped_ptr = malloc(new_size);
2421 if (!set->mapped_ptr) {
2422 descriptors_state->push_set.capacity = 0;
2423 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2427 descriptors_state->push_set.capacity = new_size;
2433 void radv_meta_push_descriptor_set(
2434 struct radv_cmd_buffer* cmd_buffer,
2435 VkPipelineBindPoint pipelineBindPoint,
2436 VkPipelineLayout _layout,
2438 uint32_t descriptorWriteCount,
2439 const VkWriteDescriptorSet* pDescriptorWrites)
2441 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2442 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
2446 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2448 push_set->size = layout->set[set].layout->size;
2449 push_set->layout = layout->set[set].layout;
2451 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
2453 (void**) &push_set->mapped_ptr))
2456 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2457 push_set->va += bo_offset;
2459 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2460 radv_descriptor_set_to_handle(push_set),
2461 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2463 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
2466 void radv_CmdPushDescriptorSetKHR(
2467 VkCommandBuffer commandBuffer,
2468 VkPipelineBindPoint pipelineBindPoint,
2469 VkPipelineLayout _layout,
2471 uint32_t descriptorWriteCount,
2472 const VkWriteDescriptorSet* pDescriptorWrites)
2474 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2475 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2476 struct radv_descriptor_state *descriptors_state =
2477 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2478 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
2480 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2482 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
2483 layout->set[set].layout,
2487 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2488 radv_descriptor_set_to_handle(push_set),
2489 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2491 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
2492 descriptors_state->push_dirty = true;
2495 void radv_CmdPushDescriptorSetWithTemplateKHR(
2496 VkCommandBuffer commandBuffer,
2497 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
2498 VkPipelineLayout _layout,
2502 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2503 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2504 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
2505 struct radv_descriptor_state *descriptors_state =
2506 radv_get_descriptors_state(cmd_buffer, templ->bind_point);
2507 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
2509 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2511 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
2512 layout->set[set].layout,
2516 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
2517 descriptorUpdateTemplate, pData);
2519 radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
2520 descriptors_state->push_dirty = true;
2523 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
2524 VkPipelineLayout layout,
2525 VkShaderStageFlags stageFlags,
2528 const void* pValues)
2530 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2531 memcpy(cmd_buffer->push_constants + offset, pValues, size);
2532 cmd_buffer->push_constant_stages |= stageFlags;
2535 VkResult radv_EndCommandBuffer(
2536 VkCommandBuffer commandBuffer)
2538 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2540 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
2541 if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
2542 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
2543 si_emit_cache_flush(cmd_buffer);
2546 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
2548 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
2549 return vk_error(cmd_buffer->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2551 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
2553 return cmd_buffer->record_result;
2557 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
2559 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2561 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
2564 cmd_buffer->state.emitted_compute_pipeline = pipeline;
2566 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
2567 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
2569 cmd_buffer->compute_scratch_size_needed =
2570 MAX2(cmd_buffer->compute_scratch_size_needed,
2571 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
2573 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
2574 pipeline->shaders[MESA_SHADER_COMPUTE]->bo, 8);
2576 if (unlikely(cmd_buffer->device->trace_bo))
2577 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
2580 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
2581 VkPipelineBindPoint bind_point)
2583 struct radv_descriptor_state *descriptors_state =
2584 radv_get_descriptors_state(cmd_buffer, bind_point);
2586 descriptors_state->dirty |= descriptors_state->valid;
2589 void radv_CmdBindPipeline(
2590 VkCommandBuffer commandBuffer,
2591 VkPipelineBindPoint pipelineBindPoint,
2592 VkPipeline _pipeline)
2594 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2595 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
2597 switch (pipelineBindPoint) {
2598 case VK_PIPELINE_BIND_POINT_COMPUTE:
2599 if (cmd_buffer->state.compute_pipeline == pipeline)
2601 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
2603 cmd_buffer->state.compute_pipeline = pipeline;
2604 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2606 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2607 if (cmd_buffer->state.pipeline == pipeline)
2609 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
2611 cmd_buffer->state.pipeline = pipeline;
2615 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
2616 cmd_buffer->push_constant_stages |= pipeline->active_stages;
/* The new vertex shader might not use the same user SGPRs, so the
 * previously emitted vertex offset and first instance are invalid. */
2619 cmd_buffer->state.last_first_instance = -1;
2620 cmd_buffer->state.last_vertex_offset = -1;
2622 /* Prefetch all pipeline shaders at first draw time. */
2623 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
2625 radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
2627 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
2628 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
2629 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
2630 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
2632 if (radv_pipeline_has_tess(pipeline))
2633 cmd_buffer->tess_rings_needed = true;
2635 if (radv_pipeline_has_gs(pipeline)) {
2636 struct radv_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
2637 AC_UD_SCRATCH_RING_OFFSETS);
2638 if (cmd_buffer->ring_offsets_idx == -1)
2639 cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
2640 else if (loc->sgpr_idx != -1)
2641 assert(loc->sgpr_idx == cmd_buffer->ring_offsets_idx);
2645 assert(!"invalid bind point");
2650 void radv_CmdSetViewport(
2651 VkCommandBuffer commandBuffer,
2652 uint32_t firstViewport,
2653 uint32_t viewportCount,
2654 const VkViewport* pViewports)
2656 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2657 struct radv_cmd_state *state = &cmd_buffer->state;
2658 MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
2660 assert(firstViewport < MAX_VIEWPORTS);
2661 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
2663 memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
2664 viewportCount * sizeof(*pViewports));
2666 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
2669 void radv_CmdSetScissor(
2670 VkCommandBuffer commandBuffer,
2671 uint32_t firstScissor,
2672 uint32_t scissorCount,
2673 const VkRect2D* pScissors)
2675 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2676 struct radv_cmd_state *state = &cmd_buffer->state;
2677 MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
2679 assert(firstScissor < MAX_SCISSORS);
2680 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
2682 memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
2683 scissorCount * sizeof(*pScissors));
2685 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
2688 void radv_CmdSetLineWidth(
2689 VkCommandBuffer commandBuffer,
2692 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2693 cmd_buffer->state.dynamic.line_width = lineWidth;
2694 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2697 void radv_CmdSetDepthBias(
2698 VkCommandBuffer commandBuffer,
2699 float depthBiasConstantFactor,
2700 float depthBiasClamp,
2701 float depthBiasSlopeFactor)
2703 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2705 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
2706 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
2707 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
2709 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
2712 void radv_CmdSetBlendConstants(
2713 VkCommandBuffer commandBuffer,
2714 const float blendConstants[4])
2716 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2718 memcpy(cmd_buffer->state.dynamic.blend_constants,
2719 blendConstants, sizeof(float) * 4);
2721 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
2724 void radv_CmdSetDepthBounds(
2725 VkCommandBuffer commandBuffer,
2726 float minDepthBounds,
2727 float maxDepthBounds)
2729 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2731 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
2732 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
2734 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
2737 void radv_CmdSetStencilCompareMask(
2738 VkCommandBuffer commandBuffer,
2739 VkStencilFaceFlags faceMask,
2740 uint32_t compareMask)
2742 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2744 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2745 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
2746 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2747 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
2749 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2752 void radv_CmdSetStencilWriteMask(
2753 VkCommandBuffer commandBuffer,
2754 VkStencilFaceFlags faceMask,
2757 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2759 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2760 cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
2761 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2762 cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
2764 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2767 void radv_CmdSetStencilReference(
2768 VkCommandBuffer commandBuffer,
2769 VkStencilFaceFlags faceMask,
2772 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2774 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2775 cmd_buffer->state.dynamic.stencil_reference.front = reference;
2776 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2777 cmd_buffer->state.dynamic.stencil_reference.back = reference;
2779 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2782 void radv_CmdSetDiscardRectangleEXT(
2783 VkCommandBuffer commandBuffer,
2784 uint32_t firstDiscardRectangle,
2785 uint32_t discardRectangleCount,
2786 const VkRect2D* pDiscardRectangles)
2788 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2789 struct radv_cmd_state *state = &cmd_buffer->state;
2790 MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
2792 assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
2793 assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
2795 typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
2796 pDiscardRectangles, discardRectangleCount);
2798 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
2801 void radv_CmdExecuteCommands(
2802 VkCommandBuffer commandBuffer,
2803 uint32_t commandBufferCount,
2804 const VkCommandBuffer* pCmdBuffers)
2806 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
2808 assert(commandBufferCount > 0);
/* Emit pending flushes on the primary prior to executing the secondary buffers. */
2811 si_emit_cache_flush(primary);
2813 for (uint32_t i = 0; i < commandBufferCount; i++) {
2814 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
2816 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
2817 secondary->scratch_size_needed);
2818 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
2819 secondary->compute_scratch_size_needed);
2821 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
2822 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
2823 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
2824 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
2825 if (secondary->tess_rings_needed)
2826 primary->tess_rings_needed = true;
2827 if (secondary->sample_positions_needed)
2828 primary->sample_positions_needed = true;
2830 if (secondary->ring_offsets_idx != -1) {
2831 if (primary->ring_offsets_idx == -1)
2832 primary->ring_offsets_idx = secondary->ring_offsets_idx;
2834 assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
2836 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
/* When the secondary command buffer is compute-only, we don't
 * need to re-emit the current graphics pipeline.
 */
2842 if (secondary->state.emitted_pipeline) {
2843 primary->state.emitted_pipeline =
2844 secondary->state.emitted_pipeline;
/* When the secondary command buffer is graphics-only, we don't
 * need to re-emit the current compute pipeline.
 */
2850 if (secondary->state.emitted_compute_pipeline) {
2851 primary->state.emitted_compute_pipeline =
2852 secondary->state.emitted_compute_pipeline;
2855 /* Only re-emit the draw packets when needed. */
2856 if (secondary->state.last_primitive_reset_en != -1) {
2857 primary->state.last_primitive_reset_en =
2858 secondary->state.last_primitive_reset_en;
2861 if (secondary->state.last_primitive_reset_index) {
2862 primary->state.last_primitive_reset_index =
2863 secondary->state.last_primitive_reset_index;
2866 if (secondary->state.last_ia_multi_vgt_param) {
2867 primary->state.last_ia_multi_vgt_param =
2868 secondary->state.last_ia_multi_vgt_param;
2871 primary->state.last_first_instance = secondary->state.last_first_instance;
2872 primary->state.last_num_instances = secondary->state.last_num_instances;
2873 primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
2875 if (secondary->state.last_index_type != -1) {
2876 primary->state.last_index_type =
2877 secondary->state.last_index_type;
/* After executing commands from secondary buffers we have to dirty
 * some states so that they get re-emitted on the primary command
 * buffer.
 */
2884 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
2885 RADV_CMD_DIRTY_INDEX_BUFFER |
2886 RADV_CMD_DIRTY_DYNAMIC_ALL;
2887 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
2888 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
2891 VkResult radv_CreateCommandPool(
2893 const VkCommandPoolCreateInfo* pCreateInfo,
2894 const VkAllocationCallbacks* pAllocator,
2895 VkCommandPool* pCmdPool)
2897 RADV_FROM_HANDLE(radv_device, device, _device);
2898 struct radv_cmd_pool *pool;
2900 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2901 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2903 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2906 pool->alloc = *pAllocator;
2908 pool->alloc = device->alloc;
2910 list_inithead(&pool->cmd_buffers);
2911 list_inithead(&pool->free_cmd_buffers);
2913 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2915 *pCmdPool = radv_cmd_pool_to_handle(pool);
2921 void radv_DestroyCommandPool(
2923 VkCommandPool commandPool,
2924 const VkAllocationCallbacks* pAllocator)
2926 RADV_FROM_HANDLE(radv_device, device, _device);
2927 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2932 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2933 &pool->cmd_buffers, pool_link) {
2934 radv_cmd_buffer_destroy(cmd_buffer);
2937 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2938 &pool->free_cmd_buffers, pool_link) {
2939 radv_cmd_buffer_destroy(cmd_buffer);
2942 vk_free2(&device->alloc, pAllocator, pool);
2945 VkResult radv_ResetCommandPool(
2947 VkCommandPool commandPool,
2948 VkCommandPoolResetFlags flags)
2950 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2953 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
2954 &pool->cmd_buffers, pool_link) {
2955 result = radv_reset_cmd_buffer(cmd_buffer);
2956 if (result != VK_SUCCESS)
2963 void radv_TrimCommandPool(
2965 VkCommandPool commandPool,
2966 VkCommandPoolTrimFlagsKHR flags)
2968 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2973 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2974 &pool->free_cmd_buffers, pool_link) {
2975 radv_cmd_buffer_destroy(cmd_buffer);
2979 void radv_CmdBeginRenderPass(
2980 VkCommandBuffer commandBuffer,
2981 const VkRenderPassBeginInfo* pRenderPassBegin,
2982 VkSubpassContents contents)
2984 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2985 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
2986 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
2988 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2989 cmd_buffer->cs, 2048);
2990 MAYBE_UNUSED VkResult result;
2992 cmd_buffer->state.framebuffer = framebuffer;
2993 cmd_buffer->state.pass = pass;
2994 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
2996 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
2997 if (result != VK_SUCCESS)
3000 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
3001 assert(cmd_buffer->cs->cdw <= cdw_max);
3003 radv_cmd_buffer_clear_subpass(cmd_buffer);
3006 void radv_CmdNextSubpass(
3007 VkCommandBuffer commandBuffer,
3008 VkSubpassContents contents)
3010 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3012 radv_cmd_buffer_resolve_subpass(cmd_buffer);
3014 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
3017 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
3018 radv_cmd_buffer_clear_subpass(cmd_buffer);
3021 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
3023 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
3024 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
3025 if (!pipeline->shaders[stage])
3027 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
3028 if (loc->sgpr_idx == -1)
3030 uint32_t base_reg = pipeline->user_data_0[stage];
3031 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
3034 if (pipeline->gs_copy_shader) {
3035 struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
3036 if (loc->sgpr_idx != -1) {
3037 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
3038 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
3044 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
3045 uint32_t vertex_count)
3047 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
3048 radeon_emit(cmd_buffer->cs, vertex_count);
3049 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
3050 S_0287F0_USE_OPAQUE(0));
3054 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
3056 uint32_t index_count)
3058 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
3059 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
3060 radeon_emit(cmd_buffer->cs, index_va);
3061 radeon_emit(cmd_buffer->cs, index_va >> 32);
3062 radeon_emit(cmd_buffer->cs, index_count);
3063 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
3067 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
3069 uint32_t draw_count,
3073 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3074 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
3075 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
3076 bool draw_id_enable = radv_get_shader(cmd_buffer->state.pipeline, MESA_SHADER_VERTEX)->info.info.vs.needs_draw_id;
3077 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
/* Just reset the tracked draw state for vertex data; the indirect
 * packets program these registers themselves, so the last emitted
 * values are stale. */
3081 cmd_buffer->state.last_first_instance = -1;
3082 cmd_buffer->state.last_num_instances = -1;
3083 cmd_buffer->state.last_vertex_offset = -1;
3085 if (draw_count == 1 && !count_va && !draw_id_enable) {
3086 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
3087 PKT3_DRAW_INDIRECT, 3, false));
3089 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3090 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3091 radeon_emit(cs, di_src_sel);
3093 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
3094 PKT3_DRAW_INDIRECT_MULTI,
3097 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
3098 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
3099 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
3100 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
3101 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
3102 radeon_emit(cs, draw_count); /* count */
3103 radeon_emit(cs, count_va); /* count_addr */
3104 radeon_emit(cs, count_va >> 32);
3105 radeon_emit(cs, stride); /* stride */
3106 radeon_emit(cs, di_src_sel);
3110 struct radv_draw_info {
3112 * Number of vertices.
3117 * Index of the first vertex.
3119 int32_t vertex_offset;
3122 * First instance id.
3124 uint32_t first_instance;
3127 * Number of instances.
3129 uint32_t instance_count;
3132 * First index (indexed draws only).
3134 uint32_t first_index;
3137 * Whether it's an indexed draw.
3142 * Indirect draw parameters resource.
3144 struct radv_buffer *indirect;
3145 uint64_t indirect_offset;
3149 * Draw count parameters resource.
3151 struct radv_buffer *count_buffer;
3152 uint64_t count_buffer_offset;
3156 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
3157 const struct radv_draw_info *info)
3159 struct radv_cmd_state *state = &cmd_buffer->state;
3160 struct radeon_winsys *ws = cmd_buffer->device->ws;
3161 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3163 if (info->indirect) {
3164 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3165 uint64_t count_va = 0;
3167 va += info->indirect->offset + info->indirect_offset;
3169 radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
3171 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
3173 radeon_emit(cs, va);
3174 radeon_emit(cs, va >> 32);
3176 if (info->count_buffer) {
3177 count_va = radv_buffer_get_va(info->count_buffer->bo);
3178 count_va += info->count_buffer->offset +
3179 info->count_buffer_offset;
3181 radv_cs_add_buffer(ws, cs, info->count_buffer->bo, 8);
3184 if (!state->subpass->view_mask) {
3185 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3192 for_each_bit(i, state->subpass->view_mask) {
3193 radv_emit_view_index(cmd_buffer, i);
3195 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3203 assert(state->pipeline->graphics.vtx_base_sgpr);
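/* Direct draws pass the base vertex and start instance (plus the draw
 * id when vtx_emit_num is 3) in consecutive user SGPRs starting at
 * vtx_base_sgpr, so they only need to be re-emitted when the values
 * change.
 */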
3205 if (info->vertex_offset != state->last_vertex_offset ||
3206 info->first_instance != state->last_first_instance) {
3207 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
3208 state->pipeline->graphics.vtx_emit_num);
3210 radeon_emit(cs, info->vertex_offset);
3211 radeon_emit(cs, info->first_instance);
3212 if (state->pipeline->graphics.vtx_emit_num == 3)
3214 state->last_first_instance = info->first_instance;
3215 state->last_vertex_offset = info->vertex_offset;
3218 if (state->last_num_instances != info->instance_count) {
3219 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
3220 radeon_emit(cs, info->instance_count);
3221 state->last_num_instances = info->instance_count;
3224 if (info->indexed) {
3225 int index_size = state->index_type ? 4 : 2;
3228 index_va = state->index_va;
3229 index_va += info->first_index * index_size;
3231 if (!state->subpass->view_mask) {
3232 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3237 for_each_bit(i, state->subpass->view_mask) {
3238 radv_emit_view_index(cmd_buffer, i);
3240 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3246 if (!state->subpass->view_mask) {
3247 radv_cs_emit_draw_packet(cmd_buffer, info->count);
3250 for_each_bit(i, state->subpass->view_mask) {
3251 radv_emit_view_index(cmd_buffer, i);
3253 radv_cs_emit_draw_packet(cmd_buffer,
* Vega and Raven have a bug that triggers if there are multiple context
* register contexts active at the same time with different scissor values.
*
* There are two possible workarounds:
* 1) Wait for PS_PARTIAL_FLUSH every time the scissor is changed. That way
*    there is only ever one active set of scissor values at the same time.
3269 * 2) Whenever the hardware switches contexts we have to set the scissor
3270 * registers again even if it is a noop. That way the new context gets
3271 * the correct scissor values.
3273 * This implements option 2. radv_need_late_scissor_emission needs to
3274 * return true on affected HW if radv_emit_all_graphics_states sets
3275 * any context registers.
3277 static bool radv_need_late_scissor_emission(struct radv_cmd_buffer *cmd_buffer,
3280 struct radv_cmd_state *state = &cmd_buffer->state;
3282 if (!cmd_buffer->device->physical_device->has_scissor_bug)
/* Assume any state change other than the index buffer, the vertex
 * buffers and the pipeline (which is checked separately below) can
 * imply a context roll. */
3286 if (cmd_buffer->state.dirty & ~(RADV_CMD_DIRTY_INDEX_BUFFER |
3287 RADV_CMD_DIRTY_VERTEX_BUFFER |
3288 RADV_CMD_DIRTY_PIPELINE))
3291 if (cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
3294 if (indexed_draw && state->pipeline->graphics.prim_restart_enable &&
3295 (state->index_type ? 0xffffffffu : 0xffffu) != state->last_primitive_reset_index)
3302 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
3303 const struct radv_draw_info *info)
3305 bool late_scissor_emission = radv_need_late_scissor_emission(cmd_buffer, info->indexed);
3307 if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
3308 cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
3309 radv_emit_rbplus_state(cmd_buffer);
3311 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
3312 radv_emit_graphics_pipeline(cmd_buffer);
3314 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
3315 radv_emit_framebuffer_state(cmd_buffer);
3317 if (info->indexed) {
3318 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
3319 radv_emit_index_buffer(cmd_buffer);
/* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
 * so the state must be re-emitted before the next indexed
 * draw.
 */
3325 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
3326 cmd_buffer->state.last_index_type = -1;
3327 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
3331 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
3333 radv_emit_draw_registers(cmd_buffer, info->indexed,
3334 info->instance_count > 1, info->indirect,
3335 info->indirect ? 0 : info->count);
3337 if (late_scissor_emission)
3338 radv_emit_scissor(cmd_buffer);
3342 radv_draw(struct radv_cmd_buffer *cmd_buffer,
3343 const struct radv_draw_info *info)
3346 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
3347 bool pipeline_is_dirty =
3348 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
3349 cmd_buffer->state.pipeline &&
3350 cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
3352 MAYBE_UNUSED unsigned cdw_max =
3353 radeon_check_space(cmd_buffer->device->ws,
3354 cmd_buffer->cs, 4096);
/* Use optimal packet order based on whether we need to sync the
 * pipeline.
 */
3359 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3360 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3361 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
3362 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
3363 /* If we have to wait for idle, set all states first, so that
3364 * all SET packets are processed in parallel with previous draw
3365 * calls. Then upload descriptors, set shader pointers, and
3366 * draw, and prefetch at the end. This ensures that the time
3367 * the CUs are idle is very short. (there are only SET_SH
3368 * packets between the wait and the draw)
3370 radv_emit_all_graphics_states(cmd_buffer, info);
3371 si_emit_cache_flush(cmd_buffer);
3372 /* <-- CUs are idle here --> */
3374 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
3376 radv_emit_draw_packets(cmd_buffer, info);
3377 /* <-- CUs are busy here --> */
/* Start prefetches after the draw has been started. Both will
 * run in parallel, but starting the draw first is more
 * important.
 */
3383 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3384 radv_emit_prefetch_L2(cmd_buffer,
3385 cmd_buffer->state.pipeline, false);
3388 /* If we don't wait for idle, start prefetches first, then set
3389 * states, and draw at the end.
3391 si_emit_cache_flush(cmd_buffer);
3393 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3394 /* Only prefetch the vertex shader and VBO descriptors
3395 * in order to start the draw as soon as possible.
3397 radv_emit_prefetch_L2(cmd_buffer,
3398 cmd_buffer->state.pipeline, true);
3401 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
3403 radv_emit_all_graphics_states(cmd_buffer, info);
3404 radv_emit_draw_packets(cmd_buffer, info);
/* Prefetch the remaining shaders after the draw has been
 * started.
 */
3409 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3410 radv_emit_prefetch_L2(cmd_buffer,
3411 cmd_buffer->state.pipeline, false);
3415 assert(cmd_buffer->cs->cdw <= cdw_max);
3416 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
3420 VkCommandBuffer commandBuffer,
3421 uint32_t vertexCount,
3422 uint32_t instanceCount,
3423 uint32_t firstVertex,
3424 uint32_t firstInstance)
3426 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3427 struct radv_draw_info info = {};
3429 info.count = vertexCount;
3430 info.instance_count = instanceCount;
3431 info.first_instance = firstInstance;
3432 info.vertex_offset = firstVertex;
3434 radv_draw(cmd_buffer, &info);
3437 void radv_CmdDrawIndexed(
3438 VkCommandBuffer commandBuffer,
3439 uint32_t indexCount,
3440 uint32_t instanceCount,
3441 uint32_t firstIndex,
3442 int32_t vertexOffset,
3443 uint32_t firstInstance)
3445 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3446 struct radv_draw_info info = {};
3448 info.indexed = true;
3449 info.count = indexCount;
3450 info.instance_count = instanceCount;
3451 info.first_index = firstIndex;
3452 info.vertex_offset = vertexOffset;
3453 info.first_instance = firstInstance;
3455 radv_draw(cmd_buffer, &info);
3458 void radv_CmdDrawIndirect(
3459 VkCommandBuffer commandBuffer,
3461 VkDeviceSize offset,
3465 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3466 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3467 struct radv_draw_info info = {};
3469 info.count = drawCount;
3470 info.indirect = buffer;
3471 info.indirect_offset = offset;
3472 info.stride = stride;
3474 radv_draw(cmd_buffer, &info);
3477 void radv_CmdDrawIndexedIndirect(
3478 VkCommandBuffer commandBuffer,
3480 VkDeviceSize offset,
3484 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3485 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3486 struct radv_draw_info info = {};
3488 info.indexed = true;
3489 info.count = drawCount;
3490 info.indirect = buffer;
3491 info.indirect_offset = offset;
3492 info.stride = stride;
3494 radv_draw(cmd_buffer, &info);
3497 void radv_CmdDrawIndirectCountAMD(
3498 VkCommandBuffer commandBuffer,
3500 VkDeviceSize offset,
3501 VkBuffer _countBuffer,
3502 VkDeviceSize countBufferOffset,
3503 uint32_t maxDrawCount,
3506 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3507 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3508 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3509 struct radv_draw_info info = {};
3511 info.count = maxDrawCount;
3512 info.indirect = buffer;
3513 info.indirect_offset = offset;
3514 info.count_buffer = count_buffer;
3515 info.count_buffer_offset = countBufferOffset;
3516 info.stride = stride;
3518 radv_draw(cmd_buffer, &info);
3521 void radv_CmdDrawIndexedIndirectCountAMD(
3522 VkCommandBuffer commandBuffer,
3524 VkDeviceSize offset,
3525 VkBuffer _countBuffer,
3526 VkDeviceSize countBufferOffset,
3527 uint32_t maxDrawCount,
3530 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3531 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3532 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3533 struct radv_draw_info info = {};
3535 info.indexed = true;
3536 info.count = maxDrawCount;
3537 info.indirect = buffer;
3538 info.indirect_offset = offset;
3539 info.count_buffer = count_buffer;
3540 info.count_buffer_offset = countBufferOffset;
3541 info.stride = stride;
3543 radv_draw(cmd_buffer, &info);
3546 void radv_CmdDrawIndirectCountKHR(
3547 VkCommandBuffer commandBuffer,
3549 VkDeviceSize offset,
3550 VkBuffer _countBuffer,
3551 VkDeviceSize countBufferOffset,
3552 uint32_t maxDrawCount,
3555 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3556 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3557 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3558 struct radv_draw_info info = {};
3560 info.count = maxDrawCount;
3561 info.indirect = buffer;
3562 info.indirect_offset = offset;
3563 info.count_buffer = count_buffer;
3564 info.count_buffer_offset = countBufferOffset;
3565 info.stride = stride;
3567 radv_draw(cmd_buffer, &info);
3570 void radv_CmdDrawIndexedIndirectCountKHR(
3571 VkCommandBuffer commandBuffer,
3573 VkDeviceSize offset,
3574 VkBuffer _countBuffer,
3575 VkDeviceSize countBufferOffset,
3576 uint32_t maxDrawCount,
3579 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3580 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3581 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3582 struct radv_draw_info info = {};
3584 info.indexed = true;
3585 info.count = maxDrawCount;
3586 info.indirect = buffer;
3587 info.indirect_offset = offset;
3588 info.count_buffer = count_buffer;
3589 info.count_buffer_offset = countBufferOffset;
3590 info.stride = stride;
3592 radv_draw(cmd_buffer, &info);
3595 struct radv_dispatch_info {
3597 * Determine the layout of the grid (in block units) to be used.
3602 * A starting offset for the grid. If unaligned is set, the offset
3603 * must still be aligned.
3605 uint32_t offsets[3];
3607 * Whether it's an unaligned compute dispatch.
3612 * Indirect compute parameters resource.
3614 struct radv_buffer *indirect;
3615 uint64_t indirect_offset;
3619 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
3620 const struct radv_dispatch_info *info)
3622 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3623 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
3624 unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
3625 struct radeon_winsys *ws = cmd_buffer->device->ws;
3626 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3627 struct radv_userdata_info *loc;
3629 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
3630 AC_UD_CS_GRID_SIZE);
3632 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
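/* If the shader reads the grid size (in workgroups), it receives it
 * through three consecutive user SGPRs; for indirect dispatches the
 * values are copied from the indirect buffer with COPY_DATA below.
 */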
3634 if (info->indirect) {
3635 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3637 va += info->indirect->offset + info->indirect_offset;
3639 radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
3641 if (loc->sgpr_idx != -1) {
3642 for (unsigned i = 0; i < 3; ++i) {
3643 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
3644 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
3645 COPY_DATA_DST_SEL(COPY_DATA_REG));
3646 radeon_emit(cs, (va + 4 * i));
3647 radeon_emit(cs, (va + 4 * i) >> 32);
3648 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
3649 + loc->sgpr_idx * 4) >> 2) + i);
3654 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
3655 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
3656 PKT3_SHADER_TYPE_S(1));
3657 radeon_emit(cs, va);
3658 radeon_emit(cs, va >> 32);
3659 radeon_emit(cs, dispatch_initiator);
3661 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
3662 PKT3_SHADER_TYPE_S(1));
3664 radeon_emit(cs, va);
3665 radeon_emit(cs, va >> 32);
3667 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
3668 PKT3_SHADER_TYPE_S(1));
3670 radeon_emit(cs, dispatch_initiator);
3673 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
3674 unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
3676 if (info->unaligned) {
3677 unsigned *cs_block_size = compute_shader->info.cs.block_size;
3678 unsigned remainder[3];
/* If aligned, these should be an entire block size,
 * not 0.
 */
3683 remainder[0] = blocks[0] + cs_block_size[0] -
3684 align_u32_npot(blocks[0], cs_block_size[0]);
3685 remainder[1] = blocks[1] + cs_block_size[1] -
3686 align_u32_npot(blocks[1], cs_block_size[1]);
3687 remainder[2] = blocks[2] + cs_block_size[2] -
3688 align_u32_npot(blocks[2], cs_block_size[2]);
3690 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
3691 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
3692 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
3694 for(unsigned i = 0; i < 3; ++i) {
3695 assert(offsets[i] % cs_block_size[i] == 0);
3696 offsets[i] /= cs_block_size[i];
3699 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
3701 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
3702 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
3704 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
3705 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
3707 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
3708 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
3710 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
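
		/* Worked example (illustrative): with blocks[0] = 30 threads
		 * and cs_block_size[0] = 8, align_u32_npot(30, 8) = 32, so
		 * remainder[0] = 30 + 8 - 32 = 6 and blocks[0] rounds up to
		 * 4 groups; the last group in X then executes as a partial
		 * thread group of 6 threads via NUM_THREAD_PARTIAL together
		 * with PARTIAL_TG_EN.
		 */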

		if (loc->sgpr_idx != -1) {
			assert(!loc->indirect);
			assert(loc->num_sgprs == 3);

			radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
					      loc->sgpr_idx * 4, 3);
			radeon_emit(cs, blocks[0]);
			radeon_emit(cs, blocks[1]);
			radeon_emit(cs, blocks[2]);
		}

		if (offsets[0] || offsets[1] || offsets[2]) {
			radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
			radeon_emit(cs, offsets[0]);
			radeon_emit(cs, offsets[1]);
			radeon_emit(cs, offsets[2]);

			/* The blocks in the packet are not counts but end values. */
			for (unsigned i = 0; i < 3; ++i)
				blocks[i] += offsets[i];
		} else {
			dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
		}
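
		/* Illustrative example: offsets = (2, 0, 0) with blocks =
		 * (4, 1, 1) programs COMPUTE_START_X = 2 and emits the end
		 * values (6, 1, 1) in the dispatch packet, so groups 2..5
		 * execute in X.
		 */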

		radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
				PKT3_SHADER_TYPE_S(1));
		radeon_emit(cs, blocks[0]);
		radeon_emit(cs, blocks[1]);
		radeon_emit(cs, blocks[2]);
		radeon_emit(cs, dispatch_initiator);
	}

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

static void
radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
{
	radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
	radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
}

static void
radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
	      const struct radv_dispatch_info *info)
{
	struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
	bool has_prefetch =
		cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
	bool pipeline_is_dirty = pipeline &&
				 pipeline != cmd_buffer->state.emitted_compute_pipeline;

	if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
					    RADV_CMD_FLAG_FLUSH_AND_INV_DB |
					    RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
					    RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
		/* If we have to wait for idle, set all states first, so that
		 * all SET packets are processed in parallel with previous draw
		 * calls. Then upload descriptors, set shader pointers, and
		 * dispatch, and prefetch at the end. This ensures that the
		 * time the CUs are idle is very short. (there are only SET_SH
		 * packets between the wait and the draw)
		 */
		radv_emit_compute_pipeline(cmd_buffer);
		si_emit_cache_flush(cmd_buffer);
		/* <-- CUs are idle here --> */

		radv_upload_compute_shader_descriptors(cmd_buffer);

		radv_emit_dispatch_packets(cmd_buffer, info);
		/* <-- CUs are busy here --> */

		/* Start prefetches after the dispatch has been started. Both
		 * will run in parallel, but starting the dispatch first is
		 * more important.
		 */
		if (has_prefetch && pipeline_is_dirty) {
			radv_emit_shader_prefetch(cmd_buffer,
						  pipeline->shaders[MESA_SHADER_COMPUTE]);
		}
	} else {
		/* If we don't wait for idle, start prefetches first, then set
		 * states, and dispatch at the end.
		 */
		si_emit_cache_flush(cmd_buffer);

		if (has_prefetch && pipeline_is_dirty) {
			radv_emit_shader_prefetch(cmd_buffer,
						  pipeline->shaders[MESA_SHADER_COMPUTE]);
		}

		radv_upload_compute_shader_descriptors(cmd_buffer);

		radv_emit_compute_pipeline(cmd_buffer);
		radv_emit_dispatch_packets(cmd_buffer, info);
	}

	radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
}
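
/* Rough packet-ordering sketch of the two paths above (illustrative):
 *
 *   wait-for-idle: SET pipeline -> cache flush (CUs idle) -> descriptors
 *                  -> DISPATCH (CUs busy) -> shader prefetch
 *   no-wait:       cache flush -> shader prefetch -> descriptors
 *                  -> SET pipeline -> DISPATCH
 */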

void radv_CmdDispatchBase(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    base_x,
	uint32_t                                    base_y,
	uint32_t                                    base_z,
	uint32_t                                    x,
	uint32_t                                    y,
	uint32_t                                    z)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radv_dispatch_info info = {};

	info.blocks[0] = x;
	info.blocks[1] = y;
	info.blocks[2] = z;

	info.offsets[0] = base_x;
	info.offsets[1] = base_y;
	info.offsets[2] = base_z;
	radv_dispatch(cmd_buffer, &info);
}

void radv_CmdDispatch(
	VkCommandBuffer                             commandBuffer,
	uint32_t                                    x,
	uint32_t                                    y,
	uint32_t                                    z)
{
	radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
}

void radv_CmdDispatchIndirect(
	VkCommandBuffer                             commandBuffer,
	VkBuffer                                    _buffer,
	VkDeviceSize                                offset)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
	struct radv_dispatch_info info = {};

	info.indirect = buffer;
	info.indirect_offset = offset;

	radv_dispatch(cmd_buffer, &info);
}

void radv_unaligned_dispatch(
	struct radv_cmd_buffer                      *cmd_buffer,
	uint32_t                                    x,
	uint32_t                                    y,
	uint32_t                                    z)
{
	struct radv_dispatch_info info = {};

	info.blocks[0] = x;
	info.blocks[1] = y;
	info.blocks[2] = z;
	info.unaligned = 1;

	radv_dispatch(cmd_buffer, &info);
}
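
/* Illustrative usage: radv_unaligned_dispatch(cmd_buffer, 100, 100, 1)
 * with an 8x8x1 shader block size launches a 13x13x1 grid whose edge
 * groups run as partial thread groups (see the unaligned path in
 * radv_emit_dispatch_packets() above). Here blocks[] holds thread counts
 * rather than block counts, which is what the unaligned flag signals.
 */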

void radv_CmdEndRenderPass(
	VkCommandBuffer                             commandBuffer)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);

	radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);

	radv_cmd_buffer_resolve_subpass(cmd_buffer);

	for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
		VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
		radv_handle_subpass_image_transition(cmd_buffer,
						     (VkAttachmentReference){i, layout});
	}

	vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);

	cmd_buffer->state.pass = NULL;
	cmd_buffer->state.subpass = NULL;
	cmd_buffer->state.attachments = NULL;
	cmd_buffer->state.framebuffer = NULL;
}

/**
 * For HTILE we have the following interesting clear words:
 * 0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
 * 0xfffc000f: Uncompressed, full depth range, for depth only HTILE.
 * 0xfffffff0: Clear depth to 1.0
 * 0x00000000: Clear depth to 0.0
 */
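
/* How these words are used below (see radv_handle_depth_image_transition):
 * transitioning from UNDEFINED to an HTILE-enabled layout initializes
 * HTILE to 0 (depth cleared to 0.0), while enabling compression on an
 * already-initialized image picks the "uncompressed, full depth range"
 * word: 0xfffff30f if the format has stencil, 0xfffc000f otherwise.
 */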
static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
                                  struct radv_image *image,
                                  const VkImageSubresourceRange *range,
                                  uint32_t clear_word)
{
	assert(range->baseMipLevel == 0);
	assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
	unsigned layer_count = radv_get_layerCount(image, range);
	uint64_t size = image->surface.htile_slice_size * layer_count;
	uint64_t offset = image->offset + image->htile_offset +
	                  image->surface.htile_slice_size * range->baseArrayLayer;
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

	state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
					      size, clear_word);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

	/* Initialize the depth clear registers and update the ZRANGE_PRECISION
	 * value for the TC-compat bug (because ZRANGE_PRECISION is 1 by
	 * default). This is only needed when clearing Z to 0.0f.
	 */
	if (radv_image_is_tc_compat_htile(image) && clear_word == 0) {
		VkImageAspectFlags aspects = VK_IMAGE_ASPECT_DEPTH_BIT;
		VkClearDepthStencilValue value = {};

		if (vk_format_is_stencil(image->vk_format))
			aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;

		radv_set_depth_clear_regs(cmd_buffer, image, value, aspects);
	}
}

static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                               struct radv_image *image,
                                               VkImageLayout src_layout,
                                               VkImageLayout dst_layout,
                                               unsigned src_queue_mask,
                                               unsigned dst_queue_mask,
                                               const VkImageSubresourceRange *range,
                                               VkImageAspectFlags pending_clears)
{
	if (!radv_image_has_htile(image))
		return;

	if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
	    (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
	    cmd_buffer->state.render_area.offset.x == 0 && cmd_buffer->state.render_area.offset.y == 0 &&
	    cmd_buffer->state.render_area.extent.width == image->info.width &&
	    cmd_buffer->state.render_area.extent.height == image->info.height) {
		/* The clear will initialize htile. */
		return;
	} else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
	           radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
		/* TODO: merge with the clear if applicable */
		radv_initialize_htile(cmd_buffer, image, range, 0);
	} else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
	           radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
		uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
		radv_initialize_htile(cmd_buffer, image, range, clear_value);
	} else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
	           !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
		VkImageSubresourceRange local_range = *range;
		local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		local_range.baseMipLevel = 0;
		local_range.levelCount = 1;

		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
		                                RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

		radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);

		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
		                                RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
	}
}

static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image, uint32_t value)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}

void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image, uint32_t value)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}

/**
 * Initialize DCC/FMASK/CMASK metadata for a color image.
 */
static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
					   struct radv_image *image,
					   VkImageLayout src_layout,
					   VkImageLayout dst_layout,
					   unsigned src_queue_mask,
					   unsigned dst_queue_mask)
{
	if (radv_image_has_cmask(image)) {
		uint32_t value = 0xffffffffu; /* Fully expanded mode. */

		/* TODO: clarify this. */
		if (radv_image_has_fmask(image)) {
			value = 0xccccccccu;
		}

		radv_initialise_cmask(cmd_buffer, image, value);
	}

	if (radv_image_has_dcc(image)) {
		uint32_t value = 0xffffffffu; /* Fully expanded mode. */

		if (radv_layout_dcc_compressed(image, dst_layout,
		                               dst_queue_mask)) {
			value = 0x20202020u;
		}

		radv_initialize_dcc(cmd_buffer, image, value);
	}
}

/**
 * Handle color image transitions for DCC/FMASK/CMASK.
 */
static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       unsigned src_queue_mask,
					       unsigned dst_queue_mask,
					       const VkImageSubresourceRange *range)
{
	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
		radv_init_color_image_metadata(cmd_buffer, image,
					       src_layout, dst_layout,
					       src_queue_mask, dst_queue_mask);
		return;
	}

	if (radv_image_has_dcc(image)) {
		if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
			radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
		} else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
		           !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
			radv_decompress_dcc(cmd_buffer, image, range);
		} else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
		           !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
			radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
		}
	} else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
		if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
		    !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
			radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
		}
	}
}
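
/* Illustrative example: for a DCC image, a barrier whose old layout is
 * one where radv_layout_dcc_compressed() is true (e.g.
 * VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL) and whose new layout is not
 * lands in radv_decompress_dcc() above; if only fast-clear eligibility
 * is lost, radv_fast_clear_flush_image_inplace() runs instead.
 */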

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears)
{
	if (image->exclusive && src_family != dst_family) {
		/* This is an acquire or a release operation and there will be
		 * a corresponding release/acquire. Do the transition in the
		 * most flexible queue. */

		assert(src_family == cmd_buffer->queue_family_index ||
		       dst_family == cmd_buffer->queue_family_index);

		if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
			return;

		if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
		    (src_family == RADV_QUEUE_GENERAL ||
		     dst_family == RADV_QUEUE_GENERAL))
			return;
	}

	unsigned src_queue_mask =
		radv_image_queue_family_mask(image, src_family,
					     cmd_buffer->queue_family_index);
	unsigned dst_queue_mask =
		radv_image_queue_family_mask(image, dst_family,
					     cmd_buffer->queue_family_index);

	if (vk_format_is_depth(image->vk_format)) {
		radv_handle_depth_image_transition(cmd_buffer, image,
						   src_layout, dst_layout,
						   src_queue_mask, dst_queue_mask,
						   range, pending_clears);
	} else {
		radv_handle_color_image_transition(cmd_buffer, image,
						   src_layout, dst_layout,
						   src_queue_mask, dst_queue_mask,
						   range);
	}
}

void radv_CmdPipelineBarrier(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlags                        srcStageMask,
	VkPipelineStageFlags                        destStageMask,
	VkDependencyFlags                           dependencyFlags,
	uint32_t                                    memoryBarrierCount,
	const VkMemoryBarrier*                      pMemoryBarriers,
	uint32_t                                    bufferMemoryBarrierCount,
	const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
	uint32_t                                    imageMemoryBarrierCount,
	const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	enum radv_cmd_flush_bits src_flush_bits = 0;
	enum radv_cmd_flush_bits dst_flush_bits = 0;

	for (uint32_t i = 0; i < memoryBarrierCount; i++) {
		src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
		                                        NULL);
	}

	for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
		src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
		                                        NULL);
	}

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
		src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
		                                        image);
	}

	radv_stage_flush(cmd_buffer, srcStageMask);
	cmd_buffer->state.flush_bits |= src_flush_bits;

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
					     &pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	cmd_buffer->state.flush_bits |= dst_flush_bits;
}

static void write_event(struct radv_cmd_buffer *cmd_buffer,
			struct radv_event *event,
			VkPipelineStageFlags stageMask,
			unsigned value)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(event->bo);

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);

	/* TODO: this is overkill. Probably should figure something out from
	 * the stage mask. */

	si_cs_emit_write_event_eop(cs,
				   cmd_buffer->state.predicating,
				   cmd_buffer->device->physical_device->rad_info.chip_class,
				   radv_cmd_buffer_uses_mec(cmd_buffer),
				   V_028A90_BOTTOM_OF_PIPE_TS, 0,
				   1, va, 2, value);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
		      VkEvent _event,
		      VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 1);
}

void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
			VkEvent _event,
			VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 0);
}
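
/* Sketch of the event mechanism as implemented above: each radv_event
 * owns a GPU buffer word; SetEvent/ResetEvent write 1/0 to it with a
 * bottom-of-pipe EOP event, and radv_CmdWaitEvents() below polls that
 * word with si_emit_wait_fence() until it reads 1.
 */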

void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
			uint32_t eventCount,
			const VkEvent* pEvents,
			VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask,
			uint32_t memoryBarrierCount,
			const VkMemoryBarrier* pMemoryBarriers,
			uint32_t bufferMemoryBarrierCount,
			const VkBufferMemoryBarrier* pBufferMemoryBarriers,
			uint32_t imageMemoryBarrierCount,
			const VkImageMemoryBarrier* pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;

	for (unsigned i = 0; i < eventCount; ++i) {
		RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
		uint64_t va = radv_buffer_get_va(event->bo);

		radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);

		MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);

		si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
		assert(cmd_buffer->cs->cdw <= cdw_max);
	}

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);

		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
					     &pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	/* TODO: figure out how to do memory barriers without waiting */
	cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
					RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_SMEM_L1;
}

void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
			   uint32_t deviceMask)
{
	/* No-op */
}