/*
 * Copyright © 2016 Red Hat.
 * Copyright © 2016 Bas Nieuwenhuizen
 *
 * based in part on anv driver which is:
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "radv_private.h"
#include "radv_radeon_winsys.h"
#include "radv_shader.h"
#include "radv_cs.h"
#include "sid.h"
#include "gfx9d.h"
#include "vk_format.h"
#include "radv_debug.h"
#include "radv_meta.h"

#include "ac_debug.h"

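/* Flags tracked in cmd_buffer->state.prefetch_L2_mask: which shader binaries
 * and which descriptor data still need to be prefetched into L2 before the
 * next draw.
 */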
enum {
        RADV_PREFETCH_VBO_DESCRIPTORS = (1 << 0),
        RADV_PREFETCH_VS              = (1 << 1),
        RADV_PREFETCH_TCS             = (1 << 2),
        RADV_PREFETCH_TES             = (1 << 3),
        RADV_PREFETCH_GS              = (1 << 4),
        RADV_PREFETCH_PS              = (1 << 5),
        RADV_PREFETCH_SHADERS         = (RADV_PREFETCH_VS |
                                         RADV_PREFETCH_TCS |
                                         RADV_PREFETCH_TES |
                                         RADV_PREFETCH_GS |
                                         RADV_PREFETCH_PS)
};

static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
                                         struct radv_image *image,
                                         VkImageLayout src_layout,
                                         VkImageLayout dst_layout,
                                         uint32_t src_family,
                                         uint32_t dst_family,
                                         const VkImageSubresourceRange *range,
                                         VkImageAspectFlags pending_clears);

const struct radv_dynamic_state default_dynamic_state = {
        .blend_constants = { 0.0f, 0.0f, 0.0f, 0.0f },
        .stencil_compare_mask = {
                .front = ~0u,
                .back = ~0u,
        },
        .stencil_write_mask = {
                .front = ~0u,
                .back = ~0u,
        },
        .stencil_reference = {
                .front = 0u,
                .back = 0u,
        },
};

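/* Copy the dynamic state from `src` into the command buffer, marking as
 * dirty only the state that actually changed so redundant register emits
 * are skipped later.
 */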
static void
radv_bind_dynamic_state(struct radv_cmd_buffer *cmd_buffer,
                        const struct radv_dynamic_state *src)
{
        struct radv_dynamic_state *dest = &cmd_buffer->state.dynamic;
        uint32_t copy_mask = src->mask;
        uint32_t dest_mask = 0;

        /* Make sure to copy the number of viewports/scissors because they can
         * only be specified at pipeline creation time.
         */
        dest->viewport.count = src->viewport.count;
        dest->scissor.count = src->scissor.count;
        dest->discard_rectangle.count = src->discard_rectangle.count;

        if (copy_mask & RADV_DYNAMIC_VIEWPORT) {
                if (memcmp(&dest->viewport.viewports, &src->viewport.viewports,
                           src->viewport.count * sizeof(VkViewport))) {
                        typed_memcpy(dest->viewport.viewports,
                                     src->viewport.viewports,
                                     src->viewport.count);
                        dest_mask |= RADV_DYNAMIC_VIEWPORT;
                }
        }

        if (copy_mask & RADV_DYNAMIC_SCISSOR) {
                if (memcmp(&dest->scissor.scissors, &src->scissor.scissors,
                           src->scissor.count * sizeof(VkRect2D))) {
                        typed_memcpy(dest->scissor.scissors,
                                     src->scissor.scissors, src->scissor.count);
                        dest_mask |= RADV_DYNAMIC_SCISSOR;
                }
        }

        if (copy_mask & RADV_DYNAMIC_LINE_WIDTH) {
                if (dest->line_width != src->line_width) {
                        dest->line_width = src->line_width;
                        dest_mask |= RADV_DYNAMIC_LINE_WIDTH;
                }
        }

        if (copy_mask & RADV_DYNAMIC_DEPTH_BIAS) {
                if (memcmp(&dest->depth_bias, &src->depth_bias,
                           sizeof(src->depth_bias))) {
                        dest->depth_bias = src->depth_bias;
                        dest_mask |= RADV_DYNAMIC_DEPTH_BIAS;
                }
        }

        if (copy_mask & RADV_DYNAMIC_BLEND_CONSTANTS) {
                if (memcmp(&dest->blend_constants, &src->blend_constants,
                           sizeof(src->blend_constants))) {
                        typed_memcpy(dest->blend_constants,
                                     src->blend_constants, 4);
                        dest_mask |= RADV_DYNAMIC_BLEND_CONSTANTS;
                }
        }

        if (copy_mask & RADV_DYNAMIC_DEPTH_BOUNDS) {
                if (memcmp(&dest->depth_bounds, &src->depth_bounds,
                           sizeof(src->depth_bounds))) {
                        dest->depth_bounds = src->depth_bounds;
                        dest_mask |= RADV_DYNAMIC_DEPTH_BOUNDS;
                }
        }

        if (copy_mask & RADV_DYNAMIC_STENCIL_COMPARE_MASK) {
                if (memcmp(&dest->stencil_compare_mask,
                           &src->stencil_compare_mask,
                           sizeof(src->stencil_compare_mask))) {
                        dest->stencil_compare_mask = src->stencil_compare_mask;
                        dest_mask |= RADV_DYNAMIC_STENCIL_COMPARE_MASK;
                }
        }

        if (copy_mask & RADV_DYNAMIC_STENCIL_WRITE_MASK) {
                if (memcmp(&dest->stencil_write_mask, &src->stencil_write_mask,
                           sizeof(src->stencil_write_mask))) {
                        dest->stencil_write_mask = src->stencil_write_mask;
                        dest_mask |= RADV_DYNAMIC_STENCIL_WRITE_MASK;
                }
        }

        if (copy_mask & RADV_DYNAMIC_STENCIL_REFERENCE) {
                if (memcmp(&dest->stencil_reference, &src->stencil_reference,
                           sizeof(src->stencil_reference))) {
                        dest->stencil_reference = src->stencil_reference;
                        dest_mask |= RADV_DYNAMIC_STENCIL_REFERENCE;
                }
        }

        if (copy_mask & RADV_DYNAMIC_DISCARD_RECTANGLE) {
                if (memcmp(&dest->discard_rectangle.rectangles, &src->discard_rectangle.rectangles,
                           src->discard_rectangle.count * sizeof(VkRect2D))) {
                        typed_memcpy(dest->discard_rectangle.rectangles,
                                     src->discard_rectangle.rectangles,
                                     src->discard_rectangle.count);
                        dest_mask |= RADV_DYNAMIC_DISCARD_RECTANGLE;
                }
        }

        cmd_buffer->state.dirty |= dest_mask;
}

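/* Compute queues on CIK+ are executed by the MEC (micro-engine compute)
 * rather than the graphics ME, which matters for how some packets must be
 * encoded.
 */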
bool radv_cmd_buffer_uses_mec(struct radv_cmd_buffer *cmd_buffer)
{
        return cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
               cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
}

enum ring_type radv_queue_family_to_ring(int f) {
        switch (f) {
        case RADV_QUEUE_GENERAL:
                return RING_GFX;
        case RADV_QUEUE_COMPUTE:
                return RING_COMPUTE;
        case RADV_QUEUE_TRANSFER:
                return RING_DMA;
        default:
                unreachable("Unknown queue family");
        }
}

static VkResult radv_create_cmd_buffer(
        struct radv_device *         device,
        struct radv_cmd_pool *       pool,
        VkCommandBufferLevel         level,
        VkCommandBuffer*             pCommandBuffer)
{
        struct radv_cmd_buffer *cmd_buffer;
        unsigned ring;
        cmd_buffer = vk_zalloc(&pool->alloc, sizeof(*cmd_buffer), 8,
                               VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
        if (cmd_buffer == NULL)
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

        cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
        cmd_buffer->device = device;
        cmd_buffer->pool = pool;
        cmd_buffer->level = level;

        if (pool) {
                list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
                cmd_buffer->queue_family_index = pool->queue_family_index;
        } else {
                /* Init the pool_link so we can safely call list_del when we destroy
                 * the command buffer
                 */
                list_inithead(&cmd_buffer->pool_link);
                cmd_buffer->queue_family_index = RADV_QUEUE_GENERAL;
        }

        ring = radv_queue_family_to_ring(cmd_buffer->queue_family_index);

        cmd_buffer->cs = device->ws->cs_create(device->ws, ring);
        if (!cmd_buffer->cs) {
                vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
                return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
        }

        *pCommandBuffer = radv_cmd_buffer_to_handle(cmd_buffer);

        list_inithead(&cmd_buffer->upload.list);

        return VK_SUCCESS;
}

static void
radv_cmd_buffer_destroy(struct radv_cmd_buffer *cmd_buffer)
{
        list_del(&cmd_buffer->pool_link);

        list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
                                 &cmd_buffer->upload.list, list) {
                cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
                list_del(&up->list);
                free(up);
        }

        if (cmd_buffer->upload.upload_bo)
                cmd_buffer->device->ws->buffer_destroy(cmd_buffer->upload.upload_bo);
        cmd_buffer->device->ws->cs_destroy(cmd_buffer->cs);

        for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++)
                free(cmd_buffer->descriptors[i].push_set.set.mapped_ptr);

        vk_free(&cmd_buffer->pool->alloc, cmd_buffer);
}

static VkResult
radv_reset_cmd_buffer(struct radv_cmd_buffer *cmd_buffer)
{
        cmd_buffer->device->ws->cs_reset(cmd_buffer->cs);

        list_for_each_entry_safe(struct radv_cmd_buffer_upload, up,
                                 &cmd_buffer->upload.list, list) {
                cmd_buffer->device->ws->buffer_destroy(up->upload_bo);
                list_del(&up->list);
                free(up);
        }

        cmd_buffer->push_constant_stages = 0;
        cmd_buffer->scratch_size_needed = 0;
        cmd_buffer->compute_scratch_size_needed = 0;
        cmd_buffer->esgs_ring_size_needed = 0;
        cmd_buffer->gsvs_ring_size_needed = 0;
        cmd_buffer->tess_rings_needed = false;
        cmd_buffer->sample_positions_needed = false;

        if (cmd_buffer->upload.upload_bo)
                radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
                                   cmd_buffer->upload.upload_bo, 8);
        cmd_buffer->upload.offset = 0;

        cmd_buffer->record_result = VK_SUCCESS;

        cmd_buffer->ring_offsets_idx = -1;

        for (unsigned i = 0; i < VK_PIPELINE_BIND_POINT_RANGE_SIZE; i++) {
                cmd_buffer->descriptors[i].dirty = 0;
                cmd_buffer->descriptors[i].valid = 0;
                cmd_buffer->descriptors[i].push_dirty = false;
        }

        if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
                void *fence_ptr;
                radv_cmd_buffer_upload_alloc(cmd_buffer, 8, 0,
                                             &cmd_buffer->gfx9_fence_offset,
                                             &fence_ptr);
                cmd_buffer->gfx9_fence_bo = cmd_buffer->upload.upload_bo;
        }

        cmd_buffer->status = RADV_CMD_BUFFER_STATUS_INITIAL;

        return cmd_buffer->record_result;
}

static bool
radv_cmd_buffer_resize_upload_buf(struct radv_cmd_buffer *cmd_buffer,
                                  uint64_t min_needed)
{
        uint64_t new_size;
        struct radeon_winsys_bo *bo;
        struct radv_cmd_buffer_upload *upload;
        struct radv_device *device = cmd_buffer->device;

        new_size = MAX2(min_needed, 16 * 1024);
        new_size = MAX2(new_size, 2 * cmd_buffer->upload.size);

        bo = device->ws->buffer_create(device->ws,
                                       new_size, 4096,
                                       RADEON_DOMAIN_GTT,
                                       RADEON_FLAG_CPU_ACCESS|
                                       RADEON_FLAG_NO_INTERPROCESS_SHARING);

        if (!bo) {
                cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                return false;
        }

        radv_cs_add_buffer(device->ws, cmd_buffer->cs, bo, 8);
        if (cmd_buffer->upload.upload_bo) {
                upload = malloc(sizeof(*upload));

                if (!upload) {
                        cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
                        device->ws->buffer_destroy(bo);
                        return false;
                }

                memcpy(upload, &cmd_buffer->upload, sizeof(*upload));
                list_add(&upload->list, &cmd_buffer->upload.list);
        }

        cmd_buffer->upload.upload_bo = bo;
        cmd_buffer->upload.size = new_size;
        cmd_buffer->upload.offset = 0;
        cmd_buffer->upload.map = device->ws->buffer_map(cmd_buffer->upload.upload_bo);

        if (!cmd_buffer->upload.map) {
                cmd_buffer->record_result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
                return false;
        }

        return true;
}

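/* Suballocate `size` bytes from the command buffer's upload BO, growing the
 * BO (and keeping the old one alive on the upload list) when it is full.
 */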
bool
radv_cmd_buffer_upload_alloc(struct radv_cmd_buffer *cmd_buffer,
                             unsigned size,
                             unsigned alignment,
                             unsigned *out_offset,
                             void **ptr)
{
        uint64_t offset = align(cmd_buffer->upload.offset, alignment);
        if (offset + size > cmd_buffer->upload.size) {
                if (!radv_cmd_buffer_resize_upload_buf(cmd_buffer, size))
                        return false;
                offset = 0;
        }

        *out_offset = offset;
        *ptr = cmd_buffer->upload.map + offset;

        cmd_buffer->upload.offset = offset + size;
        return true;
}

bool
radv_cmd_buffer_upload_data(struct radv_cmd_buffer *cmd_buffer,
                            unsigned size, unsigned alignment,
                            const void *data, unsigned *out_offset)
{
        uint8_t *ptr;

        if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size, alignment,
                                          out_offset, (void **)&ptr))
                return false;

        memcpy(ptr, data, size);
        return true;
}

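/* A minimal usage sketch (illustrative only; `consts` and the 256-byte
 * alignment are hypothetical, not taken from a real caller):
 *
 *      uint32_t consts[4] = {0, 1, 2, 3};
 *      unsigned offset;
 *      if (radv_cmd_buffer_upload_data(cmd_buffer, sizeof(consts), 256,
 *                                      consts, &offset)) {
 *              uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo) + offset;
 *              // `va` can now be handed to a shader through user SGPRs,
 *              // as the radv_flush_* helpers below do.
 *      }
 */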
static void
radv_emit_write_data_packet(struct radeon_winsys_cs *cs, uint64_t va,
                            unsigned count, const uint32_t *data)
{
        radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 2 + count, 0));
        radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
                        S_370_WR_CONFIRM(1) |
                        S_370_ENGINE_SEL(V_370_ME));
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);
        radeon_emit_array(cs, data, count);
}

void radv_cmd_buffer_trace_emit(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_device *device = cmd_buffer->device;
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        uint64_t va;

        va = radv_buffer_get_va(device->trace_bo);
        if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
                va += 4;

        MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);

        ++cmd_buffer->state.trace_id;
        radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
        radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
        radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
        radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
}

static void
radv_cmd_buffer_after_draw(struct radv_cmd_buffer *cmd_buffer,
                           enum radv_cmd_flush_bits flags)
{
        if (cmd_buffer->device->instance->debug_flags & RADV_DEBUG_SYNC_SHADERS) {
                uint32_t *ptr = NULL;
                uint64_t va = 0;

                assert(flags & (RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
                                RADV_CMD_FLAG_CS_PARTIAL_FLUSH));

                if (cmd_buffer->device->physical_device->rad_info.chip_class == GFX9) {
                        va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) +
                             cmd_buffer->gfx9_fence_offset;
                        ptr = &cmd_buffer->gfx9_fence_idx;
                }

                /* Force wait for graphics or compute engines to be idle. */
                si_cs_emit_cache_flush(cmd_buffer->cs,
                                       cmd_buffer->device->physical_device->rad_info.chip_class,
                                       ptr, va,
                                       radv_cmd_buffer_uses_mec(cmd_buffer),
                                       flags);
        }

        if (unlikely(cmd_buffer->device->trace_bo))
                radv_cmd_buffer_trace_emit(cmd_buffer);
}

static void
radv_save_pipeline(struct radv_cmd_buffer *cmd_buffer,
                   struct radv_pipeline *pipeline, enum ring_type ring)
{
        struct radv_device *device = cmd_buffer->device;
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        uint32_t data[2];
        uint64_t va;

        va = radv_buffer_get_va(device->trace_bo);

        switch (ring) {
        case RING_GFX:
                va += 8;
                break;
        case RING_COMPUTE:
                va += 16;
                break;
        default:
                assert(!"invalid ring type");
        }

        MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
                                                           cmd_buffer->cs, 6);

        data[0] = (uintptr_t)pipeline;
        data[1] = (uintptr_t)pipeline >> 32;

        radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
        radv_emit_write_data_packet(cs, va, 2, data);
}

void radv_set_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
                             VkPipelineBindPoint bind_point,
                             struct radv_descriptor_set *set,
                             unsigned idx)
{
        struct radv_descriptor_state *descriptors_state =
                radv_get_descriptors_state(cmd_buffer, bind_point);

        descriptors_state->sets[idx] = set;
        if (set)
                descriptors_state->valid |= (1u << idx);
        else
                descriptors_state->valid &= ~(1u << idx);
        descriptors_state->dirty |= (1u << idx);
}

static void
radv_save_descriptors(struct radv_cmd_buffer *cmd_buffer,
                      VkPipelineBindPoint bind_point)
{
        struct radv_descriptor_state *descriptors_state =
                radv_get_descriptors_state(cmd_buffer, bind_point);
        struct radv_device *device = cmd_buffer->device;
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        uint32_t data[MAX_SETS * 2] = {};
        uint64_t va;
        unsigned i;
        va = radv_buffer_get_va(device->trace_bo) + 24;

        MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
                                                           cmd_buffer->cs, 4 + MAX_SETS * 2);

        for_each_bit(i, descriptors_state->valid) {
                struct radv_descriptor_set *set = descriptors_state->sets[i];
                data[i * 2] = (uintptr_t)set;
                data[i * 2 + 1] = (uintptr_t)set >> 32;
        }

        radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
        radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
}

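/* Find the user SGPR location for `idx` in the given stage. On GFX9 some
 * stages are merged (VS into TCS/GS, TES into GS), so when the requested
 * stage has no shader of its own we fall through to the stage it was
 * merged into.
 */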
struct radv_userdata_info *
radv_lookup_user_sgpr(struct radv_pipeline *pipeline,
                      gl_shader_stage stage,
                      int idx)
{
        if (stage == MESA_SHADER_VERTEX) {
                if (pipeline->shaders[MESA_SHADER_VERTEX])
                        return &pipeline->shaders[MESA_SHADER_VERTEX]->info.user_sgprs_locs.shader_data[idx];
                if (pipeline->shaders[MESA_SHADER_TESS_CTRL])
                        return &pipeline->shaders[MESA_SHADER_TESS_CTRL]->info.user_sgprs_locs.shader_data[idx];
                if (pipeline->shaders[MESA_SHADER_GEOMETRY])
                        return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
        } else if (stage == MESA_SHADER_TESS_EVAL) {
                if (pipeline->shaders[MESA_SHADER_TESS_EVAL])
                        return &pipeline->shaders[MESA_SHADER_TESS_EVAL]->info.user_sgprs_locs.shader_data[idx];
                if (pipeline->shaders[MESA_SHADER_GEOMETRY])
                        return &pipeline->shaders[MESA_SHADER_GEOMETRY]->info.user_sgprs_locs.shader_data[idx];
        }
        return &pipeline->shaders[stage]->info.user_sgprs_locs.shader_data[idx];
}

static void
radv_emit_userdata_address(struct radv_cmd_buffer *cmd_buffer,
                           struct radv_pipeline *pipeline,
                           gl_shader_stage stage,
                           int idx, uint64_t va)
{
        struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, idx);
        uint32_t base_reg = pipeline->user_data_0[stage];
        if (loc->sgpr_idx == -1)
                return;
        assert(loc->num_sgprs == 2);
        assert(!loc->indirect);
        radeon_set_sh_reg_seq(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, 2);
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_update_multisample_state(struct radv_cmd_buffer *cmd_buffer,
                              struct radv_pipeline *pipeline)
{
        int num_samples = pipeline->graphics.ms.num_samples;
        struct radv_multisample_state *ms = &pipeline->graphics.ms;
        struct radv_pipeline *old_pipeline = cmd_buffer->state.emitted_pipeline;

        if (pipeline->shaders[MESA_SHADER_FRAGMENT]->info.info.ps.needs_sample_positions)
                cmd_buffer->sample_positions_needed = true;

        if (old_pipeline && num_samples == old_pipeline->graphics.ms.num_samples)
                return;

        radeon_set_context_reg_seq(cmd_buffer->cs, R_028BDC_PA_SC_LINE_CNTL, 2);
        radeon_emit(cmd_buffer->cs, ms->pa_sc_line_cntl);
        radeon_emit(cmd_buffer->cs, ms->pa_sc_aa_config);

        radeon_set_context_reg(cmd_buffer->cs, R_028A48_PA_SC_MODE_CNTL_0, ms->pa_sc_mode_cntl_0);

        radv_cayman_emit_msaa_sample_locs(cmd_buffer->cs, num_samples);

        /* GFX9: Flush DFSM when the AA mode changes. */
        if (cmd_buffer->device->dfsm_allowed) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_FLUSH_DFSM) | EVENT_INDEX(0));
        }
}

static void
radv_emit_shader_prefetch(struct radv_cmd_buffer *cmd_buffer,
                          struct radv_shader_variant *shader)
{
        uint64_t va;

        if (!shader)
                return;

        va = radv_buffer_get_va(shader->bo) + shader->bo_offset;

        si_cp_dma_prefetch(cmd_buffer, va, shader->code_size);
}

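/* Prefetch shader binaries and vertex buffer descriptors into L2 via CP DMA.
 * With `vertex_stage_only` set, only the VS binary and the VBO descriptors
 * are prefetched so the first draw can start as early as possible; the
 * remaining stages are prefetched later, and each stage is cleared from
 * state->prefetch_L2_mask once done.
 */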
static void
radv_emit_prefetch_L2(struct radv_cmd_buffer *cmd_buffer,
                      struct radv_pipeline *pipeline,
                      bool vertex_stage_only)
{
        struct radv_cmd_state *state = &cmd_buffer->state;
        uint32_t mask = state->prefetch_L2_mask;

        if (vertex_stage_only) {
                /* Fast prefetch path for starting draws as soon as possible.
                 */
                mask = state->prefetch_L2_mask & (RADV_PREFETCH_VS |
                                                  RADV_PREFETCH_VBO_DESCRIPTORS);
        }

        if (mask & RADV_PREFETCH_VS)
                radv_emit_shader_prefetch(cmd_buffer,
                                          pipeline->shaders[MESA_SHADER_VERTEX]);

        if (mask & RADV_PREFETCH_VBO_DESCRIPTORS)
                si_cp_dma_prefetch(cmd_buffer, state->vb_va, state->vb_size);

        if (mask & RADV_PREFETCH_TCS)
                radv_emit_shader_prefetch(cmd_buffer,
                                          pipeline->shaders[MESA_SHADER_TESS_CTRL]);

        if (mask & RADV_PREFETCH_TES)
                radv_emit_shader_prefetch(cmd_buffer,
                                          pipeline->shaders[MESA_SHADER_TESS_EVAL]);

        if (mask & RADV_PREFETCH_GS) {
                radv_emit_shader_prefetch(cmd_buffer,
                                          pipeline->shaders[MESA_SHADER_GEOMETRY]);
                radv_emit_shader_prefetch(cmd_buffer, pipeline->gs_copy_shader);
        }

        if (mask & RADV_PREFETCH_PS)
                radv_emit_shader_prefetch(cmd_buffer,
                                          pipeline->shaders[MESA_SHADER_FRAGMENT]);

        state->prefetch_L2_mask &= ~mask;
}

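/* Program the RB+ (render backend plus) state. On chips where RB+ is
 * allowed, color exports can be down-converted to the actual attachment
 * format to save bandwidth; SX_PS_DOWNCONVERT and friends describe per-MRT
 * how to do that, based on the attachment format, component swap and the
 * shader export format.
 */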
static void
radv_emit_rbplus_state(struct radv_cmd_buffer *cmd_buffer)
{
        if (!cmd_buffer->device->physical_device->rbplus_allowed)
                return;

        struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
        struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
        const struct radv_subpass *subpass = cmd_buffer->state.subpass;

        unsigned sx_ps_downconvert = 0;
        unsigned sx_blend_opt_epsilon = 0;
        unsigned sx_blend_opt_control = 0;

        for (unsigned i = 0; i < subpass->color_count; ++i) {
                if (subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED)
                        continue;

                int idx = subpass->color_attachments[i].attachment;
                struct radv_color_buffer_info *cb = &framebuffer->attachments[idx].cb;

                unsigned format = G_028C70_FORMAT(cb->cb_color_info);
                unsigned swap = G_028C70_COMP_SWAP(cb->cb_color_info);
                uint32_t spi_format = (pipeline->graphics.col_format >> (i * 4)) & 0xf;
                uint32_t colormask = (pipeline->graphics.cb_target_mask >> (i * 4)) & 0xf;

                bool has_alpha, has_rgb;

                /* Set if RGB and A are present. */
                has_alpha = !G_028C74_FORCE_DST_ALPHA_1(cb->cb_color_attrib);

                if (format == V_028C70_COLOR_8 ||
                    format == V_028C70_COLOR_16 ||
                    format == V_028C70_COLOR_32)
                        has_rgb = !has_alpha;
                else
                        has_rgb = true;

                /* Check the colormask and export format. */
                if (!(colormask & 0x7))
                        has_rgb = false;
                if (!(colormask & 0x8))
                        has_alpha = false;

                if (spi_format == V_028714_SPI_SHADER_ZERO) {
                        has_rgb = false;
                        has_alpha = false;
                }

                /* Disable value checking for disabled channels. */
                if (!has_rgb)
                        sx_blend_opt_control |= S_02875C_MRT0_COLOR_OPT_DISABLE(1) << (i * 4);
                if (!has_alpha)
                        sx_blend_opt_control |= S_02875C_MRT0_ALPHA_OPT_DISABLE(1) << (i * 4);

                /* Enable down-conversion for 32bpp and smaller formats. */
                switch (format) {
                case V_028C70_COLOR_8:
                case V_028C70_COLOR_8_8:
                case V_028C70_COLOR_8_8_8_8:
                        /* For 1 and 2-channel formats, use the superset thereof. */
                        if (spi_format == V_028714_SPI_SHADER_FP16_ABGR ||
                            spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
                            spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
                                sx_ps_downconvert |= V_028754_SX_RT_EXPORT_8_8_8_8 << (i * 4);
                                sx_blend_opt_epsilon |= V_028758_8BIT_FORMAT << (i * 4);
                        }
                        break;

                case V_028C70_COLOR_5_6_5:
                        if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
                                sx_ps_downconvert |= V_028754_SX_RT_EXPORT_5_6_5 << (i * 4);
                                sx_blend_opt_epsilon |= V_028758_6BIT_FORMAT << (i * 4);
                        }
                        break;

                case V_028C70_COLOR_1_5_5_5:
                        if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
                                sx_ps_downconvert |= V_028754_SX_RT_EXPORT_1_5_5_5 << (i * 4);
                                sx_blend_opt_epsilon |= V_028758_5BIT_FORMAT << (i * 4);
                        }
                        break;

                case V_028C70_COLOR_4_4_4_4:
                        if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
                                sx_ps_downconvert |= V_028754_SX_RT_EXPORT_4_4_4_4 << (i * 4);
                                sx_blend_opt_epsilon |= V_028758_4BIT_FORMAT << (i * 4);
                        }
                        break;

                case V_028C70_COLOR_32:
                        if (swap == V_028C70_SWAP_STD &&
                            spi_format == V_028714_SPI_SHADER_32_R)
                                sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_R << (i * 4);
                        else if (swap == V_028C70_SWAP_ALT_REV &&
                                 spi_format == V_028714_SPI_SHADER_32_AR)
                                sx_ps_downconvert |= V_028754_SX_RT_EXPORT_32_A << (i * 4);
                        break;

                case V_028C70_COLOR_16:
                case V_028C70_COLOR_16_16:
                        /* For 1-channel formats, use the superset thereof. */
                        if (spi_format == V_028714_SPI_SHADER_UNORM16_ABGR ||
                            spi_format == V_028714_SPI_SHADER_SNORM16_ABGR ||
                            spi_format == V_028714_SPI_SHADER_UINT16_ABGR ||
                            spi_format == V_028714_SPI_SHADER_SINT16_ABGR) {
                                if (swap == V_028C70_SWAP_STD ||
                                    swap == V_028C70_SWAP_STD_REV)
                                        sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_GR << (i * 4);
                                else
                                        sx_ps_downconvert |= V_028754_SX_RT_EXPORT_16_16_AR << (i * 4);
                        }
                        break;

                case V_028C70_COLOR_10_11_11:
                        if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
                                sx_ps_downconvert |= V_028754_SX_RT_EXPORT_10_11_11 << (i * 4);
                                sx_blend_opt_epsilon |= V_028758_11BIT_FORMAT << (i * 4);
                        }
                        break;

                case V_028C70_COLOR_2_10_10_10:
                        if (spi_format == V_028714_SPI_SHADER_FP16_ABGR) {
                                sx_ps_downconvert |= V_028754_SX_RT_EXPORT_2_10_10_10 << (i * 4);
                                sx_blend_opt_epsilon |= V_028758_10BIT_FORMAT << (i * 4);
                        }
                        break;
                }
        }

        radeon_set_context_reg_seq(cmd_buffer->cs, R_028754_SX_PS_DOWNCONVERT, 3);
        radeon_emit(cmd_buffer->cs, sx_ps_downconvert);
        radeon_emit(cmd_buffer->cs, sx_blend_opt_epsilon);
        radeon_emit(cmd_buffer->cs, sx_blend_opt_control);
}

static void
radv_emit_graphics_pipeline(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;

        if (!pipeline || cmd_buffer->state.emitted_pipeline == pipeline)
                return;

        radv_update_multisample_state(cmd_buffer, pipeline);

        cmd_buffer->scratch_size_needed =
                MAX2(cmd_buffer->scratch_size_needed,
                     pipeline->max_waves * pipeline->scratch_bytes_per_wave);

        if (!cmd_buffer->state.emitted_pipeline ||
            cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband !=
            pipeline->graphics.can_use_guardband)
                cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;

        radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);

        for (unsigned i = 0; i < MESA_SHADER_COMPUTE; i++) {
                if (!pipeline->shaders[i])
                        continue;

                radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
                                   pipeline->shaders[i]->bo, 8);
        }

        if (radv_pipeline_has_gs(pipeline))
                radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
                                   pipeline->gs_copy_shader->bo, 8);

        if (unlikely(cmd_buffer->device->trace_bo))
                radv_save_pipeline(cmd_buffer, pipeline, RING_GFX);

        cmd_buffer->state.emitted_pipeline = pipeline;

        cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_PIPELINE;
}

static void
radv_emit_viewport(struct radv_cmd_buffer *cmd_buffer)
{
        si_write_viewport(cmd_buffer->cs, 0, cmd_buffer->state.dynamic.viewport.count,
                          cmd_buffer->state.dynamic.viewport.viewports);
}

static void
radv_emit_scissor(struct radv_cmd_buffer *cmd_buffer)
{
        uint32_t count = cmd_buffer->state.dynamic.scissor.count;

        /* Vega10/Raven scissor bug workaround. This must be done before VPORT
         * scissor registers are changed. There is also a more efficient but
         * more involved alternative workaround.
         */
        if (cmd_buffer->device->physical_device->has_scissor_bug) {
                cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
                si_emit_cache_flush(cmd_buffer);
        }
        si_write_scissors(cmd_buffer->cs, 0, count,
                          cmd_buffer->state.dynamic.scissor.scissors,
                          cmd_buffer->state.dynamic.viewport.viewports,
                          cmd_buffer->state.emitted_pipeline->graphics.can_use_guardband);
}

static void
radv_emit_discard_rectangle(struct radv_cmd_buffer *cmd_buffer)
{
        if (!cmd_buffer->state.dynamic.discard_rectangle.count)
                return;

        radeon_set_context_reg_seq(cmd_buffer->cs, R_028210_PA_SC_CLIPRECT_0_TL,
                                   cmd_buffer->state.dynamic.discard_rectangle.count * 2);
        for (unsigned i = 0; i < cmd_buffer->state.dynamic.discard_rectangle.count; ++i) {
                VkRect2D rect = cmd_buffer->state.dynamic.discard_rectangle.rectangles[i];
                radeon_emit(cmd_buffer->cs, S_028210_TL_X(rect.offset.x) | S_028210_TL_Y(rect.offset.y));
                radeon_emit(cmd_buffer->cs, S_028214_BR_X(rect.offset.x + rect.extent.width) |
                                            S_028214_BR_Y(rect.offset.y + rect.extent.height));
        }
}

static void
radv_emit_line_width(struct radv_cmd_buffer *cmd_buffer)
{
        unsigned width = cmd_buffer->state.dynamic.line_width * 8;

        radeon_set_context_reg(cmd_buffer->cs, R_028A08_PA_SU_LINE_CNTL,
                               S_028A08_WIDTH(CLAMP(width, 0, 0xFFF)));
}

static void
radv_emit_blend_constants(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

        radeon_set_context_reg_seq(cmd_buffer->cs, R_028414_CB_BLEND_RED, 4);
        radeon_emit_array(cmd_buffer->cs, (uint32_t *)d->blend_constants, 4);
}

static void
radv_emit_stencil(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

        radeon_set_context_reg_seq(cmd_buffer->cs,
                                   R_028430_DB_STENCILREFMASK, 2);
        radeon_emit(cmd_buffer->cs,
                    S_028430_STENCILTESTVAL(d->stencil_reference.front) |
                    S_028430_STENCILMASK(d->stencil_compare_mask.front) |
                    S_028430_STENCILWRITEMASK(d->stencil_write_mask.front) |
                    S_028430_STENCILOPVAL(1));
        radeon_emit(cmd_buffer->cs,
                    S_028434_STENCILTESTVAL_BF(d->stencil_reference.back) |
                    S_028434_STENCILMASK_BF(d->stencil_compare_mask.back) |
                    S_028434_STENCILWRITEMASK_BF(d->stencil_write_mask.back) |
                    S_028434_STENCILOPVAL_BF(1));
}

static void
radv_emit_depth_bounds(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;

        radeon_set_context_reg(cmd_buffer->cs, R_028020_DB_DEPTH_BOUNDS_MIN,
                               fui(d->depth_bounds.min));
        radeon_set_context_reg(cmd_buffer->cs, R_028024_DB_DEPTH_BOUNDS_MAX,
                               fui(d->depth_bounds.max));
}

static void
radv_emit_depth_bias(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_dynamic_state *d = &cmd_buffer->state.dynamic;
        unsigned slope = fui(d->depth_bias.slope * 16.0f);
        unsigned bias = fui(d->depth_bias.bias * cmd_buffer->state.offset_scale);

        radeon_set_context_reg_seq(cmd_buffer->cs,
                                   R_028B7C_PA_SU_POLY_OFFSET_CLAMP, 5);
        radeon_emit(cmd_buffer->cs, fui(d->depth_bias.clamp)); /* CLAMP */
        radeon_emit(cmd_buffer->cs, slope); /* FRONT SCALE */
        radeon_emit(cmd_buffer->cs, bias); /* FRONT OFFSET */
        radeon_emit(cmd_buffer->cs, slope); /* BACK SCALE */
        radeon_emit(cmd_buffer->cs, bias); /* BACK OFFSET */
}

static void
radv_emit_fb_color_state(struct radv_cmd_buffer *cmd_buffer,
                         int index,
                         struct radv_attachment_info *att,
                         struct radv_image *image,
                         VkImageLayout layout)
{
        bool is_vi = cmd_buffer->device->physical_device->rad_info.chip_class >= VI;
        struct radv_color_buffer_info *cb = &att->cb;
        uint32_t cb_color_info = cb->cb_color_info;

        if (!radv_layout_dcc_compressed(image, layout,
                                        radv_image_queue_family_mask(image,
                                                                     cmd_buffer->queue_family_index,
                                                                     cmd_buffer->queue_family_index))) {
                cb_color_info &= C_028C70_DCC_ENABLE;
        }

        if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
                radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
                radeon_emit(cmd_buffer->cs, cb->cb_color_base);
                radeon_emit(cmd_buffer->cs, S_028C64_BASE_256B(cb->cb_color_base >> 32));
                radeon_emit(cmd_buffer->cs, cb->cb_color_attrib2);
                radeon_emit(cmd_buffer->cs, cb->cb_color_view);
                radeon_emit(cmd_buffer->cs, cb_color_info);
                radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
                radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
                radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
                radeon_emit(cmd_buffer->cs, S_028C80_BASE_256B(cb->cb_color_cmask >> 32));
                radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
                radeon_emit(cmd_buffer->cs, S_028C88_BASE_256B(cb->cb_color_fmask >> 32));

                radeon_set_context_reg_seq(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, 2);
                radeon_emit(cmd_buffer->cs, cb->cb_dcc_base);
                radeon_emit(cmd_buffer->cs, S_028C98_BASE_256B(cb->cb_dcc_base >> 32));

                radeon_set_context_reg(cmd_buffer->cs, R_0287A0_CB_MRT0_EPITCH + index * 4,
                                       S_0287A0_EPITCH(att->attachment->image->surface.u.gfx9.surf.epitch));
        } else {
                radeon_set_context_reg_seq(cmd_buffer->cs, R_028C60_CB_COLOR0_BASE + index * 0x3c, 11);
                radeon_emit(cmd_buffer->cs, cb->cb_color_base);
                radeon_emit(cmd_buffer->cs, cb->cb_color_pitch);
                radeon_emit(cmd_buffer->cs, cb->cb_color_slice);
                radeon_emit(cmd_buffer->cs, cb->cb_color_view);
                radeon_emit(cmd_buffer->cs, cb_color_info);
                radeon_emit(cmd_buffer->cs, cb->cb_color_attrib);
                radeon_emit(cmd_buffer->cs, cb->cb_dcc_control);
                radeon_emit(cmd_buffer->cs, cb->cb_color_cmask);
                radeon_emit(cmd_buffer->cs, cb->cb_color_cmask_slice);
                radeon_emit(cmd_buffer->cs, cb->cb_color_fmask);
                radeon_emit(cmd_buffer->cs, cb->cb_color_fmask_slice);

                if (is_vi) { /* DCC BASE */
                        radeon_set_context_reg(cmd_buffer->cs, R_028C94_CB_COLOR0_DCC_BASE + index * 0x3c, cb->cb_dcc_base);
                }
        }
}

static void
radv_emit_fb_ds_state(struct radv_cmd_buffer *cmd_buffer,
                      struct radv_ds_buffer_info *ds,
                      struct radv_image *image,
                      VkImageLayout layout)
{
        uint32_t db_z_info = ds->db_z_info;
        uint32_t db_stencil_info = ds->db_stencil_info;

        if (!radv_layout_has_htile(image, layout,
                                   radv_image_queue_family_mask(image,
                                                                cmd_buffer->queue_family_index,
                                                                cmd_buffer->queue_family_index))) {
                db_z_info &= C_028040_TILE_SURFACE_ENABLE;
                db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
        }

        radeon_set_context_reg(cmd_buffer->cs, R_028008_DB_DEPTH_VIEW, ds->db_depth_view);
        radeon_set_context_reg(cmd_buffer->cs, R_028ABC_DB_HTILE_SURFACE, ds->db_htile_surface);

        if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
                radeon_set_context_reg_seq(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, 3);
                radeon_emit(cmd_buffer->cs, ds->db_htile_data_base);
                radeon_emit(cmd_buffer->cs, S_028018_BASE_HI(ds->db_htile_data_base >> 32));
                radeon_emit(cmd_buffer->cs, ds->db_depth_size);

                radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 10);
                radeon_emit(cmd_buffer->cs, db_z_info);                                         /* DB_Z_INFO */
                radeon_emit(cmd_buffer->cs, db_stencil_info);                                   /* DB_STENCIL_INFO */
                radeon_emit(cmd_buffer->cs, ds->db_z_read_base);                                /* DB_Z_READ_BASE */
                radeon_emit(cmd_buffer->cs, S_028044_BASE_HI(ds->db_z_read_base >> 32));        /* DB_Z_READ_BASE_HI */
                radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);                          /* DB_STENCIL_READ_BASE */
                radeon_emit(cmd_buffer->cs, S_02804C_BASE_HI(ds->db_stencil_read_base >> 32));  /* DB_STENCIL_READ_BASE_HI */
                radeon_emit(cmd_buffer->cs, ds->db_z_write_base);                               /* DB_Z_WRITE_BASE */
                radeon_emit(cmd_buffer->cs, S_028054_BASE_HI(ds->db_z_write_base >> 32));       /* DB_Z_WRITE_BASE_HI */
                radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base);                         /* DB_STENCIL_WRITE_BASE */
                radeon_emit(cmd_buffer->cs, S_02805C_BASE_HI(ds->db_stencil_write_base >> 32)); /* DB_STENCIL_WRITE_BASE_HI */

                radeon_set_context_reg_seq(cmd_buffer->cs, R_028068_DB_Z_INFO2, 2);
                radeon_emit(cmd_buffer->cs, ds->db_z_info2);
                radeon_emit(cmd_buffer->cs, ds->db_stencil_info2);
        } else {
                radeon_set_context_reg(cmd_buffer->cs, R_028014_DB_HTILE_DATA_BASE, ds->db_htile_data_base);

                radeon_set_context_reg_seq(cmd_buffer->cs, R_02803C_DB_DEPTH_INFO, 9);
                radeon_emit(cmd_buffer->cs, ds->db_depth_info);         /* R_02803C_DB_DEPTH_INFO */
                radeon_emit(cmd_buffer->cs, db_z_info);                 /* R_028040_DB_Z_INFO */
                radeon_emit(cmd_buffer->cs, db_stencil_info);           /* R_028044_DB_STENCIL_INFO */
                radeon_emit(cmd_buffer->cs, ds->db_z_read_base);        /* R_028048_DB_Z_READ_BASE */
                radeon_emit(cmd_buffer->cs, ds->db_stencil_read_base);  /* R_02804C_DB_STENCIL_READ_BASE */
                radeon_emit(cmd_buffer->cs, ds->db_z_write_base);       /* R_028050_DB_Z_WRITE_BASE */
                radeon_emit(cmd_buffer->cs, ds->db_stencil_write_base); /* R_028054_DB_STENCIL_WRITE_BASE */
                radeon_emit(cmd_buffer->cs, ds->db_depth_size);         /* R_028058_DB_DEPTH_SIZE */
                radeon_emit(cmd_buffer->cs, ds->db_depth_slice);        /* R_02805C_DB_DEPTH_SLICE */
        }

        radeon_set_context_reg(cmd_buffer->cs, R_028B78_PA_SU_POLY_OFFSET_DB_FMT_CNTL,
                               ds->pa_su_poly_offset_db_fmt_cntl);
}

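/* Clear values live in two places: a per-image metadata slot
 * (clear_value_offset in the image BO) and the DB_*_CLEAR / CB_COLOR*_CLEAR
 * context registers. The set helpers below write both through the CP; the
 * load helpers copy the BO metadata back into the registers when a
 * framebuffer is (re)bound.
 */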
void
radv_set_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
                          struct radv_image *image,
                          VkClearDepthStencilValue ds_clear_value,
                          VkImageAspectFlags aspects)
{
        uint64_t va = radv_buffer_get_va(image->bo);
        va += image->offset + image->clear_value_offset;
        unsigned reg_offset = 0, reg_count = 0;

        assert(radv_image_has_htile(image));

        if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
                ++reg_count;
        } else {
                ++reg_offset;
                va += 4;
        }
        if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
                ++reg_count;

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 2 + reg_count, 0));
        radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
                                    S_370_WR_CONFIRM(1) |
                                    S_370_ENGINE_SEL(V_370_PFP));
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);
        if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
                radeon_emit(cmd_buffer->cs, ds_clear_value.stencil);
        if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
                radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth));

        radeon_set_context_reg_seq(cmd_buffer->cs, R_028028_DB_STENCIL_CLEAR + 4 * reg_offset, reg_count);
        if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT)
                radeon_emit(cmd_buffer->cs, ds_clear_value.stencil); /* R_028028_DB_STENCIL_CLEAR */
        if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
                radeon_emit(cmd_buffer->cs, fui(ds_clear_value.depth)); /* R_02802C_DB_DEPTH_CLEAR */
}

static void
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
                           struct radv_image *image)
{
        VkImageAspectFlags aspects = vk_format_aspects(image->vk_format);
        uint64_t va = radv_buffer_get_va(image->bo);
        va += image->offset + image->clear_value_offset;
        unsigned reg_offset = 0, reg_count = 0;

        if (!radv_image_has_htile(image))
                return;

        if (aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
                ++reg_count;
        } else {
                ++reg_offset;
                va += 4;
        }
        if (aspects & VK_IMAGE_ASPECT_DEPTH_BIT)
                ++reg_count;

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, 0));
        radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
                                    COPY_DATA_DST_SEL(COPY_DATA_REG) |
                                    (reg_count == 2 ? COPY_DATA_COUNT_SEL : 0));
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);
        radeon_emit(cmd_buffer->cs, (R_028028_DB_STENCIL_CLEAR + 4 * reg_offset) >> 2);
        radeon_emit(cmd_buffer->cs, 0);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
        radeon_emit(cmd_buffer->cs, 0);
}

/*
 * With DCC some colors don't require CMASK elimination before being
 * used as a texture. This sets a predicate value to determine if the
 * cmask eliminate is required.
 */
void
radv_set_dcc_need_cmask_elim_pred(struct radv_cmd_buffer *cmd_buffer,
                                  struct radv_image *image,
                                  bool value)
{
        uint64_t pred_val = value;
        uint64_t va = radv_buffer_get_va(image->bo);
        va += image->offset + image->dcc_pred_offset;

        assert(radv_image_has_dcc(image));

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
        radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
                                    S_370_WR_CONFIRM(1) |
                                    S_370_ENGINE_SEL(V_370_PFP));
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);
        radeon_emit(cmd_buffer->cs, pred_val);
        radeon_emit(cmd_buffer->cs, pred_val >> 32);
}

void
radv_set_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
                          struct radv_image *image,
                          int idx,
                          uint32_t color_values[2])
{
        uint64_t va = radv_buffer_get_va(image->bo);
        va += image->offset + image->clear_value_offset;

        assert(radv_image_has_cmask(image) || radv_image_has_dcc(image));

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_WRITE_DATA, 4, 0));
        radeon_emit(cmd_buffer->cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
                                    S_370_WR_CONFIRM(1) |
                                    S_370_ENGINE_SEL(V_370_PFP));
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);
        radeon_emit(cmd_buffer->cs, color_values[0]);
        radeon_emit(cmd_buffer->cs, color_values[1]);

        radeon_set_context_reg_seq(cmd_buffer->cs, R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c, 2);
        radeon_emit(cmd_buffer->cs, color_values[0]);
        radeon_emit(cmd_buffer->cs, color_values[1]);
}

static void
radv_load_color_clear_regs(struct radv_cmd_buffer *cmd_buffer,
                           struct radv_image *image,
                           int idx)
{
        uint64_t va = radv_buffer_get_va(image->bo);
        va += image->offset + image->clear_value_offset;

        if (!radv_image_has_cmask(image) && !radv_image_has_dcc(image))
                return;

        uint32_t reg = R_028C8C_CB_COLOR0_CLEAR_WORD0 + idx * 0x3c;

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_COPY_DATA, 4, cmd_buffer->state.predicating));
        radeon_emit(cmd_buffer->cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
                                    COPY_DATA_DST_SEL(COPY_DATA_REG) |
                                    COPY_DATA_COUNT_SEL);
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);
        radeon_emit(cmd_buffer->cs, reg >> 2);
        radeon_emit(cmd_buffer->cs, 0);

        radeon_emit(cmd_buffer->cs, PKT3(PKT3_PFP_SYNC_ME, 0, cmd_buffer->state.predicating));
        radeon_emit(cmd_buffer->cs, 0);
}

static void
radv_emit_framebuffer_state(struct radv_cmd_buffer *cmd_buffer)
{
        int i;
        struct radv_framebuffer *framebuffer = cmd_buffer->state.framebuffer;
        const struct radv_subpass *subpass = cmd_buffer->state.subpass;

        /* this may happen for inherited secondary recording */
        if (!framebuffer)
                return;

        for (i = 0; i < 8; ++i) {
                if (i >= subpass->color_count || subpass->color_attachments[i].attachment == VK_ATTACHMENT_UNUSED) {
                        radeon_set_context_reg(cmd_buffer->cs, R_028C70_CB_COLOR0_INFO + i * 0x3C,
                                               S_028C70_FORMAT(V_028C70_COLOR_INVALID));
                        continue;
                }

                int idx = subpass->color_attachments[i].attachment;
                struct radv_attachment_info *att = &framebuffer->attachments[idx];
                struct radv_image *image = att->attachment->image;
                VkImageLayout layout = subpass->color_attachments[i].layout;

                radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);

                assert(att->attachment->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT);
                radv_emit_fb_color_state(cmd_buffer, i, att, image, layout);

                radv_load_color_clear_regs(cmd_buffer, image, i);
        }

        if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
                int idx = subpass->depth_stencil_attachment.attachment;
                VkImageLayout layout = subpass->depth_stencil_attachment.layout;
                struct radv_attachment_info *att = &framebuffer->attachments[idx];
                struct radv_image *image = att->attachment->image;
                radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, att->attachment->bo, 8);
                MAYBE_UNUSED uint32_t queue_mask = radv_image_queue_family_mask(image,
                                                                                cmd_buffer->queue_family_index,
                                                                                cmd_buffer->queue_family_index);
                /* We currently don't support writing decompressed HTILE */
                assert(radv_layout_has_htile(image, layout, queue_mask) ==
                       radv_layout_is_htile_compressed(image, layout, queue_mask));

                radv_emit_fb_ds_state(cmd_buffer, &att->ds, image, layout);

                if (att->ds.offset_scale != cmd_buffer->state.offset_scale) {
                        cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
                        cmd_buffer->state.offset_scale = att->ds.offset_scale;
                }
                radv_load_depth_clear_regs(cmd_buffer, image);
        } else {
                if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9)
                        radeon_set_context_reg_seq(cmd_buffer->cs, R_028038_DB_Z_INFO, 2);
                else
                        radeon_set_context_reg_seq(cmd_buffer->cs, R_028040_DB_Z_INFO, 2);

                radeon_emit(cmd_buffer->cs, S_028040_FORMAT(V_028040_Z_INVALID));       /* DB_Z_INFO */
                radeon_emit(cmd_buffer->cs, S_028044_FORMAT(V_028044_STENCIL_INVALID)); /* DB_STENCIL_INFO */
        }
        radeon_set_context_reg(cmd_buffer->cs, R_028208_PA_SC_WINDOW_SCISSOR_BR,
                               S_028208_BR_X(framebuffer->width) |
                               S_028208_BR_Y(framebuffer->height));

        if (cmd_buffer->device->dfsm_allowed) {
                radeon_emit(cmd_buffer->cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
                radeon_emit(cmd_buffer->cs, EVENT_TYPE(V_028A90_BREAK_BATCH) | EVENT_INDEX(0));
        }

        cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_FRAMEBUFFER;
}

static void
radv_emit_index_buffer(struct radv_cmd_buffer *cmd_buffer)
{
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        struct radv_cmd_state *state = &cmd_buffer->state;

        if (state->index_type != state->last_index_type) {
                if (cmd_buffer->device->physical_device->rad_info.chip_class >= GFX9) {
                        radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
                                                   2, state->index_type);
                } else {
                        radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
                        radeon_emit(cs, state->index_type);
                }

                state->last_index_type = state->index_type;
        }

        radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
        radeon_emit(cs, state->index_va);
        radeon_emit(cs, state->index_va >> 32);

        radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
        radeon_emit(cs, state->max_index_count);

        cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_INDEX_BUFFER;
}

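/* Program DB_COUNT_CONTROL: Z-pass counting is enabled only while occlusion
 * queries are active. Out-of-order rasterization is also toggled here,
 * because pipelines that request it may need it disabled for the duration
 * of an occlusion query and re-enabled afterwards.
 */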
void radv_set_db_count_control(struct radv_cmd_buffer *cmd_buffer)
{
        struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
        uint32_t pa_sc_mode_cntl_1 =
                pipeline ? pipeline->graphics.ms.pa_sc_mode_cntl_1 : 0;
        uint32_t db_count_control;

        if (!cmd_buffer->state.active_occlusion_queries) {
                if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
                        if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
                            pipeline->graphics.disable_out_of_order_rast_for_occlusion) {
                                /* Re-enable out-of-order rasterization if the
                                 * bound pipeline supports it and if it has
                                 * been disabled before starting occlusion
                                 * queries.
                                 */
                                radeon_set_context_reg(cmd_buffer->cs,
                                                       R_028A4C_PA_SC_MODE_CNTL_1,
                                                       pa_sc_mode_cntl_1);
                        }
                        db_count_control = 0;
                } else {
                        db_count_control = S_028004_ZPASS_INCREMENT_DISABLE(1);
                }
        } else {
                const struct radv_subpass *subpass = cmd_buffer->state.subpass;
                uint32_t sample_rate = subpass ? util_logbase2(subpass->max_sample_count) : 0;
                bool perfect = cmd_buffer->state.perfect_occlusion_queries_enabled;

                if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
                        db_count_control =
                                S_028004_PERFECT_ZPASS_COUNTS(perfect) |
                                S_028004_SAMPLE_RATE(sample_rate) |
                                S_028004_ZPASS_ENABLE(1) |
                                S_028004_SLICE_EVEN_ENABLE(1) |
                                S_028004_SLICE_ODD_ENABLE(1);

                        if (G_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE(pa_sc_mode_cntl_1) &&
                            pipeline->graphics.disable_out_of_order_rast_for_occlusion) {
                                /* If the bound pipeline has enabled
                                 * out-of-order rasterization, we should
                                 * disable it before starting occlusion
                                 * queries.
                                 */
                                pa_sc_mode_cntl_1 &= C_028A4C_OUT_OF_ORDER_PRIMITIVE_ENABLE;

                                radeon_set_context_reg(cmd_buffer->cs,
                                                       R_028A4C_PA_SC_MODE_CNTL_1,
                                                       pa_sc_mode_cntl_1);
                        }
                } else {
                        db_count_control = S_028004_PERFECT_ZPASS_COUNTS(1) |
                                S_028004_SAMPLE_RATE(sample_rate);
                }
        }

        radeon_set_context_reg(cmd_buffer->cs, R_028004_DB_COUNT_CONTROL, db_count_control);
}

static void
radv_cmd_buffer_flush_dynamic_state(struct radv_cmd_buffer *cmd_buffer)
{
        uint32_t states = cmd_buffer->state.dirty & cmd_buffer->state.emitted_pipeline->graphics.needed_dynamic_state;

        if (states & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
                radv_emit_viewport(cmd_buffer);

        if (states & (RADV_CMD_DIRTY_DYNAMIC_SCISSOR | RADV_CMD_DIRTY_DYNAMIC_VIEWPORT))
                radv_emit_scissor(cmd_buffer);

        if (states & RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH)
                radv_emit_line_width(cmd_buffer);

        if (states & RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS)
                radv_emit_blend_constants(cmd_buffer);

        if (states & (RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE |
                      RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK |
                      RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK))
                radv_emit_stencil(cmd_buffer);

        if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS)
                radv_emit_depth_bounds(cmd_buffer);

        if (states & RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)
                radv_emit_depth_bias(cmd_buffer);

        if (states & RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE)
                radv_emit_discard_rectangle(cmd_buffer);

        cmd_buffer->state.dirty &= ~states;
}

static void
emit_stage_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
                                   struct radv_pipeline *pipeline,
                                   int idx,
                                   uint64_t va,
                                   gl_shader_stage stage)
{
        struct radv_userdata_info *desc_set_loc = &pipeline->shaders[stage]->info.user_sgprs_locs.descriptor_sets[idx];
        uint32_t base_reg = pipeline->user_data_0[stage];

        if (desc_set_loc->sgpr_idx == -1 || desc_set_loc->indirect)
                return;

        assert(!desc_set_loc->indirect);
        assert(desc_set_loc->num_sgprs == 2);
        radeon_set_sh_reg_seq(cmd_buffer->cs,
                              base_reg + desc_set_loc->sgpr_idx * 4, 2);
        radeon_emit(cmd_buffer->cs, va);
        radeon_emit(cmd_buffer->cs, va >> 32);
}

static void
radv_emit_descriptor_set_userdata(struct radv_cmd_buffer *cmd_buffer,
                                  VkShaderStageFlags stages,
                                  struct radv_descriptor_set *set,
                                  unsigned idx)
{
        if (cmd_buffer->state.pipeline) {
                radv_foreach_stage(stage, stages) {
                        if (cmd_buffer->state.pipeline->shaders[stage])
                                emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.pipeline,
                                                                   idx, set->va,
                                                                   stage);
                }
        }

        if (cmd_buffer->state.compute_pipeline && (stages & VK_SHADER_STAGE_COMPUTE_BIT))
                emit_stage_descriptor_set_userdata(cmd_buffer, cmd_buffer->state.compute_pipeline,
                                                   idx, set->va,
                                                   MESA_SHADER_COMPUTE);
}

static void
radv_flush_push_descriptors(struct radv_cmd_buffer *cmd_buffer,
                            VkPipelineBindPoint bind_point)
{
        struct radv_descriptor_state *descriptors_state =
                radv_get_descriptors_state(cmd_buffer, bind_point);
        struct radv_descriptor_set *set = &descriptors_state->push_set.set;
        unsigned bo_offset;

        if (!radv_cmd_buffer_upload_data(cmd_buffer, set->size, 32,
                                         set->mapped_ptr,
                                         &bo_offset))
                return;

        set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
        set->va += bo_offset;
}

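/* When a stage has no free user SGPRs to point at every descriptor set
 * individually, the pipeline falls back to one indirect table: an array of
 * 64-bit set addresses in the upload BO, with a single pointer to the table
 * passed via AC_UD_INDIRECT_DESCRIPTOR_SETS.
 */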
static void
radv_flush_indirect_descriptor_sets(struct radv_cmd_buffer *cmd_buffer,
                                    VkPipelineBindPoint bind_point)
{
        struct radv_descriptor_state *descriptors_state =
                radv_get_descriptors_state(cmd_buffer, bind_point);
        uint32_t size = MAX_SETS * 2 * 4;
        uint32_t offset;
        void *ptr;

        if (!radv_cmd_buffer_upload_alloc(cmd_buffer, size,
                                          256, &offset, &ptr))
                return;

        for (unsigned i = 0; i < MAX_SETS; i++) {
                uint32_t *uptr = ((uint32_t *)ptr) + i * 2;
                uint64_t set_va = 0;
                struct radv_descriptor_set *set = descriptors_state->sets[i];
                if (descriptors_state->valid & (1u << i))
                        set_va = set->va;
                uptr[0] = set_va & 0xffffffff;
                uptr[1] = set_va >> 32;
        }

        uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
        va += offset;

        if (cmd_buffer->state.pipeline) {
                if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_VERTEX])
                        radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
                                                   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

                if (cmd_buffer->state.pipeline->shaders[MESA_SHADER_FRAGMENT])
                        radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_FRAGMENT,
                                                   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

                if (radv_pipeline_has_gs(cmd_buffer->state.pipeline))
                        radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
                                                   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

                if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
                        radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_CTRL,
                                                   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);

                if (radv_pipeline_has_tess(cmd_buffer->state.pipeline))
                        radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_TESS_EVAL,
                                                   AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
        }

        if (cmd_buffer->state.compute_pipeline)
                radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.compute_pipeline, MESA_SHADER_COMPUTE,
                                           AC_UD_INDIRECT_DESCRIPTOR_SETS, va);
}

static void
radv_flush_descriptors(struct radv_cmd_buffer *cmd_buffer,
                       VkShaderStageFlags stages)
{
        VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
                                         VK_PIPELINE_BIND_POINT_COMPUTE :
                                         VK_PIPELINE_BIND_POINT_GRAPHICS;
        struct radv_descriptor_state *descriptors_state =
                radv_get_descriptors_state(cmd_buffer, bind_point);
        unsigned i;

        if (!descriptors_state->dirty)
                return;

        if (descriptors_state->push_dirty)
                radv_flush_push_descriptors(cmd_buffer, bind_point);

        if ((cmd_buffer->state.pipeline && cmd_buffer->state.pipeline->need_indirect_descriptor_sets) ||
            (cmd_buffer->state.compute_pipeline && cmd_buffer->state.compute_pipeline->need_indirect_descriptor_sets)) {
                radv_flush_indirect_descriptor_sets(cmd_buffer, bind_point);
        }

        MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
                                                           cmd_buffer->cs,
                                                           MAX_SETS * MESA_SHADER_STAGES * 4);

        for_each_bit(i, descriptors_state->dirty) {
                struct radv_descriptor_set *set = descriptors_state->sets[i];
                if (!(descriptors_state->valid & (1u << i)))
                        continue;

                radv_emit_descriptor_set_userdata(cmd_buffer, stages, set, i);
        }
        descriptors_state->dirty = 0;
        descriptors_state->push_dirty = false;

        if (unlikely(cmd_buffer->device->trace_bo))
                radv_save_descriptors(cmd_buffer, bind_point);

        assert(cmd_buffer->cs->cdw <= cdw_max);
}

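/* Upload the current push constants (followed by the dynamic buffer
 * descriptors) to the upload BO and point each active stage's
 * AC_UD_PUSH_CONSTANTS user SGPR pair at them.
 */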
static void
radv_flush_constants(struct radv_cmd_buffer *cmd_buffer,
                     VkShaderStageFlags stages)
{
        struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
                                         ? cmd_buffer->state.compute_pipeline
                                         : cmd_buffer->state.pipeline;
        struct radv_pipeline_layout *layout = pipeline->layout;
        unsigned offset;
        void *ptr;
        uint64_t va;

        stages &= cmd_buffer->push_constant_stages;
        if (!stages ||
            (!layout->push_constant_size && !layout->dynamic_offset_count))
                return;

        if (!radv_cmd_buffer_upload_alloc(cmd_buffer, layout->push_constant_size +
                                          16 * layout->dynamic_offset_count,
                                          256, &offset, &ptr))
                return;

        memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
        memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
               16 * layout->dynamic_offset_count);

        va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
        va += offset;

        MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
                                                           cmd_buffer->cs, MESA_SHADER_STAGES * 4);

        radv_foreach_stage(stage, stages) {
                if (pipeline->shaders[stage]) {
                        radv_emit_userdata_address(cmd_buffer, pipeline, stage,
                                                   AC_UD_PUSH_CONSTANTS, va);
                }
        }

        cmd_buffer->push_constant_stages &= ~stages;
        assert(cmd_buffer->cs->cdw <= cdw_max);
}

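/* Build the vertex buffer descriptor table: one 16-byte buffer resource
 * (V#) per vertex element, written to the upload BO. Words 0/1 hold the
 * 48-bit address and stride, word 2 the range (expressed in records when a
 * stride is set on <= CIK), and word 3 the format/swizzle taken from the
 * pipeline.
 */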
static void
radv_flush_vertex_descriptors(struct radv_cmd_buffer *cmd_buffer,
                              bool pipeline_is_dirty)
{
        if ((pipeline_is_dirty ||
            (cmd_buffer->state.dirty & RADV_CMD_DIRTY_VERTEX_BUFFER)) &&
            cmd_buffer->state.pipeline->vertex_elements.count &&
            radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.has_vertex_buffers) {
                struct radv_vertex_elements_info *velems = &cmd_buffer->state.pipeline->vertex_elements;
                unsigned vb_offset;
                void *vb_ptr;
                uint32_t i = 0;
                uint32_t count = velems->count;
                uint64_t va;

                /* allocate some descriptor state for vertex buffers */
                if (!radv_cmd_buffer_upload_alloc(cmd_buffer, count * 16, 256,
                                                  &vb_offset, &vb_ptr))
                        return;

                for (i = 0; i < count; i++) {
                        uint32_t *desc = &((uint32_t *)vb_ptr)[i * 4];
                        uint32_t offset;
                        int vb = velems->binding[i];
                        struct radv_buffer *buffer = cmd_buffer->vertex_bindings[vb].buffer;
                        uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];

                        va = radv_buffer_get_va(buffer->bo);

                        offset = cmd_buffer->vertex_bindings[vb].offset + velems->offset[i];
                        va += offset + buffer->offset;
                        desc[0] = va;
                        desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) | S_008F04_STRIDE(stride);
                        if (cmd_buffer->device->physical_device->rad_info.chip_class <= CIK && stride)
                                desc[2] = (buffer->size - offset - velems->format_size[i]) / stride + 1;
                        else
                                desc[2] = buffer->size - offset;
                        desc[3] = velems->rsrc_word3[i];
                }

                va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
                va += vb_offset;

                radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
                                           AC_UD_VS_VERTEX_BUFFERS, va);

                cmd_buffer->state.vb_va = va;
                cmd_buffer->state.vb_size = count * 16;
                cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_VBO_DESCRIPTORS;
        }
        cmd_buffer->state.dirty &= ~RADV_CMD_DIRTY_VERTEX_BUFFER;
}

static void
radv_upload_graphics_shader_descriptors(struct radv_cmd_buffer *cmd_buffer, bool pipeline_is_dirty)
{
        radv_flush_vertex_descriptors(cmd_buffer, pipeline_is_dirty);
        radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
        radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_ALL_GRAPHICS);
}

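/* Emit per-draw VGT state: IA_MULTI_VGT_PARAM and the primitive restart
 * enable/index, each written only when it changed since the last draw.
 */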
static void
radv_emit_draw_registers(struct radv_cmd_buffer *cmd_buffer, bool indexed_draw,
                         bool instanced_draw, bool indirect_draw,
                         uint32_t draw_vertex_count)
{
        struct radeon_info *info = &cmd_buffer->device->physical_device->rad_info;
        struct radv_cmd_state *state = &cmd_buffer->state;
        struct radeon_winsys_cs *cs = cmd_buffer->cs;
        uint32_t ia_multi_vgt_param;
        int32_t primitive_reset_en;

        /* Draw state. */
        ia_multi_vgt_param =
                si_get_ia_multi_vgt_param(cmd_buffer, instanced_draw,
                                          indirect_draw, draw_vertex_count);

        if (state->last_ia_multi_vgt_param != ia_multi_vgt_param) {
                if (info->chip_class >= GFX9) {
                        radeon_set_uconfig_reg_idx(cs,
                                                   R_030960_IA_MULTI_VGT_PARAM,
                                                   4, ia_multi_vgt_param);
                } else if (info->chip_class >= CIK) {
                        radeon_set_context_reg_idx(cs,
                                                   R_028AA8_IA_MULTI_VGT_PARAM,
                                                   1, ia_multi_vgt_param);
                } else {
                        radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM,
                                               ia_multi_vgt_param);
                }
                state->last_ia_multi_vgt_param = ia_multi_vgt_param;
        }

        /* Primitive restart. */
        primitive_reset_en =
                indexed_draw && state->pipeline->graphics.prim_restart_enable;

        if (primitive_reset_en != state->last_primitive_reset_en) {
                state->last_primitive_reset_en = primitive_reset_en;
                if (info->chip_class >= GFX9) {
                        radeon_set_uconfig_reg(cs,
                                               R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
                                               primitive_reset_en);
                } else {
                        radeon_set_context_reg(cs,
                                               R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
                                               primitive_reset_en);
                }
        }

        if (primitive_reset_en) {
                uint32_t primitive_reset_index =
                        state->index_type ? 0xffffffffu : 0xffffu;

                if (primitive_reset_index != state->last_primitive_reset_index) {
                        radeon_set_context_reg(cs,
                                               R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
                                               primitive_reset_index);
                        state->last_primitive_reset_index = primitive_reset_index;
                }
        }
}

static void radv_stage_flush(struct radv_cmd_buffer *cmd_buffer,
                             VkPipelineStageFlags src_stage_mask)
{
        if (src_stage_mask & (VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT |
                              VK_PIPELINE_STAGE_TRANSFER_BIT |
                              VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
                              VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
                cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH;
        }

        if (src_stage_mask & (VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT |
                              VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT |
                              VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT |
                              VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT |
                              VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT |
                              VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT |
                              VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT |
                              VK_PIPELINE_STAGE_TRANSFER_BIT |
                              VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT |
                              VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT |
                              VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)) {
                cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_PS_PARTIAL_FLUSH;
        } else if (src_stage_mask & (VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
                                     VK_PIPELINE_STAGE_VERTEX_INPUT_BIT |
                                     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT)) {
                cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_VS_PARTIAL_FLUSH;
        }
}

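/* Translate a Vulkan srcAccessMask into the cache flushes that make those
 * writes visible; radv_dst_access_flush below does the same for
 * dstAccessMask and the invalidations the destination needs.
 */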
static enum radv_cmd_flush_bits
radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
                      VkAccessFlags src_flags)
{
        enum radv_cmd_flush_bits flush_bits = 0;
        uint32_t b;
        for_each_bit(b, src_flags) {
                switch ((VkAccessFlagBits)(1 << b)) {
                case VK_ACCESS_SHADER_WRITE_BIT:
                        flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
                        break;
                case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
                        flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                      RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
                        break;
                case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
                        flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
                                      RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
                        break;
                case VK_ACCESS_TRANSFER_WRITE_BIT:
                        flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
                                      RADV_CMD_FLAG_FLUSH_AND_INV_CB_META |
                                      RADV_CMD_FLAG_FLUSH_AND_INV_DB |
                                      RADV_CMD_FLAG_FLUSH_AND_INV_DB_META |
                                      RADV_CMD_FLAG_INV_GLOBAL_L2;
                        break;
                default:
                        break;
                }
        }
        return flush_bits;
}

1812 static enum radv_cmd_flush_bits
1813 radv_dst_access_flush(struct radv_cmd_buffer *cmd_buffer,
1814 VkAccessFlags dst_flags,
1815 struct radv_image *image)
1817 enum radv_cmd_flush_bits flush_bits = 0;
1819 for_each_bit(b, dst_flags) {
1820 switch ((VkAccessFlagBits)(1 << b)) {
1821 case VK_ACCESS_INDIRECT_COMMAND_READ_BIT:
1822 case VK_ACCESS_INDEX_READ_BIT:
1824 case VK_ACCESS_UNIFORM_READ_BIT:
1825 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 | RADV_CMD_FLAG_INV_SMEM_L1;
1827 case VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT:
1828 case VK_ACCESS_SHADER_READ_BIT:
1829 case VK_ACCESS_TRANSFER_READ_BIT:
1830 case VK_ACCESS_INPUT_ATTACHMENT_READ_BIT:
1831 flush_bits |= RADV_CMD_FLAG_INV_VMEM_L1 |
1832 RADV_CMD_FLAG_INV_GLOBAL_L2;
1834 case VK_ACCESS_COLOR_ATTACHMENT_READ_BIT:
1835 /* TODO: change the check to `image &&` once the image gets
1836 * passed through from the subpass. */
1837 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1838 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
1839 RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
1841 case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_READ_BIT:
1842 if (!image || (image->usage & VK_IMAGE_USAGE_STORAGE_BIT))
1843 flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
1844 RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
1853 static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
1855 cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
1856 radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
1857 cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
1861 static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
1862 VkAttachmentReference att)
1864 unsigned idx = att.attachment;
1865 struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
1866 VkImageSubresourceRange range;
1867 range.aspectMask = 0;
1868 range.baseMipLevel = view->base_mip;
1869 range.levelCount = 1;
1870 range.baseArrayLayer = view->base_layer;
1871 range.layerCount = cmd_buffer->state.framebuffer->layers;
1873 radv_handle_image_transition(cmd_buffer,
1875 cmd_buffer->state.attachments[idx].current_layout,
1876 att.layout, 0, 0, &range,
1877 cmd_buffer->state.attachments[idx].pending_clear_aspects);
1879 cmd_buffer->state.attachments[idx].current_layout = att.layout;
1885 radv_cmd_buffer_set_subpass(struct radv_cmd_buffer *cmd_buffer,
1886 const struct radv_subpass *subpass, bool transitions)
1889 radv_subpass_barrier(cmd_buffer, &subpass->start_barrier);
1891 for (unsigned i = 0; i < subpass->color_count; ++i) {
1892 if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
1893 radv_handle_subpass_image_transition(cmd_buffer,
1894 subpass->color_attachments[i]);
1897 for (unsigned i = 0; i < subpass->input_count; ++i) {
1898 radv_handle_subpass_image_transition(cmd_buffer,
1899 subpass->input_attachments[i]);
1902 if (subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
1903 radv_handle_subpass_image_transition(cmd_buffer,
1904 subpass->depth_stencil_attachment);
1908 cmd_buffer->state.subpass = subpass;
1910 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_FRAMEBUFFER;
1914 radv_cmd_state_setup_attachments(struct radv_cmd_buffer *cmd_buffer,
1915 struct radv_render_pass *pass,
1916 const VkRenderPassBeginInfo *info)
1918 struct radv_cmd_state *state = &cmd_buffer->state;
1920 if (pass->attachment_count == 0) {
1921 state->attachments = NULL;
1925 state->attachments = vk_alloc(&cmd_buffer->pool->alloc,
1926 pass->attachment_count *
1927 sizeof(state->attachments[0]),
1928 8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1929 if (state->attachments == NULL) {
1930 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
1931 return cmd_buffer->record_result;
1934 for (uint32_t i = 0; i < pass->attachment_count; ++i) {
1935 struct radv_render_pass_attachment *att = &pass->attachments[i];
1936 VkImageAspectFlags att_aspects = vk_format_aspects(att->format);
1937 VkImageAspectFlags clear_aspects = 0;
1939 if (att_aspects == VK_IMAGE_ASPECT_COLOR_BIT) {
1940 /* color attachment */
1941 if (att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1942 clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
1945 /* depth/stencil attachment */
1946 if ((att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
1947 att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1948 clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
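/* If stencil is DONT_CARE we are free to clear it along with depth;
 * clearing both aspects at once is assumed to keep the fast HTILE
 * clear path usable. */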
1949 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
1950 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_DONT_CARE)
1951 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1953 if ((att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
1954 att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1955 clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1959 state->attachments[i].pending_clear_aspects = clear_aspects;
1960 state->attachments[i].cleared_views = 0;
1961 if (clear_aspects && info) {
1962 assert(info->clearValueCount > i);
1963 state->attachments[i].clear_value = info->pClearValues[i];
1966 state->attachments[i].current_layout = att->initial_layout;
1972 VkResult radv_AllocateCommandBuffers(
1974 const VkCommandBufferAllocateInfo *pAllocateInfo,
1975 VkCommandBuffer *pCommandBuffers)
1977 RADV_FROM_HANDLE(radv_device, device, _device);
1978 RADV_FROM_HANDLE(radv_cmd_pool, pool, pAllocateInfo->commandPool);
1980 VkResult result = VK_SUCCESS;
1983 for (i = 0; i < pAllocateInfo->commandBufferCount; i++) {
1985 if (!list_empty(&pool->free_cmd_buffers)) {
1986 struct radv_cmd_buffer *cmd_buffer = list_first_entry(&pool->free_cmd_buffers, struct radv_cmd_buffer, pool_link);
1988 list_del(&cmd_buffer->pool_link);
1989 list_addtail(&cmd_buffer->pool_link, &pool->cmd_buffers);
1991 result = radv_reset_cmd_buffer(cmd_buffer);
1992 cmd_buffer->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1993 cmd_buffer->level = pAllocateInfo->level;
1995 pCommandBuffers[i] = radv_cmd_buffer_to_handle(cmd_buffer);
1997 result = radv_create_cmd_buffer(device, pool, pAllocateInfo->level,
1998 &pCommandBuffers[i]);
2000 if (result != VK_SUCCESS)
2004 if (result != VK_SUCCESS) {
2005 radv_FreeCommandBuffers(_device, pAllocateInfo->commandPool,
2006 i, pCommandBuffers);
2008 /* From the Vulkan 1.0.66 spec:
2010 * "vkAllocateCommandBuffers can be used to create multiple
2011 * command buffers. If the creation of any of those command
2012 * buffers fails, the implementation must destroy all
2013 * successfully created command buffer objects from this
2014 * command, set all entries of the pCommandBuffers array to
2015 * NULL and return the error."
2017 memset(pCommandBuffers, 0,
2018 sizeof(*pCommandBuffers) * pAllocateInfo->commandBufferCount);
2024 void radv_FreeCommandBuffers(
2026 VkCommandPool commandPool,
2027 uint32_t commandBufferCount,
2028 const VkCommandBuffer *pCommandBuffers)
2030 for (uint32_t i = 0; i < commandBufferCount; i++) {
2031 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, pCommandBuffers[i]);
2034 if (cmd_buffer->pool) {
2035 list_del(&cmd_buffer->pool_link);
2036 list_addtail(&cmd_buffer->pool_link, &cmd_buffer->pool->free_cmd_buffers);
2038 radv_cmd_buffer_destroy(cmd_buffer);
2044 VkResult radv_ResetCommandBuffer(
2045 VkCommandBuffer commandBuffer,
2046 VkCommandBufferResetFlags flags)
2048 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2049 return radv_reset_cmd_buffer(cmd_buffer);
2052 static void emit_gfx_buffer_state(struct radv_cmd_buffer *cmd_buffer)
2054 struct radv_device *device = cmd_buffer->device;
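/* If the device pre-recorded an initial-state IB, chain to it;
 * otherwise emit the initial config registers inline below. */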
2055 if (device->gfx_init) {
2056 uint64_t va = radv_buffer_get_va(device->gfx_init);
2057 radv_cs_add_buffer(device->ws, cmd_buffer->cs, device->gfx_init, 8);
2058 radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
2059 radeon_emit(cmd_buffer->cs, va);
2060 radeon_emit(cmd_buffer->cs, va >> 32);
2061 radeon_emit(cmd_buffer->cs, device->gfx_init_size_dw & 0xffff);
2063 si_init_config(cmd_buffer);
2066 VkResult radv_BeginCommandBuffer(
2067 VkCommandBuffer commandBuffer,
2068 const VkCommandBufferBeginInfo *pBeginInfo)
2070 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2071 VkResult result = VK_SUCCESS;
2073 if (cmd_buffer->status != RADV_CMD_BUFFER_STATUS_INITIAL) {
2074 /* If the command buffer has already been reset with
2075 * vkResetCommandBuffer, there is no need to do it again.
2077 result = radv_reset_cmd_buffer(cmd_buffer);
2078 if (result != VK_SUCCESS)
2082 memset(&cmd_buffer->state, 0, sizeof(cmd_buffer->state));
2083 cmd_buffer->state.last_primitive_reset_en = -1;
2084 cmd_buffer->state.last_index_type = -1;
2085 cmd_buffer->state.last_num_instances = -1;
2086 cmd_buffer->state.last_vertex_offset = -1;
2087 cmd_buffer->state.last_first_instance = -1;
2088 cmd_buffer->usage_flags = pBeginInfo->flags;
2090 /* Set up the initial configuration of the command buffer. */
2091 if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
2092 switch (cmd_buffer->queue_family_index) {
2093 case RADV_QUEUE_GENERAL:
2094 emit_gfx_buffer_state(cmd_buffer);
2096 case RADV_QUEUE_COMPUTE:
2097 si_init_compute(cmd_buffer);
2099 case RADV_QUEUE_TRANSFER:
2105 if (pBeginInfo->flags & VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
2106 assert(pBeginInfo->pInheritanceInfo);
2107 cmd_buffer->state.framebuffer = radv_framebuffer_from_handle(pBeginInfo->pInheritanceInfo->framebuffer);
2108 cmd_buffer->state.pass = radv_render_pass_from_handle(pBeginInfo->pInheritanceInfo->renderPass);
2110 struct radv_subpass *subpass =
2111 &cmd_buffer->state.pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
2113 result = radv_cmd_state_setup_attachments(cmd_buffer, cmd_buffer->state.pass, NULL);
2114 if (result != VK_SUCCESS)
2117 radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
2120 if (unlikely(cmd_buffer->device->trace_bo))
2121 radv_cmd_buffer_trace_emit(cmd_buffer);
2123 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
2128 void radv_CmdBindVertexBuffers(
2129 VkCommandBuffer commandBuffer,
2130 uint32_t firstBinding,
2131 uint32_t bindingCount,
2132 const VkBuffer* pBuffers,
2133 const VkDeviceSize* pOffsets)
2135 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2136 struct radv_vertex_binding *vb = cmd_buffer->vertex_bindings;
2137 bool changed = false;
2139 /* We have to defer setting up the vertex buffers since we need
2140 * the buffer stride from the pipeline. */
2142 assert(firstBinding + bindingCount <= MAX_VBS);
2143 for (uint32_t i = 0; i < bindingCount; i++) {
2144 uint32_t idx = firstBinding + i;
2147 (vb[idx].buffer != radv_buffer_from_handle(pBuffers[i]) ||
2148 vb[idx].offset != pOffsets[i])) {
2152 vb[idx].buffer = radv_buffer_from_handle(pBuffers[i]);
2153 vb[idx].offset = pOffsets[i];
2155 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
2156 vb[idx].buffer->bo, 8);
2160 /* No state changes. */
2164 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_VERTEX_BUFFER;
2167 void radv_CmdBindIndexBuffer(
2168 VkCommandBuffer commandBuffer,
2170 VkDeviceSize offset,
2171 VkIndexType indexType)
2173 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2174 RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
2176 if (cmd_buffer->state.index_buffer == index_buffer &&
2177 cmd_buffer->state.index_offset == offset &&
2178 cmd_buffer->state.index_type == indexType) {
2179 /* No state changes. */
2183 cmd_buffer->state.index_buffer = index_buffer;
2184 cmd_buffer->state.index_offset = offset;
2185 cmd_buffer->state.index_type = indexType; /* vk matches hw */
2186 cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
2187 cmd_buffer->state.index_va += index_buffer->offset + offset;
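/* VK_INDEX_TYPE_UINT16 (0) means 2-byte indices (shift of 1);
 * VK_INDEX_TYPE_UINT32 (1) means 4-byte indices (shift of 2). */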
2189 int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
2190 cmd_buffer->state.max_index_count = (index_buffer->size - offset) >> index_size_shift;
2191 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
2192 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs, index_buffer->bo, 8);
2197 radv_bind_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2198 VkPipelineBindPoint bind_point,
2199 struct radv_descriptor_set *set, unsigned idx)
2201 struct radeon_winsys *ws = cmd_buffer->device->ws;
2203 radv_set_descriptor_set(cmd_buffer, bind_point, set, idx);
2207 assert(!(set->layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR));
2209 for (unsigned j = 0; j < set->layout->buffer_count; ++j)
2210 if (set->descriptors[j])
2211 radv_cs_add_buffer(ws, cmd_buffer->cs, set->descriptors[j], 7);
2214 radv_cs_add_buffer(ws, cmd_buffer->cs, set->bo, 8);
2217 void radv_CmdBindDescriptorSets(
2218 VkCommandBuffer commandBuffer,
2219 VkPipelineBindPoint pipelineBindPoint,
2220 VkPipelineLayout _layout,
2222 uint32_t descriptorSetCount,
2223 const VkDescriptorSet* pDescriptorSets,
2224 uint32_t dynamicOffsetCount,
2225 const uint32_t* pDynamicOffsets)
2227 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2228 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2229 unsigned dyn_idx = 0;
2231 const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
2233 for (unsigned i = 0; i < descriptorSetCount; ++i) {
2234 unsigned idx = i + firstSet;
2235 RADV_FROM_HANDLE(radv_descriptor_set, set, pDescriptorSets[i]);
2236 radv_bind_descriptor_set(cmd_buffer, pipelineBindPoint, set, idx);
2238 for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
2239 unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
2240 uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
2241 assert(dyn_idx < dynamicOffsetCount);
2243 struct radv_descriptor_range *range = set->dynamic_descriptors + j;
2244 uint64_t va = range->va + pDynamicOffsets[dyn_idx];
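/* Fill out the 4-dword buffer descriptor for the dynamic buffer:
 * address bits, range size and a raw 32-bit data format. */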
2246 dst[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
2247 dst[2] = no_dynamic_bounds ? 0xffffffffu : range->size;
2248 dst[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
2249 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
2250 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
2251 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
2252 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
2253 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
2254 cmd_buffer->push_constant_stages |=
2255 set->layout->dynamic_shader_stages;
2260 static bool radv_init_push_descriptor_set(struct radv_cmd_buffer *cmd_buffer,
2261 struct radv_descriptor_set *set,
2262 struct radv_descriptor_set_layout *layout,
2263 VkPipelineBindPoint bind_point)
2265 struct radv_descriptor_state *descriptors_state =
2266 radv_get_descriptors_state(cmd_buffer, bind_point);
2267 set->size = layout->size;
2268 set->layout = layout;
2270 if (descriptors_state->push_set.capacity < set->size) {
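/* Growth policy: at least 1 KiB, at least double the old capacity,
 * capped at the worst-case push set size (96 bytes is assumed to be
 * the largest descriptor here). */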
2271 size_t new_size = MAX2(set->size, 1024);
2272 new_size = MAX2(new_size, 2 * descriptors_state->push_set.capacity);
2273 new_size = MIN2(new_size, 96 * MAX_PUSH_DESCRIPTORS);
2275 free(set->mapped_ptr);
2276 set->mapped_ptr = malloc(new_size);
2278 if (!set->mapped_ptr) {
2279 descriptors_state->push_set.capacity = 0;
2280 cmd_buffer->record_result = VK_ERROR_OUT_OF_HOST_MEMORY;
2284 descriptors_state->push_set.capacity = new_size;
2290 void radv_meta_push_descriptor_set(
2291 struct radv_cmd_buffer* cmd_buffer,
2292 VkPipelineBindPoint pipelineBindPoint,
2293 VkPipelineLayout _layout,
2295 uint32_t descriptorWriteCount,
2296 const VkWriteDescriptorSet* pDescriptorWrites)
2298 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2299 struct radv_descriptor_set *push_set = &cmd_buffer->meta_push_descriptors;
2303 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2305 push_set->size = layout->set[set].layout->size;
2306 push_set->layout = layout->set[set].layout;
2308 if (!radv_cmd_buffer_upload_alloc(cmd_buffer, push_set->size, 32,
2310 (void**) &push_set->mapped_ptr))
2313 push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
2314 push_set->va += bo_offset;
2316 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2317 radv_descriptor_set_to_handle(push_set),
2318 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2320 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
2323 void radv_CmdPushDescriptorSetKHR(
2324 VkCommandBuffer commandBuffer,
2325 VkPipelineBindPoint pipelineBindPoint,
2326 VkPipelineLayout _layout,
2328 uint32_t descriptorWriteCount,
2329 const VkWriteDescriptorSet* pDescriptorWrites)
2331 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2332 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2333 struct radv_descriptor_state *descriptors_state =
2334 radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
2335 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
2337 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2339 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
2340 layout->set[set].layout,
2344 radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
2345 radv_descriptor_set_to_handle(push_set),
2346 descriptorWriteCount, pDescriptorWrites, 0, NULL);
2348 radv_set_descriptor_set(cmd_buffer, pipelineBindPoint, push_set, set);
2349 descriptors_state->push_dirty = true;
2352 void radv_CmdPushDescriptorSetWithTemplateKHR(
2353 VkCommandBuffer commandBuffer,
2354 VkDescriptorUpdateTemplateKHR descriptorUpdateTemplate,
2355 VkPipelineLayout _layout,
2359 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2360 RADV_FROM_HANDLE(radv_pipeline_layout, layout, _layout);
2361 RADV_FROM_HANDLE(radv_descriptor_update_template, templ, descriptorUpdateTemplate);
2362 struct radv_descriptor_state *descriptors_state =
2363 radv_get_descriptors_state(cmd_buffer, templ->bind_point);
2364 struct radv_descriptor_set *push_set = &descriptors_state->push_set.set;
2366 assert(layout->set[set].layout->flags & VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR);
2368 if (!radv_init_push_descriptor_set(cmd_buffer, push_set,
2369 layout->set[set].layout,
2373 radv_update_descriptor_set_with_template(cmd_buffer->device, cmd_buffer, push_set,
2374 descriptorUpdateTemplate, pData);
2376 radv_set_descriptor_set(cmd_buffer, templ->bind_point, push_set, set);
2377 descriptors_state->push_dirty = true;
2380 void radv_CmdPushConstants(VkCommandBuffer commandBuffer,
2381 VkPipelineLayout layout,
2382 VkShaderStageFlags stageFlags,
2385 const void* pValues)
2387 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2388 memcpy(cmd_buffer->push_constants + offset, pValues, size);
2389 cmd_buffer->push_constant_stages |= stageFlags;
2392 VkResult radv_EndCommandBuffer(
2393 VkCommandBuffer commandBuffer)
2395 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2397 if (cmd_buffer->queue_family_index != RADV_QUEUE_TRANSFER) {
2398 if (cmd_buffer->device->physical_device->rad_info.chip_class == SI)
2399 cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH | RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
2400 si_emit_cache_flush(cmd_buffer);
2403 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
2405 if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
2406 return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
2408 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_EXECUTABLE;
2410 return cmd_buffer->record_result;
2414 radv_emit_compute_pipeline(struct radv_cmd_buffer *cmd_buffer)
2416 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
2418 if (!pipeline || pipeline == cmd_buffer->state.emitted_compute_pipeline)
2421 cmd_buffer->state.emitted_compute_pipeline = pipeline;
2423 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, pipeline->cs.cdw);
2424 radeon_emit_array(cmd_buffer->cs, pipeline->cs.buf, pipeline->cs.cdw);
2426 cmd_buffer->compute_scratch_size_needed =
2427 MAX2(cmd_buffer->compute_scratch_size_needed,
2428 pipeline->max_waves * pipeline->scratch_bytes_per_wave);
2430 radv_cs_add_buffer(cmd_buffer->device->ws, cmd_buffer->cs,
2431 pipeline->shaders[MESA_SHADER_COMPUTE]->bo, 8);
2433 if (unlikely(cmd_buffer->device->trace_bo))
2434 radv_save_pipeline(cmd_buffer, pipeline, RING_COMPUTE);
2437 static void radv_mark_descriptor_sets_dirty(struct radv_cmd_buffer *cmd_buffer,
2438 VkPipelineBindPoint bind_point)
2440 struct radv_descriptor_state *descriptors_state =
2441 radv_get_descriptors_state(cmd_buffer, bind_point);
2443 descriptors_state->dirty |= descriptors_state->valid;
2446 void radv_CmdBindPipeline(
2447 VkCommandBuffer commandBuffer,
2448 VkPipelineBindPoint pipelineBindPoint,
2449 VkPipeline _pipeline)
2451 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2452 RADV_FROM_HANDLE(radv_pipeline, pipeline, _pipeline);
2454 switch (pipelineBindPoint) {
2455 case VK_PIPELINE_BIND_POINT_COMPUTE:
2456 if (cmd_buffer->state.compute_pipeline == pipeline)
2458 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
2460 cmd_buffer->state.compute_pipeline = pipeline;
2461 cmd_buffer->push_constant_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
2463 case VK_PIPELINE_BIND_POINT_GRAPHICS:
2464 if (cmd_buffer->state.pipeline == pipeline)
2466 radv_mark_descriptor_sets_dirty(cmd_buffer, pipelineBindPoint);
2468 cmd_buffer->state.pipeline = pipeline;
2472 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_PIPELINE;
2473 cmd_buffer->push_constant_stages |= pipeline->active_stages;
2475 /* the new vertex shader might not have the same user regs */
2476 cmd_buffer->state.last_first_instance = -1;
2477 cmd_buffer->state.last_vertex_offset = -1;
2479 /* Prefetch all pipeline shaders at first draw time. */
2480 cmd_buffer->state.prefetch_L2_mask |= RADV_PREFETCH_SHADERS;
2482 radv_bind_dynamic_state(cmd_buffer, &pipeline->dynamic_state);
2484 if (pipeline->graphics.esgs_ring_size > cmd_buffer->esgs_ring_size_needed)
2485 cmd_buffer->esgs_ring_size_needed = pipeline->graphics.esgs_ring_size;
2486 if (pipeline->graphics.gsvs_ring_size > cmd_buffer->gsvs_ring_size_needed)
2487 cmd_buffer->gsvs_ring_size_needed = pipeline->graphics.gsvs_ring_size;
2489 if (radv_pipeline_has_tess(pipeline))
2490 cmd_buffer->tess_rings_needed = true;
2492 if (radv_pipeline_has_gs(pipeline)) {
2493 struct radv_userdata_info *loc = radv_lookup_user_sgpr(cmd_buffer->state.pipeline, MESA_SHADER_GEOMETRY,
2494 AC_UD_SCRATCH_RING_OFFSETS);
2495 if (cmd_buffer->ring_offsets_idx == -1)
2496 cmd_buffer->ring_offsets_idx = loc->sgpr_idx;
2497 else if (loc->sgpr_idx != -1)
2498 assert(loc->sgpr_idx == cmd_buffer->ring_offsets_idx);
2502 assert(!"invalid bind point");
2507 void radv_CmdSetViewport(
2508 VkCommandBuffer commandBuffer,
2509 uint32_t firstViewport,
2510 uint32_t viewportCount,
2511 const VkViewport* pViewports)
2513 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2514 struct radv_cmd_state *state = &cmd_buffer->state;
2515 MAYBE_UNUSED const uint32_t total_count = firstViewport + viewportCount;
2517 assert(firstViewport < MAX_VIEWPORTS);
2518 assert(total_count >= 1 && total_count <= MAX_VIEWPORTS);
2520 if (cmd_buffer->device->physical_device->has_scissor_bug) {
2521 /* Try to skip unnecessary PS partial flushes when the viewports don't change. */
2524 if (!(state->dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
2525 RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) &&
2526 !memcmp(state->dynamic.viewport.viewports + firstViewport,
2527 pViewports, viewportCount * sizeof(*pViewports))) {
2532 memcpy(state->dynamic.viewport.viewports + firstViewport, pViewports,
2533 viewportCount * sizeof(*pViewports));
2535 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_VIEWPORT;
2538 void radv_CmdSetScissor(
2539 VkCommandBuffer commandBuffer,
2540 uint32_t firstScissor,
2541 uint32_t scissorCount,
2542 const VkRect2D* pScissors)
2544 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2545 struct radv_cmd_state *state = &cmd_buffer->state;
2546 MAYBE_UNUSED const uint32_t total_count = firstScissor + scissorCount;
2548 assert(firstScissor < MAX_SCISSORS);
2549 assert(total_count >= 1 && total_count <= MAX_SCISSORS);
2551 if (cmd_buffer->device->physical_device->has_scissor_bug) {
2552 /* Try to skip unnecessary PS partial flushes when the scissors don't change. */
2555 if (!(state->dirty & (RADV_CMD_DIRTY_DYNAMIC_VIEWPORT |
2556 RADV_CMD_DIRTY_DYNAMIC_SCISSOR)) &&
2557 !memcmp(state->dynamic.scissor.scissors + firstScissor,
2558 pScissors, scissorCount * sizeof(*pScissors))) {
2563 memcpy(state->dynamic.scissor.scissors + firstScissor, pScissors,
2564 scissorCount * sizeof(*pScissors));
2566 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_SCISSOR;
2569 void radv_CmdSetLineWidth(
2570 VkCommandBuffer commandBuffer,
2573 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2574 cmd_buffer->state.dynamic.line_width = lineWidth;
2575 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_LINE_WIDTH;
2578 void radv_CmdSetDepthBias(
2579 VkCommandBuffer commandBuffer,
2580 float depthBiasConstantFactor,
2581 float depthBiasClamp,
2582 float depthBiasSlopeFactor)
2584 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2586 cmd_buffer->state.dynamic.depth_bias.bias = depthBiasConstantFactor;
2587 cmd_buffer->state.dynamic.depth_bias.clamp = depthBiasClamp;
2588 cmd_buffer->state.dynamic.depth_bias.slope = depthBiasSlopeFactor;
2590 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS;
2593 void radv_CmdSetBlendConstants(
2594 VkCommandBuffer commandBuffer,
2595 const float blendConstants[4])
2597 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2599 memcpy(cmd_buffer->state.dynamic.blend_constants,
2600 blendConstants, sizeof(float) * 4);
2602 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS;
2605 void radv_CmdSetDepthBounds(
2606 VkCommandBuffer commandBuffer,
2607 float minDepthBounds,
2608 float maxDepthBounds)
2610 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2612 cmd_buffer->state.dynamic.depth_bounds.min = minDepthBounds;
2613 cmd_buffer->state.dynamic.depth_bounds.max = maxDepthBounds;
2615 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_DEPTH_BOUNDS;
2618 void radv_CmdSetStencilCompareMask(
2619 VkCommandBuffer commandBuffer,
2620 VkStencilFaceFlags faceMask,
2621 uint32_t compareMask)
2623 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2625 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2626 cmd_buffer->state.dynamic.stencil_compare_mask.front = compareMask;
2627 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2628 cmd_buffer->state.dynamic.stencil_compare_mask.back = compareMask;
2630 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK;
2633 void radv_CmdSetStencilWriteMask(
2634 VkCommandBuffer commandBuffer,
2635 VkStencilFaceFlags faceMask,
2638 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2640 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2641 cmd_buffer->state.dynamic.stencil_write_mask.front = writeMask;
2642 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2643 cmd_buffer->state.dynamic.stencil_write_mask.back = writeMask;
2645 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK;
2648 void radv_CmdSetStencilReference(
2649 VkCommandBuffer commandBuffer,
2650 VkStencilFaceFlags faceMask,
2653 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2655 if (faceMask & VK_STENCIL_FACE_FRONT_BIT)
2656 cmd_buffer->state.dynamic.stencil_reference.front = reference;
2657 if (faceMask & VK_STENCIL_FACE_BACK_BIT)
2658 cmd_buffer->state.dynamic.stencil_reference.back = reference;
2660 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE;
2663 void radv_CmdSetDiscardRectangleEXT(
2664 VkCommandBuffer commandBuffer,
2665 uint32_t firstDiscardRectangle,
2666 uint32_t discardRectangleCount,
2667 const VkRect2D* pDiscardRectangles)
2669 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2670 struct radv_cmd_state *state = &cmd_buffer->state;
2671 MAYBE_UNUSED const uint32_t total_count = firstDiscardRectangle + discardRectangleCount;
2673 assert(firstDiscardRectangle < MAX_DISCARD_RECTANGLES);
2674 assert(total_count >= 1 && total_count <= MAX_DISCARD_RECTANGLES);
2676 typed_memcpy(&state->dynamic.discard_rectangle.rectangles[firstDiscardRectangle],
2677 pDiscardRectangles, discardRectangleCount);
2679 state->dirty |= RADV_CMD_DIRTY_DYNAMIC_DISCARD_RECTANGLE;
2682 void radv_CmdExecuteCommands(
2683 VkCommandBuffer commandBuffer,
2684 uint32_t commandBufferCount,
2685 const VkCommandBuffer* pCmdBuffers)
2687 RADV_FROM_HANDLE(radv_cmd_buffer, primary, commandBuffer);
2689 assert(commandBufferCount > 0);
2691 /* Emit pending flushes on primary prior to executing secondary */
2692 si_emit_cache_flush(primary);
2694 for (uint32_t i = 0; i < commandBufferCount; i++) {
2695 RADV_FROM_HANDLE(radv_cmd_buffer, secondary, pCmdBuffers[i]);
2697 primary->scratch_size_needed = MAX2(primary->scratch_size_needed,
2698 secondary->scratch_size_needed);
2699 primary->compute_scratch_size_needed = MAX2(primary->compute_scratch_size_needed,
2700 secondary->compute_scratch_size_needed);
2702 if (secondary->esgs_ring_size_needed > primary->esgs_ring_size_needed)
2703 primary->esgs_ring_size_needed = secondary->esgs_ring_size_needed;
2704 if (secondary->gsvs_ring_size_needed > primary->gsvs_ring_size_needed)
2705 primary->gsvs_ring_size_needed = secondary->gsvs_ring_size_needed;
2706 if (secondary->tess_rings_needed)
2707 primary->tess_rings_needed = true;
2708 if (secondary->sample_positions_needed)
2709 primary->sample_positions_needed = true;
2711 if (secondary->ring_offsets_idx != -1) {
2712 if (primary->ring_offsets_idx == -1)
2713 primary->ring_offsets_idx = secondary->ring_offsets_idx;
2715 assert(secondary->ring_offsets_idx == primary->ring_offsets_idx);
2717 primary->device->ws->cs_execute_secondary(primary->cs, secondary->cs);
2720 /* When the secondary command buffer is compute only we don't
2721 * need to re-emit the current graphics pipeline.
2723 if (secondary->state.emitted_pipeline) {
2724 primary->state.emitted_pipeline =
2725 secondary->state.emitted_pipeline;
2728 /* When the secondary command buffer is graphics only we don't
2729 * need to re-emit the current compute pipeline.
2731 if (secondary->state.emitted_compute_pipeline) {
2732 primary->state.emitted_compute_pipeline =
2733 secondary->state.emitted_compute_pipeline;
2736 /* Only re-emit the draw packets when needed. */
2737 if (secondary->state.last_primitive_reset_en != -1) {
2738 primary->state.last_primitive_reset_en =
2739 secondary->state.last_primitive_reset_en;
2742 if (secondary->state.last_primitive_reset_index) {
2743 primary->state.last_primitive_reset_index =
2744 secondary->state.last_primitive_reset_index;
2747 if (secondary->state.last_ia_multi_vgt_param) {
2748 primary->state.last_ia_multi_vgt_param =
2749 secondary->state.last_ia_multi_vgt_param;
2752 primary->state.last_first_instance = secondary->state.last_first_instance;
2753 primary->state.last_num_instances = secondary->state.last_num_instances;
2754 primary->state.last_vertex_offset = secondary->state.last_vertex_offset;
2756 if (secondary->state.last_index_type != -1) {
2757 primary->state.last_index_type =
2758 secondary->state.last_index_type;
2762 /* After executing commands from secondary buffers we have to dirty some states. */
2765 primary->state.dirty |= RADV_CMD_DIRTY_PIPELINE |
2766 RADV_CMD_DIRTY_INDEX_BUFFER |
2767 RADV_CMD_DIRTY_DYNAMIC_ALL;
2768 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_GRAPHICS);
2769 radv_mark_descriptor_sets_dirty(primary, VK_PIPELINE_BIND_POINT_COMPUTE);
2772 VkResult radv_CreateCommandPool(
2774 const VkCommandPoolCreateInfo* pCreateInfo,
2775 const VkAllocationCallbacks* pAllocator,
2776 VkCommandPool* pCmdPool)
2778 RADV_FROM_HANDLE(radv_device, device, _device);
2779 struct radv_cmd_pool *pool;
2781 pool = vk_alloc2(&device->alloc, pAllocator, sizeof(*pool), 8,
2782 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2784 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
2787 pool->alloc = *pAllocator;
2789 pool->alloc = device->alloc;
2791 list_inithead(&pool->cmd_buffers);
2792 list_inithead(&pool->free_cmd_buffers);
2794 pool->queue_family_index = pCreateInfo->queueFamilyIndex;
2796 *pCmdPool = radv_cmd_pool_to_handle(pool);
2802 void radv_DestroyCommandPool(
2804 VkCommandPool commandPool,
2805 const VkAllocationCallbacks* pAllocator)
2807 RADV_FROM_HANDLE(radv_device, device, _device);
2808 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2813 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2814 &pool->cmd_buffers, pool_link) {
2815 radv_cmd_buffer_destroy(cmd_buffer);
2818 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2819 &pool->free_cmd_buffers, pool_link) {
2820 radv_cmd_buffer_destroy(cmd_buffer);
2823 vk_free2(&device->alloc, pAllocator, pool);
2826 VkResult radv_ResetCommandPool(
2828 VkCommandPool commandPool,
2829 VkCommandPoolResetFlags flags)
2831 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2834 list_for_each_entry(struct radv_cmd_buffer, cmd_buffer,
2835 &pool->cmd_buffers, pool_link) {
2836 result = radv_reset_cmd_buffer(cmd_buffer);
2837 if (result != VK_SUCCESS)
2844 void radv_TrimCommandPool(
2846 VkCommandPool commandPool,
2847 VkCommandPoolTrimFlagsKHR flags)
2849 RADV_FROM_HANDLE(radv_cmd_pool, pool, commandPool);
2854 list_for_each_entry_safe(struct radv_cmd_buffer, cmd_buffer,
2855 &pool->free_cmd_buffers, pool_link) {
2856 radv_cmd_buffer_destroy(cmd_buffer);
2860 void radv_CmdBeginRenderPass(
2861 VkCommandBuffer commandBuffer,
2862 const VkRenderPassBeginInfo* pRenderPassBegin,
2863 VkSubpassContents contents)
2865 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2866 RADV_FROM_HANDLE(radv_render_pass, pass, pRenderPassBegin->renderPass);
2867 RADV_FROM_HANDLE(radv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
2869 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
2870 cmd_buffer->cs, 2048);
2871 MAYBE_UNUSED VkResult result;
2873 cmd_buffer->state.framebuffer = framebuffer;
2874 cmd_buffer->state.pass = pass;
2875 cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
2877 result = radv_cmd_state_setup_attachments(cmd_buffer, pass, pRenderPassBegin);
2878 if (result != VK_SUCCESS)
2881 radv_cmd_buffer_set_subpass(cmd_buffer, pass->subpasses, true);
2882 assert(cmd_buffer->cs->cdw <= cdw_max);
2884 radv_cmd_buffer_clear_subpass(cmd_buffer);
2887 void radv_CmdNextSubpass(
2888 VkCommandBuffer commandBuffer,
2889 VkSubpassContents contents)
2891 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
2893 radv_cmd_buffer_resolve_subpass(cmd_buffer);
2895 radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs,
2898 radv_cmd_buffer_set_subpass(cmd_buffer, cmd_buffer->state.subpass + 1, true);
2899 radv_cmd_buffer_clear_subpass(cmd_buffer);
2902 static void radv_emit_view_index(struct radv_cmd_buffer *cmd_buffer, unsigned index)
2904 struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
2905 for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
2906 if (!pipeline->shaders[stage])
2908 struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
2909 if (loc->sgpr_idx == -1)
2911 uint32_t base_reg = pipeline->user_data_0[stage];
2912 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
2915 if (pipeline->gs_copy_shader) {
2916 struct radv_userdata_info *loc = &pipeline->gs_copy_shader->info.user_sgprs_locs.shader_data[AC_UD_VIEW_INDEX];
2917 if (loc->sgpr_idx != -1) {
2918 uint32_t base_reg = R_00B130_SPI_SHADER_USER_DATA_VS_0;
2919 radeon_set_sh_reg(cmd_buffer->cs, base_reg + loc->sgpr_idx * 4, index);
2925 radv_cs_emit_draw_packet(struct radv_cmd_buffer *cmd_buffer,
2926 uint32_t vertex_count)
2928 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, cmd_buffer->state.predicating));
2929 radeon_emit(cmd_buffer->cs, vertex_count);
2930 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
2931 S_0287F0_USE_OPAQUE(0));
2935 radv_cs_emit_draw_indexed_packet(struct radv_cmd_buffer *cmd_buffer,
2937 uint32_t index_count)
2939 radeon_emit(cmd_buffer->cs, PKT3(PKT3_DRAW_INDEX_2, 4, false));
2940 radeon_emit(cmd_buffer->cs, cmd_buffer->state.max_index_count);
2941 radeon_emit(cmd_buffer->cs, index_va);
2942 radeon_emit(cmd_buffer->cs, index_va >> 32);
2943 radeon_emit(cmd_buffer->cs, index_count);
2944 radeon_emit(cmd_buffer->cs, V_0287F0_DI_SRC_SEL_DMA);
2948 radv_cs_emit_indirect_draw_packet(struct radv_cmd_buffer *cmd_buffer,
2950 uint32_t draw_count,
2954 struct radeon_winsys_cs *cs = cmd_buffer->cs;
2955 unsigned di_src_sel = indexed ? V_0287F0_DI_SRC_SEL_DMA
2956 : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
2957 bool draw_id_enable = radv_get_vertex_shader(cmd_buffer->state.pipeline)->info.info.vs.needs_draw_id;
2958 uint32_t base_reg = cmd_buffer->state.pipeline->graphics.vtx_base_sgpr;
2961 /* Just reset the draw state for vertex data. */
2962 cmd_buffer->state.last_first_instance = -1;
2963 cmd_buffer->state.last_num_instances = -1;
2964 cmd_buffer->state.last_vertex_offset = -1;
2966 if (draw_count == 1 && !count_va && !draw_id_enable) {
2967 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT :
2968 PKT3_DRAW_INDIRECT, 3, false));
2970 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
2971 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
2972 radeon_emit(cs, di_src_sel);
2974 radeon_emit(cs, PKT3(indexed ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
2975 PKT3_DRAW_INDIRECT_MULTI,
2978 radeon_emit(cs, (base_reg - SI_SH_REG_OFFSET) >> 2);
2979 radeon_emit(cs, ((base_reg + 4) - SI_SH_REG_OFFSET) >> 2);
2980 radeon_emit(cs, (((base_reg + 8) - SI_SH_REG_OFFSET) >> 2) |
2981 S_2C3_DRAW_INDEX_ENABLE(draw_id_enable) |
2982 S_2C3_COUNT_INDIRECT_ENABLE(!!count_va));
2983 radeon_emit(cs, draw_count); /* count */
2984 radeon_emit(cs, count_va); /* count_addr */
2985 radeon_emit(cs, count_va >> 32);
2986 radeon_emit(cs, stride); /* stride */
2987 radeon_emit(cs, di_src_sel);
2991 struct radv_draw_info {
2993 * Number of vertices.
2998 * Index of the first vertex.
3000 int32_t vertex_offset;
3003 * First instance id.
3005 uint32_t first_instance;
3008 * Number of instances.
3010 uint32_t instance_count;
3013 * First index (indexed draws only).
3015 uint32_t first_index;
3018 * Whether it's an indexed draw.
3023 * Indirect draw parameters resource.
3025 struct radv_buffer *indirect;
3026 uint64_t indirect_offset;
3030 * Draw count parameters resource.
3032 struct radv_buffer *count_buffer;
3033 uint64_t count_buffer_offset;
3037 radv_emit_draw_packets(struct radv_cmd_buffer *cmd_buffer,
3038 const struct radv_draw_info *info)
3040 struct radv_cmd_state *state = &cmd_buffer->state;
3041 struct radeon_winsys *ws = cmd_buffer->device->ws;
3042 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3044 if (info->indirect) {
3045 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3046 uint64_t count_va = 0;
3048 va += info->indirect->offset + info->indirect_offset;
3050 radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
3052 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
3054 radeon_emit(cs, va);
3055 radeon_emit(cs, va >> 32);
3057 if (info->count_buffer) {
3058 count_va = radv_buffer_get_va(info->count_buffer->bo);
3059 count_va += info->count_buffer->offset +
3060 info->count_buffer_offset;
3062 radv_cs_add_buffer(ws, cs, info->count_buffer->bo, 8);
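/* With multiview, replay the indirect draw once per bit set in the
 * subpass view mask, updating the view index user SGPR between
 * draws. */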
3065 if (!state->subpass->view_mask) {
3066 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3073 for_each_bit(i, state->subpass->view_mask) {
3074 radv_emit_view_index(cmd_buffer, i);
3076 radv_cs_emit_indirect_draw_packet(cmd_buffer,
3084 assert(state->pipeline->graphics.vtx_base_sgpr);
3086 if (info->vertex_offset != state->last_vertex_offset ||
3087 info->first_instance != state->last_first_instance) {
3088 radeon_set_sh_reg_seq(cs, state->pipeline->graphics.vtx_base_sgpr,
3089 state->pipeline->graphics.vtx_emit_num);
3091 radeon_emit(cs, info->vertex_offset);
3092 radeon_emit(cs, info->first_instance);
3093 if (state->pipeline->graphics.vtx_emit_num == 3)
3095 state->last_first_instance = info->first_instance;
3096 state->last_vertex_offset = info->vertex_offset;
3099 if (state->last_num_instances != info->instance_count) {
3100 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, false));
3101 radeon_emit(cs, info->instance_count);
3102 state->last_num_instances = info->instance_count;
3105 if (info->indexed) {
3106 int index_size = state->index_type ? 4 : 2;
3109 index_va = state->index_va;
3110 index_va += info->first_index * index_size;
3112 if (!state->subpass->view_mask) {
3113 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3118 for_each_bit(i, state->subpass->view_mask) {
3119 radv_emit_view_index(cmd_buffer, i);
3121 radv_cs_emit_draw_indexed_packet(cmd_buffer,
3127 if (!state->subpass->view_mask) {
3128 radv_cs_emit_draw_packet(cmd_buffer, info->count);
3131 for_each_bit(i, state->subpass->view_mask) {
3132 radv_emit_view_index(cmd_buffer, i);
3134 radv_cs_emit_draw_packet(cmd_buffer,
3143 radv_emit_all_graphics_states(struct radv_cmd_buffer *cmd_buffer,
3144 const struct radv_draw_info *info)
3146 if ((cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER) ||
3147 cmd_buffer->state.emitted_pipeline != cmd_buffer->state.pipeline)
3148 radv_emit_rbplus_state(cmd_buffer);
3150 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE)
3151 radv_emit_graphics_pipeline(cmd_buffer);
3153 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_FRAMEBUFFER)
3154 radv_emit_framebuffer_state(cmd_buffer);
3156 if (info->indexed) {
3157 if (cmd_buffer->state.dirty & RADV_CMD_DIRTY_INDEX_BUFFER)
3158 radv_emit_index_buffer(cmd_buffer);
3160 /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
3161 * so the state must be re-emitted before the next indexed draw. */
3164 if (cmd_buffer->device->physical_device->rad_info.chip_class >= CIK) {
3165 cmd_buffer->state.last_index_type = -1;
3166 cmd_buffer->state.dirty |= RADV_CMD_DIRTY_INDEX_BUFFER;
3170 radv_cmd_buffer_flush_dynamic_state(cmd_buffer);
3172 radv_emit_draw_registers(cmd_buffer, info->indexed,
3173 info->instance_count > 1, info->indirect,
3174 info->indirect ? 0 : info->count);
3178 radv_draw(struct radv_cmd_buffer *cmd_buffer,
3179 const struct radv_draw_info *info)
3182 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
3183 bool pipeline_is_dirty =
3184 (cmd_buffer->state.dirty & RADV_CMD_DIRTY_PIPELINE) &&
3185 cmd_buffer->state.pipeline &&
3186 cmd_buffer->state.pipeline != cmd_buffer->state.emitted_pipeline;
3188 MAYBE_UNUSED unsigned cdw_max =
3189 radeon_check_space(cmd_buffer->device->ws,
3190 cmd_buffer->cs, 4096);
3192 /* Use optimal packet order based on whether we need to sync the pipeline. */
3195 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3196 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3197 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
3198 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
3199 /* If we have to wait for idle, set all states first, so that
3200 * all SET packets are processed in parallel with previous draw
3201 * calls. Then upload descriptors, set shader pointers, and
3202 * draw, and prefetch at the end. This ensures that the time
3203 * the CUs are idle is very short. (there are only SET_SH
3204 * packets between the wait and the draw)
3206 radv_emit_all_graphics_states(cmd_buffer, info);
3207 si_emit_cache_flush(cmd_buffer);
3208 /* <-- CUs are idle here --> */
3210 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
3212 radv_emit_draw_packets(cmd_buffer, info);
3213 /* <-- CUs are busy here --> */
3215 /* Start prefetches after the draw has been started. Both will
3216 * run in parallel, but starting the draw first is more important. */
3219 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3220 radv_emit_prefetch_L2(cmd_buffer,
3221 cmd_buffer->state.pipeline, false);
3224 /* If we don't wait for idle, start prefetches first, then set
3225 * states, and draw at the end.
3227 si_emit_cache_flush(cmd_buffer);
3229 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3230 /* Only prefetch the vertex shader and VBO descriptors
3231 * in order to start the draw as soon as possible.
3233 radv_emit_prefetch_L2(cmd_buffer,
3234 cmd_buffer->state.pipeline, true);
3237 radv_upload_graphics_shader_descriptors(cmd_buffer, pipeline_is_dirty);
3239 radv_emit_all_graphics_states(cmd_buffer, info);
3240 radv_emit_draw_packets(cmd_buffer, info);
3242 /* Prefetch the remaining shaders after the draw has been started. */
3245 if (has_prefetch && cmd_buffer->state.prefetch_L2_mask) {
3246 radv_emit_prefetch_L2(cmd_buffer,
3247 cmd_buffer->state.pipeline, false);
3251 assert(cmd_buffer->cs->cdw <= cdw_max);
3252 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_PS_PARTIAL_FLUSH);
3256 VkCommandBuffer commandBuffer,
3257 uint32_t vertexCount,
3258 uint32_t instanceCount,
3259 uint32_t firstVertex,
3260 uint32_t firstInstance)
3262 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3263 struct radv_draw_info info = {};
3265 info.count = vertexCount;
3266 info.instance_count = instanceCount;
3267 info.first_instance = firstInstance;
3268 info.vertex_offset = firstVertex;
3270 radv_draw(cmd_buffer, &info);
3273 void radv_CmdDrawIndexed(
3274 VkCommandBuffer commandBuffer,
3275 uint32_t indexCount,
3276 uint32_t instanceCount,
3277 uint32_t firstIndex,
3278 int32_t vertexOffset,
3279 uint32_t firstInstance)
3281 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3282 struct radv_draw_info info = {};
3284 info.indexed = true;
3285 info.count = indexCount;
3286 info.instance_count = instanceCount;
3287 info.first_index = firstIndex;
3288 info.vertex_offset = vertexOffset;
3289 info.first_instance = firstInstance;
3291 radv_draw(cmd_buffer, &info);
3294 void radv_CmdDrawIndirect(
3295 VkCommandBuffer commandBuffer,
3297 VkDeviceSize offset,
3301 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3302 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3303 struct radv_draw_info info = {};
3305 info.count = drawCount;
3306 info.indirect = buffer;
3307 info.indirect_offset = offset;
3308 info.stride = stride;
3310 radv_draw(cmd_buffer, &info);
3313 void radv_CmdDrawIndexedIndirect(
3314 VkCommandBuffer commandBuffer,
3316 VkDeviceSize offset,
3320 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3321 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3322 struct radv_draw_info info = {};
3324 info.indexed = true;
3325 info.count = drawCount;
3326 info.indirect = buffer;
3327 info.indirect_offset = offset;
3328 info.stride = stride;
3330 radv_draw(cmd_buffer, &info);
3333 void radv_CmdDrawIndirectCountAMD(
3334 VkCommandBuffer commandBuffer,
3336 VkDeviceSize offset,
3337 VkBuffer _countBuffer,
3338 VkDeviceSize countBufferOffset,
3339 uint32_t maxDrawCount,
3342 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3343 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3344 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3345 struct radv_draw_info info = {};
3347 info.count = maxDrawCount;
3348 info.indirect = buffer;
3349 info.indirect_offset = offset;
3350 info.count_buffer = count_buffer;
3351 info.count_buffer_offset = countBufferOffset;
3352 info.stride = stride;
3354 radv_draw(cmd_buffer, &info);
3357 void radv_CmdDrawIndexedIndirectCountAMD(
3358 VkCommandBuffer commandBuffer,
3360 VkDeviceSize offset,
3361 VkBuffer _countBuffer,
3362 VkDeviceSize countBufferOffset,
3363 uint32_t maxDrawCount,
3366 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3367 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3368 RADV_FROM_HANDLE(radv_buffer, count_buffer, _countBuffer);
3369 struct radv_draw_info info = {};
3371 info.indexed = true;
3372 info.count = maxDrawCount;
3373 info.indirect = buffer;
3374 info.indirect_offset = offset;
3375 info.count_buffer = count_buffer;
3376 info.count_buffer_offset = countBufferOffset;
3377 info.stride = stride;
3379 radv_draw(cmd_buffer, &info);
3382 struct radv_dispatch_info {
3384 * Determine the layout of the grid (in block units) to be used.
3389 * A starting offset for the grid. If unaligned is set, the offset
3390 * must still be aligned.
3392 uint32_t offsets[3];
3394 * Whether it's an unaligned compute dispatch.
3399 * Indirect compute parameters resource.
3401 struct radv_buffer *indirect;
3402 uint64_t indirect_offset;
3406 radv_emit_dispatch_packets(struct radv_cmd_buffer *cmd_buffer,
3407 const struct radv_dispatch_info *info)
3409 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3410 struct radv_shader_variant *compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
3411 unsigned dispatch_initiator = cmd_buffer->device->dispatch_initiator;
3412 struct radeon_winsys *ws = cmd_buffer->device->ws;
3413 struct radeon_winsys_cs *cs = cmd_buffer->cs;
3414 struct radv_userdata_info *loc;
3416 loc = radv_lookup_user_sgpr(pipeline, MESA_SHADER_COMPUTE,
3417 AC_UD_CS_GRID_SIZE);
3419 MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
3421 if (info->indirect) {
3422 uint64_t va = radv_buffer_get_va(info->indirect->bo);
3424 va += info->indirect->offset + info->indirect_offset;
3426 radv_cs_add_buffer(ws, cs, info->indirect->bo, 8);
3428 if (loc->sgpr_idx != -1) {
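/* Copy the grid size out of the indirect buffer into the user SGPRs
 * so the shader can read it; this is assumed to back gl_NumWorkGroups
 * for indirect dispatches. */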
3429 for (unsigned i = 0; i < 3; ++i) {
3430 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
3431 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_MEM) |
3432 COPY_DATA_DST_SEL(COPY_DATA_REG));
3433 radeon_emit(cs, (va + 4 * i));
3434 radeon_emit(cs, (va + 4 * i) >> 32);
3435 radeon_emit(cs, ((R_00B900_COMPUTE_USER_DATA_0
3436 + loc->sgpr_idx * 4) >> 2) + i);
3441 if (radv_cmd_buffer_uses_mec(cmd_buffer)) {
3442 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 2, 0) |
3443 PKT3_SHADER_TYPE_S(1));
3444 radeon_emit(cs, va);
3445 radeon_emit(cs, va >> 32);
3446 radeon_emit(cs, dispatch_initiator);
3448 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0) |
3449 PKT3_SHADER_TYPE_S(1));
3451 radeon_emit(cs, va);
3452 radeon_emit(cs, va >> 32);
3454 radeon_emit(cs, PKT3(PKT3_DISPATCH_INDIRECT, 1, 0) |
3455 PKT3_SHADER_TYPE_S(1));
3457 radeon_emit(cs, dispatch_initiator);
3460 unsigned blocks[3] = { info->blocks[0], info->blocks[1], info->blocks[2] };
3461 unsigned offsets[3] = { info->offsets[0], info->offsets[1], info->offsets[2] };
3463 if (info->unaligned) {
3464 unsigned *cs_block_size = compute_shader->info.cs.block_size;
3465 unsigned remainder[3];
3467 /* If aligned, these should be an entire block size, not 0. */
3470 remainder[0] = blocks[0] + cs_block_size[0] -
3471 align_u32_npot(blocks[0], cs_block_size[0]);
3472 remainder[1] = blocks[1] + cs_block_size[1] -
3473 align_u32_npot(blocks[1], cs_block_size[1]);
3474 remainder[2] = blocks[2] + cs_block_size[2] -
3475 align_u32_npot(blocks[2], cs_block_size[2]);
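/* Worked example: blocks[0] = 10 with cs_block_size[0] = 8 gives
 * align_u32_npot(10, 8) = 16, so remainder[0] = 10 + 8 - 16 = 2
 * threads in the trailing partial group; an already-aligned count
 * yields a full block size again. */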
3477 blocks[0] = round_up_u32(blocks[0], cs_block_size[0]);
3478 blocks[1] = round_up_u32(blocks[1], cs_block_size[1]);
3479 blocks[2] = round_up_u32(blocks[2], cs_block_size[2]);
3481 for(unsigned i = 0; i < 3; ++i) {
3482 assert(offsets[i] % cs_block_size[i] == 0);
3483 offsets[i] /= cs_block_size[i];
3486 radeon_set_sh_reg_seq(cs, R_00B81C_COMPUTE_NUM_THREAD_X, 3);
3488 S_00B81C_NUM_THREAD_FULL(cs_block_size[0]) |
3489 S_00B81C_NUM_THREAD_PARTIAL(remainder[0]));
3491 S_00B81C_NUM_THREAD_FULL(cs_block_size[1]) |
3492 S_00B81C_NUM_THREAD_PARTIAL(remainder[1]));
3494 S_00B81C_NUM_THREAD_FULL(cs_block_size[2]) |
3495 S_00B81C_NUM_THREAD_PARTIAL(remainder[2]));
3497 dispatch_initiator |= S_00B800_PARTIAL_TG_EN(1);
3500 if (loc->sgpr_idx != -1) {
3501 assert(!loc->indirect);
3502 assert(loc->num_sgprs == 3);
3504 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0 +
3505 loc->sgpr_idx * 4, 3);
3506 radeon_emit(cs, blocks[0]);
3507 radeon_emit(cs, blocks[1]);
3508 radeon_emit(cs, blocks[2]);
3511 if (offsets[0] || offsets[1] || offsets[2]) {
3512 radeon_set_sh_reg_seq(cs, R_00B810_COMPUTE_START_X, 3);
3513 radeon_emit(cs, offsets[0]);
3514 radeon_emit(cs, offsets[1]);
3515 radeon_emit(cs, offsets[2]);
3517 /* The blocks in the packet are not counts but end values. */
3518 for (unsigned i = 0; i < 3; ++i)
3519 blocks[i] += offsets[i];
3521 dispatch_initiator |= S_00B800_FORCE_START_AT_000(1);
3524 radeon_emit(cs, PKT3(PKT3_DISPATCH_DIRECT, 3, 0) |
3525 PKT3_SHADER_TYPE_S(1));
3526 radeon_emit(cs, blocks[0]);
3527 radeon_emit(cs, blocks[1]);
3528 radeon_emit(cs, blocks[2]);
3529 radeon_emit(cs, dispatch_initiator);
3532 assert(cmd_buffer->cs->cdw <= cdw_max);
3536 radv_upload_compute_shader_descriptors(struct radv_cmd_buffer *cmd_buffer)
3538 radv_flush_descriptors(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
3539 radv_flush_constants(cmd_buffer, VK_SHADER_STAGE_COMPUTE_BIT);
3543 radv_dispatch(struct radv_cmd_buffer *cmd_buffer,
3544 const struct radv_dispatch_info *info)
3546 struct radv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
3548 cmd_buffer->device->physical_device->rad_info.chip_class >= CIK;
3549 bool pipeline_is_dirty = pipeline &&
3550 pipeline != cmd_buffer->state.emitted_compute_pipeline;
3552 if (cmd_buffer->state.flush_bits & (RADV_CMD_FLAG_FLUSH_AND_INV_CB |
3553 RADV_CMD_FLAG_FLUSH_AND_INV_DB |
3554 RADV_CMD_FLAG_PS_PARTIAL_FLUSH |
3555 RADV_CMD_FLAG_CS_PARTIAL_FLUSH)) {
3556 /* If we have to wait for idle, set all states first, so that
3557 * all SET packets are processed in parallel with previous draw
3558 * calls. Then upload descriptors, set shader pointers, and
3559 * dispatch, and prefetch at the end. This ensures that the
3560 * time the CUs are idle is very short. (there are only SET_SH
3561 * packets between the wait and the dispatch)
3563 radv_emit_compute_pipeline(cmd_buffer);
3564 si_emit_cache_flush(cmd_buffer);
3565 /* <-- CUs are idle here --> */
3567 radv_upload_compute_shader_descriptors(cmd_buffer);
3569 radv_emit_dispatch_packets(cmd_buffer, info);
3570 /* <-- CUs are busy here --> */
3572 /* Start prefetches after the dispatch has been started. Both
3573 * will run in parallel, but starting the dispatch first is more important. */
3576 if (has_prefetch && pipeline_is_dirty) {
3577 radv_emit_shader_prefetch(cmd_buffer,
3578 pipeline->shaders[MESA_SHADER_COMPUTE]);
3581 /* If we don't wait for idle, start prefetches first, then set
3582 * states, and dispatch at the end.
3584 si_emit_cache_flush(cmd_buffer);
3586 if (has_prefetch && pipeline_is_dirty) {
3587 radv_emit_shader_prefetch(cmd_buffer,
3588 pipeline->shaders[MESA_SHADER_COMPUTE]);
3591 radv_upload_compute_shader_descriptors(cmd_buffer);
3593 radv_emit_compute_pipeline(cmd_buffer);
3594 radv_emit_dispatch_packets(cmd_buffer, info);
3597 radv_cmd_buffer_after_draw(cmd_buffer, RADV_CMD_FLAG_CS_PARTIAL_FLUSH);
3600 void radv_CmdDispatchBase(
3601 VkCommandBuffer commandBuffer,
3609 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3610 struct radv_dispatch_info info = {};
3616 info.offsets[0] = base_x;
3617 info.offsets[1] = base_y;
3618 info.offsets[2] = base_z;
3619 radv_dispatch(cmd_buffer, &info);
3622 void radv_CmdDispatch(
3623 VkCommandBuffer commandBuffer,
3628 radv_CmdDispatchBase(commandBuffer, 0, 0, 0, x, y, z);
3631 void radv_CmdDispatchIndirect(
3632 VkCommandBuffer commandBuffer,
3634 VkDeviceSize offset)
3636 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3637 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3638 struct radv_dispatch_info info = {};
3640 info.indirect = buffer;
3641 info.indirect_offset = offset;
3643 radv_dispatch(cmd_buffer, &info);
3646 void radv_unaligned_dispatch(
3647 struct radv_cmd_buffer *cmd_buffer,
3652 struct radv_dispatch_info info = {};
3659 radv_dispatch(cmd_buffer, &info);
3662 void radv_CmdEndRenderPass(
3663 VkCommandBuffer commandBuffer)
3665 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
3667 radv_subpass_barrier(cmd_buffer, &cmd_buffer->state.pass->end_barrier);
3669 radv_cmd_buffer_resolve_subpass(cmd_buffer);
3671 for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
3672 VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
3673 radv_handle_subpass_image_transition(cmd_buffer,
3674 (VkAttachmentReference){i, layout});
3677 vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
3679 cmd_buffer->state.pass = NULL;
3680 cmd_buffer->state.subpass = NULL;
3681 cmd_buffer->state.attachments = NULL;
3682 cmd_buffer->state.framebuffer = NULL;
/*
 * For HTILE we have the following interesting clear words:
 *   0xfffff30f: Uncompressed, full depth range, for depth+stencil HTILE
 *   0xfffc000f: Uncompressed, full depth range, for depth only HTILE.
 *   0xfffffff0: Clear depth to 1.0
 *   0x00000000: Clear depth to 0.0
 */
static void radv_initialize_htile(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image,
				  const VkImageSubresourceRange *range,
				  uint32_t clear_word)
{
	assert(range->baseMipLevel == 0);
	assert(range->levelCount == 1 || range->levelCount == VK_REMAINING_MIP_LEVELS);
	unsigned layer_count = radv_get_layerCount(image, range);
	uint64_t size = image->surface.htile_slice_size * layer_count;
	uint64_t offset = image->offset + image->htile_offset +
			  image->surface.htile_slice_size * range->baseArrayLayer;
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

	state->flush_bits |= radv_fill_buffer(cmd_buffer, image->bo, offset,
					      size, clear_word);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
}

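/* Worked example (hypothetical sizes): for an array depth image with
 * htile_slice_size = 0x8000, baseArrayLayer = 2 and three layers in the
 * range, the fill above writes clear_word over size = 3 * 0x8000 bytes at
 * offset = image->offset + image->htile_offset + 2 * 0x8000, i.e. exactly
 * the HTILE words of the selected layers. */
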
static void radv_handle_depth_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       unsigned src_queue_mask,
					       unsigned dst_queue_mask,
					       const VkImageSubresourceRange *range,
					       VkImageAspectFlags pending_clears)
{
	if (!radv_image_has_htile(image))
		return;

	if (dst_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL &&
	    (pending_clears & vk_format_aspects(image->vk_format)) == vk_format_aspects(image->vk_format) &&
	    cmd_buffer->state.render_area.offset.x == 0 &&
	    cmd_buffer->state.render_area.offset.y == 0 &&
	    cmd_buffer->state.render_area.extent.width == image->info.width &&
	    cmd_buffer->state.render_area.extent.height == image->info.height) {
		/* The clear will initialize htile. */
		return;
	} else if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED &&
		   radv_layout_has_htile(image, dst_layout, dst_queue_mask)) {
		/* TODO: merge with the clear if applicable */
		radv_initialize_htile(cmd_buffer, image, range, 0);
	} else if (!radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
		   radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
		uint32_t clear_value = vk_format_is_stencil(image->vk_format) ? 0xfffff30f : 0xfffc000f;
		radv_initialize_htile(cmd_buffer, image, range, clear_value);
	} else if (radv_layout_is_htile_compressed(image, src_layout, src_queue_mask) &&
		   !radv_layout_is_htile_compressed(image, dst_layout, dst_queue_mask)) {
		VkImageSubresourceRange local_range = *range;
		local_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
		local_range.baseMipLevel = 0;
		local_range.levelCount = 1;

		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
						RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;

		radv_decompress_depth_image_inplace(cmd_buffer, image, &local_range);

		cmd_buffer->state.flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
						RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
	}
}

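/* Summary of the HTILE transitions handled above:
 *   UNDEFINED          -> HTILE-enabled layout: initialize HTILE to 0
 *   HTILE-uncompressed -> HTILE-compressed:     write the "uncompressed,
 *                                               full depth range" word
 *   HTILE-compressed   -> HTILE-uncompressed:   in-place depth decompress,
 *                                               bracketed by DB flushes
 */
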
static void radv_initialise_cmask(struct radv_cmd_buffer *cmd_buffer,
				  struct radv_image *image, uint32_t value)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	state->flush_bits |= radv_clear_cmask(cmd_buffer, image, value);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}

void radv_initialize_dcc(struct radv_cmd_buffer *cmd_buffer,
			 struct radv_image *image, uint32_t value)
{
	struct radv_cmd_state *state = &cmd_buffer->state;

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;

	state->flush_bits |= radv_clear_dcc(cmd_buffer, image, value);

	state->flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
			     RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
}

/**
 * Initialize DCC/FMASK/CMASK metadata for a color image.
 */
static void radv_init_color_image_metadata(struct radv_cmd_buffer *cmd_buffer,
					   struct radv_image *image,
					   VkImageLayout src_layout,
					   VkImageLayout dst_layout,
					   unsigned src_queue_mask,
					   unsigned dst_queue_mask)
{
	if (radv_image_has_cmask(image)) {
		uint32_t value = 0xffffffffu; /* Fully expanded mode. */

		/* TODO: clarify this. */
		if (radv_image_has_fmask(image)) {
			value = 0xccccccccu;
		}

		radv_initialise_cmask(cmd_buffer, image, value);
	}

	if (radv_image_has_dcc(image)) {
		uint32_t value = 0xffffffffu; /* Fully expanded mode. */

		if (radv_layout_dcc_compressed(image, dst_layout,
					       dst_queue_mask)) {
			value = 0x20202020u;
		}

		radv_initialize_dcc(cmd_buffer, image, value);
	}
}

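/* Note on the magic values: 0xffffffff is the fully expanded
 * (uncompressed) pattern for both CMASK and DCC. 0xcccccccc is assumed to
 * be the FMASK-compatible CMASK pattern, and 0x20202020 the "compressed,
 * no fast-clear color" DCC pattern, which is why it is only used when the
 * destination layout keeps DCC compression enabled. */
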
/**
 * Handle color image transitions for DCC/FMASK/CMASK.
 */
static void radv_handle_color_image_transition(struct radv_cmd_buffer *cmd_buffer,
					       struct radv_image *image,
					       VkImageLayout src_layout,
					       VkImageLayout dst_layout,
					       unsigned src_queue_mask,
					       unsigned dst_queue_mask,
					       const VkImageSubresourceRange *range)
{
	if (src_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
		radv_init_color_image_metadata(cmd_buffer, image,
					       src_layout, dst_layout,
					       src_queue_mask, dst_queue_mask);
		return;
	}

	if (radv_image_has_dcc(image)) {
		if (src_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
			radv_initialize_dcc(cmd_buffer, image, 0xffffffffu);
		} else if (radv_layout_dcc_compressed(image, src_layout, src_queue_mask) &&
			   !radv_layout_dcc_compressed(image, dst_layout, dst_queue_mask)) {
			radv_decompress_dcc(cmd_buffer, image, range);
		} else if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
			   !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
			radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
		}
	} else if (radv_image_has_cmask(image) || radv_image_has_fmask(image)) {
		if (radv_layout_can_fast_clear(image, src_layout, src_queue_mask) &&
		    !radv_layout_can_fast_clear(image, dst_layout, dst_queue_mask)) {
			radv_fast_clear_flush_image_inplace(cmd_buffer, image, range);
		}
	}
}

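/* Summary of the color transitions handled above:
 *   UNDEFINED        -> any:                 (re)initialize metadata
 *   PREINITIALIZED   -> any (DCC image):     reset DCC to fully expanded
 *   DCC-compressed   -> not DCC-compressed:  DCC decompress
 *   fast-clearable   -> not fast-clearable:  flush fast clears
 *                                            (FMASK/CMASK eliminate)
 */
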
static void radv_handle_image_transition(struct radv_cmd_buffer *cmd_buffer,
					 struct radv_image *image,
					 VkImageLayout src_layout,
					 VkImageLayout dst_layout,
					 uint32_t src_family,
					 uint32_t dst_family,
					 const VkImageSubresourceRange *range,
					 VkImageAspectFlags pending_clears)
{
	if (image->exclusive && src_family != dst_family) {
		/* This is an acquire or a release operation and there will be
		 * a corresponding release/acquire. Do the transition in the
		 * most flexible queue. */

		assert(src_family == cmd_buffer->queue_family_index ||
		       dst_family == cmd_buffer->queue_family_index);

		if (cmd_buffer->queue_family_index == RADV_QUEUE_TRANSFER)
			return;

		if (cmd_buffer->queue_family_index == RADV_QUEUE_COMPUTE &&
		    (src_family == RADV_QUEUE_GENERAL ||
		     dst_family == RADV_QUEUE_GENERAL))
			return;
	}

	unsigned src_queue_mask =
		radv_image_queue_family_mask(image, src_family,
					     cmd_buffer->queue_family_index);
	unsigned dst_queue_mask =
		radv_image_queue_family_mask(image, dst_family,
					     cmd_buffer->queue_family_index);

	if (vk_format_is_depth(image->vk_format)) {
		radv_handle_depth_image_transition(cmd_buffer, image,
						   src_layout, dst_layout,
						   src_queue_mask, dst_queue_mask,
						   range, pending_clears);
	} else {
		radv_handle_color_image_transition(cmd_buffer, image,
						   src_layout, dst_layout,
						   src_queue_mask, dst_queue_mask,
						   range);
	}
}

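/* Example: an exclusive image released from the GENERAL to the COMPUTE
 * queue family is transitioned exactly once. The release recorded on the
 * GENERAL queue performs it (the most flexible queue), while the matching
 * acquire on the COMPUTE queue hits the early return above. */
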
void radv_CmdPipelineBarrier(
	VkCommandBuffer                             commandBuffer,
	VkPipelineStageFlags                        srcStageMask,
	VkPipelineStageFlags                        destStageMask,
	VkDependencyFlags                           dependencyFlags,
	uint32_t                                    memoryBarrierCount,
	const VkMemoryBarrier*                      pMemoryBarriers,
	uint32_t                                    bufferMemoryBarrierCount,
	const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
	uint32_t                                    imageMemoryBarrierCount,
	const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	enum radv_cmd_flush_bits src_flush_bits = 0;
	enum radv_cmd_flush_bits dst_flush_bits = 0;

	for (uint32_t i = 0; i < memoryBarrierCount; i++) {
		src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
							NULL);
	}

	for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
		src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
							NULL);
	}

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
		src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
		dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
							image);
	}

	radv_stage_flush(cmd_buffer, srcStageMask);
	cmd_buffer->state.flush_bits |= src_flush_bits;

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
					     &pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	cmd_buffer->state.flush_bits |= dst_flush_bits;
}

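/* Sketch of the flow above for a common barrier (the exact flush bits
 * depend on radv_src_access_flush()/radv_dst_access_flush(), defined
 * elsewhere in this file): a barrier from
 * VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT in COLOR_ATTACHMENT_OPTIMAL to
 * VK_ACCESS_SHADER_READ_BIT in SHADER_READ_ONLY_OPTIMAL first accumulates
 * the CB flushes as src_flush_bits, then lets
 * radv_handle_image_transition() record any needed DCC/CMASK decompress,
 * and finally adds the texture cache invalidations as dst_flush_bits. */
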
static void write_event(struct radv_cmd_buffer *cmd_buffer,
			struct radv_event *event,
			VkPipelineStageFlags stageMask,
			unsigned value)
{
	struct radeon_winsys_cs *cs = cmd_buffer->cs;
	uint64_t va = radv_buffer_get_va(event->bo);

	radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);

	MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);

	/* TODO: this is overkill. Probably should figure something out from
	 * the stage mask. */

	si_cs_emit_write_event_eop(cs,
				   cmd_buffer->state.predicating,
				   cmd_buffer->device->physical_device->rad_info.chip_class,
				   radv_cmd_buffer_uses_mec(cmd_buffer),
				   V_028A90_BOTTOM_OF_PIPE_TS, 0,
				   1, va, 2, value);

	assert(cmd_buffer->cs->cdw <= cdw_max);
}

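/* write_event() stores 'value' into the event BO via a bottom-of-pipe EOP
 * event, so vkCmdSetEvent (value = 1) and vkCmdResetEvent (value = 0)
 * below differ only in the value written. */
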
void radv_CmdSetEvent(VkCommandBuffer commandBuffer,
		      VkEvent _event,
		      VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 1);
}

void radv_CmdResetEvent(VkCommandBuffer commandBuffer,
			VkEvent _event,
			VkPipelineStageFlags stageMask)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	RADV_FROM_HANDLE(radv_event, event, _event);

	write_event(cmd_buffer, event, stageMask, 0);
}

void radv_CmdWaitEvents(VkCommandBuffer commandBuffer,
			uint32_t eventCount,
			const VkEvent* pEvents,
			VkPipelineStageFlags srcStageMask,
			VkPipelineStageFlags dstStageMask,
			uint32_t memoryBarrierCount,
			const VkMemoryBarrier* pMemoryBarriers,
			uint32_t bufferMemoryBarrierCount,
			const VkBufferMemoryBarrier* pBufferMemoryBarriers,
			uint32_t imageMemoryBarrierCount,
			const VkImageMemoryBarrier* pImageMemoryBarriers)
{
	RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
	struct radeon_winsys_cs *cs = cmd_buffer->cs;

	for (unsigned i = 0; i < eventCount; ++i) {
		RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
		uint64_t va = radv_buffer_get_va(event->bo);

		radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);

		MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);

		si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
		assert(cmd_buffer->cs->cdw <= cdw_max);
	}

	for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
		RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);

		radv_handle_image_transition(cmd_buffer, image,
					     pImageMemoryBarriers[i].oldLayout,
					     pImageMemoryBarriers[i].newLayout,
					     pImageMemoryBarriers[i].srcQueueFamilyIndex,
					     pImageMemoryBarriers[i].dstQueueFamilyIndex,
					     &pImageMemoryBarriers[i].subresourceRange,
					     0);
	}

	/* TODO: figure out how to do memory barriers without waiting */
	cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
					RADV_CMD_FLAG_INV_GLOBAL_L2 |
					RADV_CMD_FLAG_INV_VMEM_L1 |
					RADV_CMD_FLAG_INV_SMEM_L1;
}

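/* si_emit_wait_fence() above emits a WAIT_REG_MEM packet, so the command
 * processor stalls until the event BO reads 1, i.e. until the
 * bottom-of-pipe write from the corresponding vkCmdSetEvent() has landed.
 * The blanket flush afterwards stands in for per-barrier handling (see
 * the TODO). */
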
void radv_CmdSetDeviceMask(VkCommandBuffer commandBuffer,
			   uint32_t deviceMask)
{
	/* No-op */
}