MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cmd_buffer->cs, 7);
++cmd_buffer->state.trace_id;
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
radv_emit_write_data_packet(cs, va, 1, &cmd_buffer->state.trace_id);
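+ /* Also encode the trace ID into a NOP packet so it is visible in IB
+  * dumps when debugging GPU hangs (assumed purpose of the trace point).
+  */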
radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
radeon_emit(cs, AC_ENCODE_TRACE_POINT(cmd_buffer->state.trace_id));
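+ /* Save the bound pipeline pointer, split into two dwords, so it can
+  * be read back from the trace BO after a hang.
+  */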
data[0] = (uintptr_t)pipeline;
data[1] = (uintptr_t)pipeline >> 32;
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
radv_emit_write_data_packet(cs, va, 2, data);
}
data[i * 2 + 1] = (uintptr_t)set >> 32;
}
- radv_cs_add_buffer(device->ws, cs, device->trace_bo, 8);
radv_emit_write_data_packet(cs, va, MAX_SETS * 2, data);
}
uint32_t sh_base = pipeline->user_data_0[stage];
struct radv_userdata_locations *locs =
&pipeline->shaders[stage]->info.user_sgprs_locs;
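+ /* Only sets with a direct user SGPR location can have their
+  * pointers emitted here; indirectly addressed sets are excluded
+  * from descriptor_sets_enabled.
+  */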
- unsigned mask;
+ unsigned mask = locs->descriptor_sets_enabled;
- mask = descriptors_state->dirty & descriptors_state->valid;
-
- for (int i = 0; i < MAX_SETS; i++) {
- struct radv_userdata_info *loc = &locs->descriptor_sets[i];
- if (loc->sgpr_idx != -1 && !loc->indirect)
- continue;
- mask &= ~(1 << i);
- }
+ mask &= descriptors_state->dirty & descriptors_state->valid;
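+ /* mask now holds exactly the sets whose pointers need to be (re)emitted. */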
while (mask) {
int start, count;
struct radv_pipeline *pipeline = stages & VK_SHADER_STAGE_COMPUTE_BIT
? cmd_buffer->state.compute_pipeline
: cmd_buffer->state.pipeline;
+ VkPipelineBindPoint bind_point = stages & VK_SHADER_STAGE_COMPUTE_BIT ?
+ VK_PIPELINE_BIND_POINT_COMPUTE :
+ VK_PIPELINE_BIND_POINT_GRAPHICS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, bind_point);
struct radv_pipeline_layout *layout = pipeline->layout;
struct radv_shader_variant *shader, *prev_shader;
unsigned offset;
return;
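+ /* Copy the push constants first, then the dynamic buffer descriptors
+  * (16 bytes each) immediately after them.
+  */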
memcpy(ptr, cmd_buffer->push_constants, layout->push_constant_size);
- memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
+ memcpy((char*)ptr + layout->push_constant_size,
+ descriptors_state->dynamic_buffers,
16 * layout->dynamic_offset_count);
va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
static enum radv_cmd_flush_bits
radv_src_access_flush(struct radv_cmd_buffer *cmd_buffer,
- VkAccessFlags src_flags)
+ VkAccessFlags src_flags,
+ struct radv_image *image)
{
enum radv_cmd_flush_bits flush_bits = 0;
uint32_t b;
flush_bits |= RADV_CMD_FLAG_WRITEBACK_GLOBAL_L2;
break;
case VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT:
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
- RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB;
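+ /* CB metadata (CMASK/FMASK/DCC) only needs to be flushed when the
+  * image actually has some; with no image we have to assume the worst.
+  */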
+ if (!image || radv_image_has_CB_metadata(image)) {
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB_META;
+ }
break;
case VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT:
- flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB |
- RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB;
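+ /* Likewise, DB metadata only needs flushing when the image has HTILE. */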
+ if (!image || radv_image_has_htile(image)) {
+ flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_DB_META;
+ }
break;
case VK_ACCESS_TRANSFER_WRITE_BIT:
flush_bits |= RADV_CMD_FLAG_FLUSH_AND_INV_CB |
static void radv_subpass_barrier(struct radv_cmd_buffer *cmd_buffer, const struct radv_subpass_barrier *barrier)
{
- cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask);
+ cmd_buffer->state.flush_bits |= radv_src_access_flush(cmd_buffer, barrier->src_access_mask,
+ NULL);
radv_stage_flush(cmd_buffer, barrier->src_stage_mask);
cmd_buffer->state.flush_bits |= radv_dst_access_flush(cmd_buffer, barrier->dst_access_mask,
NULL);
}
static void radv_handle_subpass_image_transition(struct radv_cmd_buffer *cmd_buffer,
- VkAttachmentReference att)
+ struct radv_subpass_attachment att)
{
unsigned idx = att.attachment;
struct radv_image_view *view = cmd_buffer->state.framebuffer->attachments[idx].attachment;
radv_cmd_buffer_set_subpass(cmd_buffer, subpass, false);
}
- if (unlikely(cmd_buffer->device->trace_bo))
+ if (unlikely(cmd_buffer->device->trace_bo)) {
+ struct radv_device *device = cmd_buffer->device;
+
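+ /* The trace BO is referenced by every trace packet, so add it to
+  * the BO list once per command buffer instead of at each trace point.
+  */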
+ radv_cs_add_buffer(device->ws, cmd_buffer->cs,
+ device->trace_bo, 8);
+
radv_cmd_buffer_trace_emit(cmd_buffer);
+ }
cmd_buffer->status = RADV_CMD_BUFFER_STATUS_RECORDING;
unsigned dyn_idx = 0;
const bool no_dynamic_bounds = cmd_buffer->device->instance->debug_flags & RADV_DEBUG_NO_DYNAMIC_BOUNDS;
+ struct radv_descriptor_state *descriptors_state =
+ radv_get_descriptors_state(cmd_buffer, pipelineBindPoint);
for (unsigned i = 0; i < descriptorSetCount; ++i) {
unsigned idx = i + firstSet;
for(unsigned j = 0; j < set->layout->dynamic_offset_count; ++j, ++dyn_idx) {
unsigned idx = j + layout->set[i + firstSet].dynamic_offset_start;
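+ /* Dynamic buffer descriptors are 4 dwords each and are tracked in
+  * the per-bind-point descriptor state.
+  */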
- uint32_t *dst = cmd_buffer->dynamic_buffers + idx * 4;
+ uint32_t *dst = descriptors_state->dynamic_buffers + idx * 4;
assert(dyn_idx < dynamicOffsetCount);
struct radv_descriptor_range *range = set->dynamic_descriptors + j;
si_emit_cache_flush(cmd_buffer);
}
+ /* Make sure CP DMA is idle at the end of IBs because the kernel
+ * doesn't wait for it.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
if (!cmd_buffer->device->ws->cs_finalize(cmd_buffer->cs))
{
struct radv_pipeline *pipeline = cmd_buffer->state.pipeline;
for (unsigned stage = 0; stage < MESA_SHADER_STAGES; ++stage) {
- if (!pipeline->shaders[stage])
+ if (!radv_get_shader(pipeline, stage))
continue;
+
struct radv_userdata_info *loc = radv_lookup_user_sgpr(pipeline, stage, AC_UD_VIEW_INDEX);
if (loc->sgpr_idx == -1)
continue;
for (unsigned i = 0; i < cmd_buffer->state.framebuffer->attachment_count; ++i) {
VkImageLayout layout = cmd_buffer->state.pass->attachments[i].final_layout;
radv_handle_subpass_image_transition(cmd_buffer,
- (VkAttachmentReference){i, layout});
+ (struct radv_subpass_attachment){i, layout});
}
vk_free(&cmd_buffer->pool->alloc, cmd_buffer->state.attachments);
}
}
-void radv_CmdPipelineBarrier(
- VkCommandBuffer commandBuffer,
- VkPipelineStageFlags srcStageMask,
- VkPipelineStageFlags destStageMask,
- VkBool32 byRegion,
- uint32_t memoryBarrierCount,
- const VkMemoryBarrier* pMemoryBarriers,
- uint32_t bufferMemoryBarrierCount,
- const VkBufferMemoryBarrier* pBufferMemoryBarriers,
- uint32_t imageMemoryBarrierCount,
- const VkImageMemoryBarrier* pImageMemoryBarriers)
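+/* Parameters common to CmdPipelineBarrier and CmdWaitEvents. */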
+struct radv_barrier_info {
+ uint32_t eventCount;
+ const VkEvent *pEvents;
+ VkPipelineStageFlags srcStageMask;
+};
+
+static void
+radv_barrier(struct radv_cmd_buffer *cmd_buffer,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier *pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier *pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier *pImageMemoryBarriers,
+ const struct radv_barrier_info *info)
{
- RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radeon_cmdbuf *cs = cmd_buffer->cs;
enum radv_cmd_flush_bits src_flush_bits = 0;
enum radv_cmd_flush_bits dst_flush_bits = 0;
+ for (unsigned i = 0; i < info->eventCount; ++i) {
+ RADV_FROM_HANDLE(radv_event, event, info->pEvents[i]);
+ uint64_t va = radv_buffer_get_va(event->bo);
+
+ radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
+
+ MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
+
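+ /* Make the CP wait until the event value at 'va' becomes 1 (signaled). */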
+ si_emit_wait_fence(cs, va, 1, 0xffffffff);
+ assert(cmd_buffer->cs->cdw <= cdw_max);
+ }
+
for (uint32_t i = 0; i < memoryBarrierCount; i++) {
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask);
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pMemoryBarriers[i].srcAccessMask,
+ NULL);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pMemoryBarriers[i].dstAccessMask,
NULL);
}
for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask);
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pBufferMemoryBarriers[i].srcAccessMask,
+ NULL);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pBufferMemoryBarriers[i].dstAccessMask,
NULL);
}
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
- src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask);
+
+ src_flush_bits |= radv_src_access_flush(cmd_buffer, pImageMemoryBarriers[i].srcAccessMask,
+ image);
dst_flush_bits |= radv_dst_access_flush(cmd_buffer, pImageMemoryBarriers[i].dstAccessMask,
image);
}
- radv_stage_flush(cmd_buffer, srcStageMask);
+ radv_stage_flush(cmd_buffer, info->srcStageMask);
cmd_buffer->state.flush_bits |= src_flush_bits;
for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
0);
}
+ /* Make sure CP DMA is idle because the driver might have performed a
+ * DMA operation for copying or filling buffers/images.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
cmd_buffer->state.flush_bits |= dst_flush_bits;
}
+void radv_CmdPipelineBarrier(
+ VkCommandBuffer commandBuffer,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags destStageMask,
+ VkBool32 byRegion,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers)
+{
+ RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
+ struct radv_barrier_info info;
+
+ info.eventCount = 0;
+ info.pEvents = NULL;
+ info.srcStageMask = srcStageMask;
+
+ radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
+}
+
static void write_event(struct radv_cmd_buffer *cmd_buffer,
struct radv_event *event,
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 18);
- /* TODO: this is overkill. Probably should figure something out from
- * the stage mask. */
+ /* Flags that only require a top-of-pipe event. */
+ VkPipelineStageFlags top_of_pipe_flags =
+ VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT;
+
+ /* Flags that only require a post-index-fetch event. */
+ VkPipelineStageFlags post_index_fetch_flags =
+ top_of_pipe_flags |
+ VK_PIPELINE_STAGE_DRAW_INDIRECT_BIT |
+ VK_PIPELINE_STAGE_VERTEX_INPUT_BIT;
+
+ /* Make sure CP DMA is idle because the driver might have performed a
+ * DMA operation for copying or filling buffers/images.
+ */
+ si_cp_dma_wait_for_idle(cmd_buffer);
+
+ /* TODO: Emit EOS events for syncing PS/CS stages. */
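+ /* Pick the cheapest packet that still orders the event write after
+  * all work implied by stageMask.
+  */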
- si_cs_emit_write_event_eop(cs,
- cmd_buffer->state.predicating,
- cmd_buffer->device->physical_device->rad_info.chip_class,
- radv_cmd_buffer_uses_mec(cmd_buffer),
- V_028A90_BOTTOM_OF_PIPE_TS, 0,
- EOP_DATA_SEL_VALUE_32BIT, va, 2, value);
+ if (!(stageMask & ~top_of_pipe_flags)) {
+ /* Just need to sync the PFP engine. */
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_PFP));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ } else if (!(stageMask & ~post_index_fetch_flags)) {
+ /* Sync ME because PFP reads index and indirect buffers. */
+ radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
+ radeon_emit(cs, S_370_DST_SEL(V_370_MEM_ASYNC) |
+ S_370_WR_CONFIRM(1) |
+ S_370_ENGINE_SEL(V_370_ME));
+ radeon_emit(cs, va);
+ radeon_emit(cs, va >> 32);
+ radeon_emit(cs, value);
+ } else {
+ /* Otherwise, sync all prior GPU work using an EOP event. */
+ si_cs_emit_write_event_eop(cs,
+ cmd_buffer->device->physical_device->rad_info.chip_class,
+ radv_cmd_buffer_uses_mec(cmd_buffer),
+ V_028A90_BOTTOM_OF_PIPE_TS, 0,
+ EOP_DATA_SEL_VALUE_32BIT, va, 2, value);
+ }
assert(cmd_buffer->cs->cdw <= cdw_max);
}
const VkImageMemoryBarrier* pImageMemoryBarriers)
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
- struct radeon_cmdbuf *cs = cmd_buffer->cs;
-
- for (unsigned i = 0; i < eventCount; ++i) {
- RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
- uint64_t va = radv_buffer_get_va(event->bo);
-
- radv_cs_add_buffer(cmd_buffer->device->ws, cs, event->bo, 8);
-
- MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws, cs, 7);
-
- si_emit_wait_fence(cs, false, va, 1, 0xffffffff);
- assert(cmd_buffer->cs->cdw <= cdw_max);
- }
+ struct radv_barrier_info info;
+ info.eventCount = eventCount;
+ info.pEvents = pEvents;
+ info.srcStageMask = 0;
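+ /* vkCmdSetEvent already performed the source-stage synchronization
+  * on the signaling side, so no srcStageMask is needed here (assumed
+  * rationale for passing 0).
+  */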
- for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
- RADV_FROM_HANDLE(radv_image, image, pImageMemoryBarriers[i].image);
-
- radv_handle_image_transition(cmd_buffer, image,
- pImageMemoryBarriers[i].oldLayout,
- pImageMemoryBarriers[i].newLayout,
- pImageMemoryBarriers[i].srcQueueFamilyIndex,
- pImageMemoryBarriers[i].dstQueueFamilyIndex,
- &pImageMemoryBarriers[i].subresourceRange,
- 0);
- }
-
- /* TODO: figure out how to do memory barriers without waiting */
- cmd_buffer->state.flush_bits |= RADV_CMD_FLUSH_AND_INV_FRAMEBUFFER |
- RADV_CMD_FLAG_INV_GLOBAL_L2 |
- RADV_CMD_FLAG_INV_VMEM_L1 |
- RADV_CMD_FLAG_INV_SMEM_L1;
+ radv_barrier(cmd_buffer, memoryBarrierCount, pMemoryBarriers,
+ bufferMemoryBarrierCount, pBufferMemoryBarriers,
+ imageMemoryBarrierCount, pImageMemoryBarriers, &info);
}