/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Marek Olšák <marek.olsak@amd.com>
 */
/* Resource binding slots and sampler states (each described with 8 or
 * 4 dwords) are stored in lists in memory which is accessed by shaders
 * using scalar load instructions.
 *
 * This file is responsible for managing such lists. It keeps a copy of all
 * descriptors in CPU memory and re-uploads a whole list if some slots have
 * been changed.
 *
 * This code is also responsible for updating shader pointers to those lists.
 *
 * Note that CP DMA can't be used for updating the lists, because a GPU hang
 * could leave the list in a mid-IB state and the next IB would get wrong
 * descriptors and the whole context would be unusable at that point.
 * (Note: register shadowing can't be used for the same reason.)
 *
 * Also, uploading descriptors to newly allocated memory doesn't require
 * a KCACHE flush.
 */
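
/* Flow summary (derived from the code below):
 *  1) state-setter callbacks write descriptors into the CPU copy of a list
 *     and set list_dirty,
 *  2) si_upload_descriptors() copies a dirty list into a fresh buffer
 *     allocated from u_upload_mgr,
 *  3) si_emit_shader_userdata() emits SET_SH_REG packets that point the
 *     user-data SGPRs at the uploaded lists.
 */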

#include "radeon/r600_cs.h"
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"

/* NULL image and buffer descriptor.
 *
 * For images, all fields must be zero except for the swizzle, which
 * supports arbitrary combinations of 0s and 1s. The texture type must be
 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
 *
 * For buffers, all fields must be zero. If they are not, the hw hangs.
 *
 * This is the only reason why the buffer descriptor must be in words [4:7].
 */
static uint32_t null_descriptor[8] = {
	0,
	0,
	0,
	S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
	S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
	/* the rest must contain zeros, which is also used by the buffer
	 * descriptor */
};

static void si_init_descriptors(struct si_descriptors *desc,
				unsigned shader_userdata_index,
				unsigned element_dw_size,
				unsigned num_elements)
{
	int i;

	assert(num_elements <= sizeof(desc->enabled_mask)*8);

	desc->list = CALLOC(num_elements, element_dw_size * 4);
	desc->element_dw_size = element_dw_size;
	desc->num_elements = num_elements;
	desc->list_dirty = true; /* upload the list before the next draw */
	desc->shader_userdata_offset = shader_userdata_index * 4;
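
	/* Note: CALLOC zero-fills the list, and an all-zero 4-dword element
	 * is already the null buffer descriptor (see the comment above
	 * null_descriptor), so only 8-dword image lists need patching. */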
	/* Initialize the array to NULL descriptors if the element size is 8. */
	if (element_dw_size == 8)
		for (i = 0; i < num_elements; i++)
			memcpy(desc->list + i*element_dw_size, null_descriptor,
			       sizeof(null_descriptor));
}

static void si_release_descriptors(struct si_descriptors *desc)
{
	pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
	FREE(desc->list);
}
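
/* Upload the dirty CPU copy of a descriptor list into a fresh GPU buffer
 * and remember that the shader pointer has to be re-emitted. Returns false
 * on allocation failure, in which case the draw call should be skipped. */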

static bool si_upload_descriptors(struct si_context *sctx,
				  struct si_descriptors *desc)
{
	unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
	void *ptr;

	if (!desc->list_dirty)
		return true;

	u_upload_alloc(sctx->b.uploader, 0, list_size,
		       &desc->buffer_offset,
		       (struct pipe_resource**)&desc->buffer, &ptr);
	if (!desc->buffer)
		return false; /* skip the draw call */

	util_memcpy_cpu_to_le32(ptr, desc->list, list_size);

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, desc->buffer,
				  RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

	desc->list_dirty = false;
	desc->pointer_dirty = true;
	si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
	return true;
}

/* SAMPLER VIEWS */

static void si_release_sampler_views(struct si_sampler_views *views)
{
	int i;

	for (i = 0; i < Elements(views->views); i++) {
		pipe_sampler_view_reference(&views->views[i], NULL);
	}
	si_release_descriptors(&views->desc);
}

static enum radeon_bo_priority si_get_resource_ro_priority(struct r600_resource *res)
{
	if (res->b.b.target == PIPE_BUFFER)
		return RADEON_PRIO_SHADER_BUFFER_RO;

	if (res->b.b.nr_samples > 1)
		return RADEON_PRIO_SHADER_TEXTURE_MSAA;

	return RADEON_PRIO_SHADER_TEXTURE_RO;
}
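
/* The *_begin_new_cs functions below re-add every buffer referenced by the
 * bound state to the new command stream; buffer-list entries from the
 * previous IB do not carry over after a flush. */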

static void si_sampler_views_begin_new_cs(struct si_context *sctx,
					  struct si_sampler_views *views)
{
	uint64_t mask = views->desc.enabled_mask;

	/* Add relocations to the CS. */
	while (mask) {
		int i = u_bit_scan64(&mask);
		struct si_sampler_view *rview =
			(struct si_sampler_view*)views->views[i];

		if (!rview->resource)
			continue;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
					  rview->resource, RADEON_USAGE_READ,
					  si_get_resource_ro_priority(rview->resource));
	}

	if (!views->desc.buffer)
		return;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
}

static void si_set_sampler_view(struct si_context *sctx, unsigned shader,
				unsigned slot, struct pipe_sampler_view *view,
				unsigned *view_desc)
{
	struct si_sampler_views *views = &sctx->samplers[shader].views;

	if (views->views[slot] == view)
		return;

	if (view) {
		struct si_sampler_view *rview =
			(struct si_sampler_view*)view;

		if (rview->resource)
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
						  rview->resource, RADEON_USAGE_READ,
						  si_get_resource_ro_priority(rview->resource));

		pipe_sampler_view_reference(&views->views[slot], view);
		memcpy(views->desc.list + slot*8, view_desc, 8*4);
		views->desc.enabled_mask |= 1llu << slot;
	} else {
		pipe_sampler_view_reference(&views->views[slot], NULL);
		memcpy(views->desc.list + slot*8, null_descriptor, 8*4);
		views->desc.enabled_mask &= ~(1llu << slot);
	}

	views->desc.list_dirty = true;
}
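
/* Pipe callback: bind sampler views for one shader stage. Each MSAA color
 * texture also binds its FMASK descriptor at SI_FMASK_TEX_OFFSET + slot,
 * and the depth/compressed-color slot masks are kept in sync so the
 * affected textures can be decompressed before they are sampled. */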

static void si_set_sampler_views(struct pipe_context *ctx,
				 unsigned shader, unsigned start,
				 unsigned count,
				 struct pipe_sampler_view **views)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_textures_info *samplers = &sctx->samplers[shader];
	struct si_sampler_view **rviews = (struct si_sampler_view **)views;
	int i;

	if (!count || shader >= SI_NUM_SHADERS)
		return;

	for (i = 0; i < count; i++) {
		unsigned slot = start + i;

		if (!views || !views[i]) {
			samplers->depth_texture_mask &= ~(1 << slot);
			samplers->compressed_colortex_mask &= ~(1 << slot);
			si_set_sampler_view(sctx, shader, slot, NULL, NULL);
			si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
					    NULL, NULL);
			continue;
		}

		si_set_sampler_view(sctx, shader, slot, views[i], rviews[i]->state);

		if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
			struct r600_texture *rtex =
				(struct r600_texture*)views[i]->texture;

			if (rtex->is_depth && !rtex->is_flushing_texture) {
				samplers->depth_texture_mask |= 1 << slot;
			} else {
				samplers->depth_texture_mask &= ~(1 << slot);
			}
			if (rtex->cmask.size || rtex->fmask.size) {
				samplers->compressed_colortex_mask |= 1 << slot;
			} else {
				samplers->compressed_colortex_mask &= ~(1 << slot);
			}

			if (rtex->fmask.size) {
				si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
						    views[i], rviews[i]->fmask_state);
			} else {
				si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
						    NULL, NULL);
			}
		} else {
			samplers->depth_texture_mask &= ~(1 << slot);
			samplers->compressed_colortex_mask &= ~(1 << slot);
			si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
					    NULL, NULL);
		}
	}
}

/* SAMPLER STATES */

static void si_sampler_states_begin_new_cs(struct si_context *sctx,
					   struct si_sampler_states *states)
{
	if (!states->desc.buffer)
		return;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, states->desc.buffer,
				  RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
}

void si_set_sampler_descriptors(struct si_context *sctx, unsigned shader,
				unsigned start, unsigned count, void **states)
{
	struct si_sampler_states *samplers = &sctx->samplers[shader].states;
	struct si_sampler_state **sstates = (struct si_sampler_state**)states;
	int i;

	if (start == 0)
		samplers->saved_states[0] = states[0];

	if (start == 1 && count >= 1)
		samplers->saved_states[1] = states[0];
	else if (start == 0 && count >= 2)
		samplers->saved_states[1] = states[1];

	for (i = 0; i < count; i++) {
		unsigned slot = start + i;

		if (!sstates[i])
			continue;

		memcpy(samplers->desc.list + slot*4, sstates[i]->val, 4*4);
		samplers->desc.list_dirty = true;
	}
}

/* BUFFER RESOURCES */
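
/* si_buffer_resources backs both the per-shader constant buffer lists and
 * the read/write (ring and streamout) buffer lists below; every element
 * is a 4-dword buffer descriptor. */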

static void si_init_buffer_resources(struct si_buffer_resources *buffers,
				     unsigned num_buffers,
				     unsigned shader_userdata_index,
				     enum radeon_bo_usage shader_usage,
				     enum radeon_bo_priority priority)
{
	buffers->shader_usage = shader_usage;
	buffers->priority = priority;
	buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));

	si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
			    num_buffers);
}

static void si_release_buffer_resources(struct si_buffer_resources *buffers)
{
	int i;

	for (i = 0; i < buffers->desc.num_elements; i++) {
		pipe_resource_reference(&buffers->buffers[i], NULL);
	}

	FREE(buffers->buffers);
	si_release_descriptors(&buffers->desc);
}

static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
					     struct si_buffer_resources *buffers)
{
	uint64_t mask = buffers->desc.enabled_mask;

	/* Add relocations to the CS. */
	while (mask) {
		int i = u_bit_scan64(&mask);

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
					  (struct r600_resource*)buffers->buffers[i],
					  buffers->shader_usage, buffers->priority);
	}

	if (!buffers->desc.buffer)
		return;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
				  buffers->desc.buffer, RADEON_USAGE_READWRITE,
				  RADEON_PRIO_SHADER_DATA);
}

/* VERTEX BUFFERS */

static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
{
	struct si_descriptors *desc = &sctx->vertex_buffers;
	int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
	int i;

	for (i = 0; i < count; i++) {
		int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;

		if (vb >= Elements(sctx->vertex_buffer))
			continue;
		if (!sctx->vertex_buffer[vb].buffer)
			continue;

		radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
					  (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
					  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
	}

	if (!desc->buffer)
		return;
	radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
				  desc->buffer, RADEON_USAGE_READ,
				  RADEON_PRIO_SHADER_DATA);
}

static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
{
	struct si_descriptors *desc = &sctx->vertex_buffers;
	bool bound[SI_NUM_VERTEX_BUFFERS] = {};
	unsigned i, count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
	uint64_t va;
	uint32_t *ptr;

	if (!sctx->vertex_buffers_dirty)
		return true;
	if (!count || !sctx->vertex_elements)
		return true;

	/* Vertex buffer descriptors are the only ones which are uploaded
	 * directly through a staging buffer and don't go through
	 * the fine-grained upload path.
	 */
	u_upload_alloc(sctx->b.uploader, 0, count * 16, &desc->buffer_offset,
		       (struct pipe_resource**)&desc->buffer, (void**)&ptr);
	if (!desc->buffer)
		return false;

	radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
				  desc->buffer, RADEON_USAGE_READ,
				  RADEON_PRIO_SHADER_DATA);

	assert(count <= SI_NUM_VERTEX_BUFFERS);

	for (i = 0; i < count; i++) {
		struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
		struct pipe_vertex_buffer *vb;
		struct r600_resource *rbuffer;
		unsigned offset;
		uint32_t *desc = &ptr[i*4];

		if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
			memset(desc, 0, 16);
			continue;
		}

		vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
		rbuffer = (struct r600_resource*)vb->buffer;
		if (rbuffer == NULL) {
			memset(desc, 0, 16);
			continue;
		}

		offset = vb->buffer_offset + ve->src_offset;
		va = rbuffer->gpu_address + offset;

		/* Fill in T# buffer resource description */
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(vb->stride);

		if (sctx->b.chip_class <= CIK && vb->stride)
			/* Round up by rounding down and adding 1 */
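			/* Worked example with made-up numbers: width0 = 100,
			 * offset = 4, format_size = 8, stride = 16 gives
			 * (100 - 4 - 8) / 16 + 1 = 6 records; the last fetch
			 * (index 5) reads bytes [84, 92), which still fits. */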
			desc[2] = (vb->buffer->width0 - offset -
				   sctx->vertex_elements->format_size[i]) /
				  vb->stride + 1;
		else
			desc[2] = vb->buffer->width0 - offset;

		desc[3] = sctx->vertex_elements->rsrc_word3[i];

		if (!bound[ve->vertex_buffer_index]) {
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
						  (struct r600_resource*)vb->buffer,
						  RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
			bound[ve->vertex_buffer_index] = true;
		}
	}

	/* Don't flush the const cache. It would have a very negative effect
	 * on performance (confirmed by testing). New descriptors are always
	 * uploaded to a fresh new buffer, so I don't think flushing the const
	 * cache is needed. */
	desc->pointer_dirty = true;
	si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
	sctx->vertex_buffers_dirty = false;
	return true;
}

/* CONSTANT BUFFERS */

void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
			    const uint8_t *ptr, unsigned size, uint32_t *const_offset)
{
	void *tmp;

	u_upload_alloc(sctx->b.uploader, 0, size, const_offset,
		       (struct pipe_resource**)rbuffer, &tmp);
	util_memcpy_cpu_to_le32(tmp, ptr, size);
}

static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
				   struct pipe_constant_buffer *input)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->const_buffers[shader];

	if (shader >= SI_NUM_SHADERS)
		return;

	assert(slot < buffers->desc.num_elements);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	/* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
	 * with a NULL buffer). We need to use a dummy buffer instead. */
	if (sctx->b.chip_class == CIK &&
	    (!input || (!input->buffer && !input->user_buffer)))
		input = &sctx->null_const_buf;

	if (input && (input->buffer || input->user_buffer)) {
		struct pipe_resource *buffer = NULL;
		uint64_t va;

		/* Upload the user buffer if needed. */
		if (input->user_buffer) {
			unsigned buffer_offset;

			si_upload_const_buffer(sctx,
					       (struct r600_resource**)&buffer, input->user_buffer,
					       input->buffer_size, &buffer_offset);
			va = r600_resource(buffer)->gpu_address + buffer_offset;
		} else {
			pipe_resource_reference(&buffer, input->buffer);
			va = r600_resource(buffer)->gpu_address + input->buffer_offset;
		}

		/* Set the descriptor. */
		uint32_t *desc = buffers->desc.list + slot*4;
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(0);
		desc[2] = input->buffer_size;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

		buffers->buffers[slot] = buffer;
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
					  (struct r600_resource*)buffer,
					  buffers->shader_usage, buffers->priority);
		buffers->desc.enabled_mask |= 1llu << slot;
	} else {
		/* Clear the descriptor. */
		memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
		buffers->desc.enabled_mask &= ~(1llu << slot);
	}

	buffers->desc.list_dirty = true;
}

/* RING BUFFERS */

void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
			struct pipe_resource *buffer,
			unsigned stride, unsigned num_records,
			bool add_tid, bool swizzle,
			unsigned element_size, unsigned index_stride, uint64_t offset)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];

	if (shader >= SI_NUM_SHADERS)
		return;

	/* The stride field in the resource descriptor has 14 bits */
	assert(stride < (1 << 14));

	assert(slot < buffers->desc.num_elements);
	pipe_resource_reference(&buffers->buffers[slot], NULL);

	if (buffer) {
		uint64_t va;

		va = r600_resource(buffer)->gpu_address + offset;

		/* Encode the byte sizes into the 2-bit ELEMENT_SIZE and
		 * INDEX_STRIDE descriptor field values used below. */
		switch (element_size) {
		default:
			assert(!"Unsupported ring buffer element size");
		case 0:
		case 2: element_size = 0; break;
		case 4: element_size = 1; break;
		case 8: element_size = 2; break;
		case 16: element_size = 3; break;
		}

		switch (index_stride) {
		default:
			assert(!"Unsupported ring buffer index stride");
		case 0:
		case 8: index_stride = 0; break;
		case 16: index_stride = 1; break;
		case 32: index_stride = 2; break;
		case 64: index_stride = 3; break;
		}
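
		/* The conversion below suggests that NUM_RECORDS is taken in
		 * stride-sized records before VI and in bytes from VI on when
		 * a stride is set (inference from this code alone). */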
		if (sctx->b.chip_class >= VI && stride)
			num_records *= stride;

		/* Set the descriptor. */
		uint32_t *desc = buffers->desc.list + slot*4;
		desc[0] = va;
		desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
			  S_008F04_STRIDE(stride) |
			  S_008F04_SWIZZLE_ENABLE(swizzle);
		desc[2] = num_records;
		desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
			  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
			  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
			  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
			  S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
			  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
			  S_008F0C_ELEMENT_SIZE(element_size) |
			  S_008F0C_INDEX_STRIDE(index_stride) |
			  S_008F0C_ADD_TID_ENABLE(add_tid);

		pipe_resource_reference(&buffers->buffers[slot], buffer);
		radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
					  (struct r600_resource*)buffer,
					  buffers->shader_usage, buffers->priority);
		buffers->desc.enabled_mask |= 1llu << slot;
	} else {
		/* Clear the descriptor. */
		memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
		buffers->desc.enabled_mask &= ~(1llu << slot);
	}

	buffers->desc.list_dirty = true;
}

/* STREAMOUT BUFFERS */

static void si_set_streamout_targets(struct pipe_context *ctx,
				     unsigned num_targets,
				     struct pipe_stream_output_target **targets,
				     const unsigned *offsets)
{
	struct si_context *sctx = (struct si_context *)ctx;
	struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
	unsigned old_num_targets = sctx->b.streamout.num_targets;
	unsigned i, bufidx;

	/* We are going to unbind the buffers. Mark which caches need to be flushed. */
	if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
		/* Since streamout uses vector writes which go through TC L2
		 * and most other clients can use TC L2 as well, we don't need
		 * to flush it.
		 *
		 * The only case which requires flushing it is VGT DMA index
		 * fetching, which is a rare case. Thus, flag the TC L2
		 * dirtiness in the resource and handle it when index fetching
		 * is used.
		 */
		for (i = 0; i < sctx->b.streamout.num_targets; i++)
			if (sctx->b.streamout.targets[i])
				r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;

		/* Invalidate the scalar cache in case a streamout buffer is
		 * going to be used as a constant buffer.
		 *
		 * Invalidate TC L1, because streamout bypasses it (done by
		 * setting GLC=1 in the store instruction), but it can contain
		 * outdated data of streamout buffers.
		 *
		 * VS_PARTIAL_FLUSH is required if the buffers are going to be
		 * used as an input immediately.
		 */
		sctx->b.flags |= SI_CONTEXT_INV_KCACHE |
				 SI_CONTEXT_INV_TC_L1 |
				 SI_CONTEXT_VS_PARTIAL_FLUSH;
	}

	/* Streamout buffers must be bound in 2 places:
	 * 1) in VGT by setting the VGT_STRMOUT registers
	 * 2) as shader resources
	 */

	/* Set the VGT regs. */
	r600_set_streamout_targets(ctx, num_targets, targets, offsets);

	/* Set the shader resources.*/
	for (i = 0; i < num_targets; i++) {
		bufidx = SI_SO_BUF_OFFSET + i;

		if (targets[i]) {
			struct pipe_resource *buffer = targets[i]->buffer;
			uint64_t va = r600_resource(buffer)->gpu_address;

			/* Set the descriptor.
			 *
			 * On VI, the format must be non-INVALID, otherwise
			 * the buffer will be considered not bound and store
			 * instructions will be no-ops.
			 */
			uint32_t *desc = buffers->desc.list + bufidx*4;
			desc[0] = va;
			desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
			desc[2] = 0xffffffff;
			desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
				  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
				  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
				  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
				  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

			/* Set the resource. */
			pipe_resource_reference(&buffers->buffers[bufidx],
						buffer);
			radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
						  (struct r600_resource*)buffer,
						  buffers->shader_usage, buffers->priority);
			buffers->desc.enabled_mask |= 1llu << bufidx;
		} else {
			/* Clear the descriptor and unset the resource. */
			memset(buffers->desc.list + bufidx*4, 0,
			       sizeof(uint32_t) * 4);
			pipe_resource_reference(&buffers->buffers[bufidx],
						NULL);
			buffers->desc.enabled_mask &= ~(1llu << bufidx);
		}
	}
	for (; i < old_num_targets; i++) {
		bufidx = SI_SO_BUF_OFFSET + i;
		/* Clear the descriptor and unset the resource. */
		memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
		pipe_resource_reference(&buffers->buffers[bufidx], NULL);
		buffers->desc.enabled_mask &= ~(1llu << bufidx);
	}

	buffers->desc.list_dirty = true;
}

static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
					uint32_t *desc, uint64_t old_buf_va,
					struct pipe_resource *new_buf)
{
	/* Retrieve the buffer offset from the descriptor. */
	uint64_t old_desc_va =
		desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);

	assert(old_buf_va <= old_desc_va);
	uint64_t offset_within_buffer = old_desc_va - old_buf_va;
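
	/* Example with made-up addresses: if old_buf_va = 0x10000 and the
	 * descriptor held 0x10040, the binding sits at offset 0x40 within
	 * the buffer, and that offset is preserved across reallocation. */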

	/* Update the descriptor. */
	uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;

	desc[0] = va;
	desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
		  S_008F04_BASE_ADDRESS_HI(va >> 32);
}

/* BUFFER DISCARD/INVALIDATION */

/* Reallocate a buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents. Apps usually tell us when to do this using
 * map_buffer flags, for example.
 */
static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
	struct si_context *sctx = (struct si_context*)ctx;
	struct r600_resource *rbuffer = r600_resource(buf);
	unsigned i, shader, alignment = rbuffer->buf->alignment;
	uint64_t old_va = rbuffer->gpu_address;
	unsigned num_elems = sctx->vertex_elements ?
				       sctx->vertex_elements->count : 0;
	struct si_sampler_view *view;

	/* Reallocate the buffer in the same pipe_resource. */
	r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
			   alignment, TRUE, rbuffer->b.b.usage);

	/* We changed the buffer, now we need to bind it where the old one
	 * was bound. This consists of 2 things:
	 * 1) Updating the resource descriptor and dirtying it.
	 * 2) Adding a relocation to the CS, so that it's usable.
	 */

	/* Vertex buffers. */
	for (i = 0; i < num_elems; i++) {
		int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;

		if (vb >= Elements(sctx->vertex_buffer))
			continue;
		if (!sctx->vertex_buffer[vb].buffer)
			continue;

		if (sctx->vertex_buffer[vb].buffer == buf) {
			sctx->vertex_buffers_dirty = true;
			break;
		}
	}

	/* Read/Write buffers. */
	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
		uint64_t mask = buffers->desc.enabled_mask;

		while (mask) {
			i = u_bit_scan64(&mask);
			if (buffers->buffers[i] == buf) {
				si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
							    old_va, buf);
				buffers->desc.list_dirty = true;

				radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
							  rbuffer, buffers->shader_usage,
							  buffers->priority);

				if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
					/* Update the streamout state. */
					if (sctx->b.streamout.begin_emitted) {
						r600_emit_streamout_end(&sctx->b);
					}
					sctx->b.streamout.append_bitmask =
						sctx->b.streamout.enabled_mask;
					r600_streamout_buffers_dirty(&sctx->b);
				}
			}
		}
	}

	/* Constant buffers. */
	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
		uint64_t mask = buffers->desc.enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan64(&mask);
			if (buffers->buffers[i] == buf) {
				si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
							    old_va, buf);
				buffers->desc.list_dirty = true;

				radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
							  rbuffer, buffers->shader_usage,
							  buffers->priority);
			}
		}
	}

	/* Texture buffers - update virtual addresses in sampler view descriptors. */
	LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
		if (view->base.texture == buf) {
			si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
		}
	}
	/* Texture buffers - update bindings. */
	for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
		struct si_sampler_views *views = &sctx->samplers[shader].views;
		uint64_t mask = views->desc.enabled_mask;

		while (mask) {
			unsigned i = u_bit_scan64(&mask);
			if (views->views[i]->texture == buf) {
				si_desc_reset_buffer_offset(ctx, views->desc.list + i*8+4,
							    old_va, buf);
				views->desc.list_dirty = true;

				radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
							  rbuffer, RADEON_USAGE_READ,
							  RADEON_PRIO_SHADER_BUFFER_RO);
			}
		}
	}
}

/* SHADER USER DATA */

static void si_mark_shader_pointers_dirty(struct si_context *sctx,
					  unsigned shader)
{
	sctx->const_buffers[shader].desc.pointer_dirty = true;
	sctx->rw_buffers[shader].desc.pointer_dirty = true;
	sctx->samplers[shader].views.desc.pointer_dirty = true;
	sctx->samplers[shader].states.desc.pointer_dirty = true;

	if (shader == PIPE_SHADER_VERTEX)
		sctx->vertex_buffers.pointer_dirty = true;

	si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
}

static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_mark_shader_pointers_dirty(sctx, i);
	}
}

/* Set a base register address for user data constants in the given shader.
 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
 */
static void si_set_user_data_base(struct si_context *sctx,
				  unsigned shader, uint32_t new_base)
{
	uint32_t *base = &sctx->shader_userdata.sh_base[shader];

	if (*base != new_base) {
		*base = new_base;

		if (new_base)
			si_mark_shader_pointers_dirty(sctx, shader);
	}
}

/* This must be called when these shaders are changed from non-NULL to NULL
 * and vice versa:
 * - geometry shader
 * - tessellation control shader
 * - tessellation evaluation shader
 */
void si_shader_change_notify(struct si_context *sctx)
{
	/* VS can be bound as VS, ES, or LS. */
	if (sctx->tes_shader)
		si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
				      R_00B530_SPI_SHADER_USER_DATA_LS_0);
	else if (sctx->gs_shader)
		si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
				      R_00B330_SPI_SHADER_USER_DATA_ES_0);
	else
		si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
				      R_00B130_SPI_SHADER_USER_DATA_VS_0);

	/* TES can be bound as ES, VS, or not bound. */
	if (sctx->tes_shader) {
		if (sctx->gs_shader)
			si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
					      R_00B330_SPI_SHADER_USER_DATA_ES_0);
		else
			si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
					      R_00B130_SPI_SHADER_USER_DATA_VS_0);
	} else {
		si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
	}
}

static void si_emit_shader_pointer(struct si_context *sctx,
				   struct si_descriptors *desc,
				   unsigned sh_base, bool keep_dirty)
{
	struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
	uint64_t va;

	if (!desc->pointer_dirty || !desc->buffer)
		return;

	va = desc->buffer->gpu_address +
	     desc->buffer_offset;

	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
	radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);
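	/* The two data dwords above are the low and high halves of the
	 * 64-bit descriptor-list address, written into a pair of
	 * consecutive user-data SGPRs. */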

	desc->pointer_dirty = keep_dirty;
}

void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
{
	unsigned i;
	uint32_t *sh_base = sctx->shader_userdata.sh_base;

	if (sctx->gs_shader) {
		/* The VS copy shader needs these for clipping, streamout, and rings. */
		unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
		unsigned i = PIPE_SHADER_VERTEX;

		si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
		si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);

		/* The TESSEVAL shader needs this for the ESGS ring buffer. */
		si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
				       R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
	} else if (sctx->tes_shader) {
		/* The TESSEVAL shader needs this for streamout. */
		si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
				       R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
	}

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		unsigned base = sh_base[i];

		if (!base)
			continue;

		if (i != PIPE_SHADER_TESS_EVAL)
			si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);

		si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
		si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
		si_emit_shader_pointer(sctx, &sctx->samplers[i].states.desc, base, false);
	}
	si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
}

/* INIT/DEINIT/UPLOAD */

void si_init_all_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_init_buffer_resources(&sctx->const_buffers[i],
					 SI_NUM_CONST_BUFFERS, SI_SGPR_CONST,
					 RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
		si_init_buffer_resources(&sctx->rw_buffers[i],
					 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
					 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW);

		si_init_descriptors(&sctx->samplers[i].views.desc,
				    SI_SGPR_RESOURCE, 8, SI_NUM_SAMPLER_VIEWS);
		si_init_descriptors(&sctx->samplers[i].states.desc,
				    SI_SGPR_SAMPLER, 4, SI_NUM_SAMPLER_STATES);
	}

	si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFER,
			    4, SI_NUM_VERTEX_BUFFERS);

	/* Set pipe_context functions. */
	sctx->b.b.set_constant_buffer = si_set_constant_buffer;
	sctx->b.b.set_sampler_views = si_set_sampler_views;
	sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
	sctx->b.invalidate_buffer = si_invalidate_buffer;

	/* Shader user data.
	 * The number of dwords is set to the upper bound:
	 * 4 pointers per shader, +1 for vertex buffers, +2 for the VS copy shader.
	 */
	si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
		     si_emit_shader_userdata, (SI_NUM_SHADERS * 4 + 1 + 2) * 4);
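
	/* Each pointer emitted by si_emit_shader_pointer() costs 4 dwords
	 * (SET_SH_REG header, register offset, address low, address high),
	 * hence the final "* 4" above. */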

	/* Set default and immutable mappings. */
	si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
	si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
	si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
	si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}

bool si_upload_shader_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
		    !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
		    !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc) ||
		    !si_upload_descriptors(sctx, &sctx->samplers[i].states.desc))
			return false;
	}
	return si_upload_vertex_buffer_descriptors(sctx);
}

void si_release_all_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_release_buffer_resources(&sctx->const_buffers[i]);
		si_release_buffer_resources(&sctx->rw_buffers[i]);
		si_release_sampler_views(&sctx->samplers[i].views);
		si_release_descriptors(&sctx->samplers[i].states.desc);
	}
	si_release_descriptors(&sctx->vertex_buffers);
}

void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
		si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
		si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
		si_sampler_states_begin_new_cs(sctx, &sctx->samplers[i].states);
	}
	si_vertex_buffers_begin_new_cs(sctx);
	si_shader_userdata_begin_new_cs(sctx);
}