2 * Copyright 2013 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
24 * Marek Olšák <marek.olsak@amd.com>
27 /* Resource binding slots and sampler states (each described with 8 or
28 * 4 dwords) are stored in lists in memory which is accessed by shaders
29 * using scalar load instructions.
31 * This file is responsible for managing such lists. It keeps a copy of all
32 * descriptors in CPU memory and re-uploads a whole list if some slots have
35 * This code is also responsible for updating shader pointers to those lists.
37 * Note that CP DMA can't be used for updating the lists, because a GPU hang
38 * could leave the list in a mid-IB state and the next IB would get wrong
39 * descriptors and the whole context would be unusable at that point.
40 * (Note: Register shadowing can't be used for the same reason.)
42 * Also, uploading descriptors to newly allocated memory doesn't require
46 * Possible scenarios for one 16-dword image+sampler slot:
48 * | Image | w/ FMASK | Buffer | NULL
49 * [ 0: 3] Image[0:3] | Image[0:3] | Null[0:3] | Null[0:3]
50 * [ 4: 7] Image[4:7] | Image[4:7] | Buffer[0:3] | 0
51 * [ 8:11] Null[0:3] | Fmask[0:3] | Null[0:3] | Null[0:3]
52 * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
54 * FMASK implies MSAA, therefore no sampler state is needed.
55 * Sampler states are never unbound except when FMASK is bound.
58 #include "radeon/r600_cs.h"
60 #include "si_shader.h"
63 #include "util/u_memory.h"
64 #include "util/u_suballoc.h"
65 #include "util/u_upload_mgr.h"
68 /* NULL image and buffer descriptor for textures (alpha = 1) and images
71 * For images, all fields must be zero except for the swizzle, which
72 * supports arbitrary combinations of 0s and 1s. The texture type must be
73 * set to any valid type (e.g. 1D); if it isn't set, the hw hangs.
75 * For buffers, all fields must be zero. If they are not, the hw hangs.
77 * This is the only reason why the buffer descriptor must be in words [4:7].
79 static uint32_t null_texture_descriptor[8] = {
83 S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
84 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
85 /* the rest must contain zeros, which is also used by the buffer
89 static uint32_t null_image_descriptor[8] = {
93 S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
94 /* the rest must contain zeros, which is also used by the buffer
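/* Initialize a descriptor list: allocate the CPU copy, record the element
 * layout, reserve CE RAM space when ce_offset is given, and prefill the list
 * with NULL descriptors if one is provided. */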
98 static void si_init_descriptors(struct si_descriptors *desc,
99 unsigned shader_userdata_index,
100 unsigned element_dw_size,
101 unsigned num_elements,
102 const uint32_t *null_descriptor,
107 assert(num_elements <= sizeof(desc->enabled_mask)*8);
109 desc->list = CALLOC(num_elements, element_dw_size * 4);
110 desc->element_dw_size = element_dw_size;
111 desc->num_elements = num_elements;
112 desc->list_dirty = true; /* upload the list before the next draw */
113 desc->shader_userdata_offset = shader_userdata_index * 4;
116 desc->ce_offset = *ce_offset;
118 /* make sure that ce_offset stays 32-byte aligned */
119 *ce_offset += align(element_dw_size * num_elements * 4, 32);
122 /* Initialize the array to NULL descriptors if one is provided (the element size must be a multiple of 8 dwords). */
123 if (null_descriptor) {
124 assert(element_dw_size % 8 == 0);
125 for (i = 0; i < num_elements * element_dw_size / 8; i++)
126 memcpy(desc->list + i * 8, null_descriptor,
131 static void si_release_descriptors(struct si_descriptors *desc)
133 pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
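/* Allocate GPU memory from the CE suballocator and emit a DUMP_CONST_RAM
 * packet that copies "size" bytes from CE RAM at ce_offset into it. */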
137 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
138 unsigned *out_offset, struct r600_resource **out_buf) {
141 u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
142 (struct pipe_resource**)out_buf);
146 va = (*out_buf)->gpu_address + *out_offset;
148 radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
149 radeon_emit(sctx->ce_ib, ce_offset);
150 radeon_emit(sctx->ce_ib, size / 4);
151 radeon_emit(sctx->ce_ib, va);
152 radeon_emit(sctx->ce_ib, va >> 32);
154 sctx->ce_need_synchronization = true;
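/* Upload a dirty descriptor list to freshly allocated GPU memory and mark the
 * shader pointers as dirty. Returns false if the upload allocation fails. */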
159 static bool si_upload_descriptors(struct si_context *sctx,
160 struct si_descriptors *desc)
162 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
165 if (!desc->list_dirty)
168 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
169 &desc->buffer_offset,
170 (struct pipe_resource**)&desc->buffer, &ptr);
172 return false; /* skip the draw call */
174 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
176 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
177 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
179 desc->list_dirty = false;
180 desc->pointer_dirty = true;
181 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
187 static void si_release_sampler_views(struct si_sampler_views *views)
191 for (i = 0; i < Elements(views->views); i++) {
192 pipe_sampler_view_reference(&views->views[i], NULL);
194 si_release_descriptors(&views->desc);
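/* Add the resource backing a sampler view to the gfx CS, with a priority
 * derived from the resource type. */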
197 static void si_sampler_view_add_buffer(struct si_context *sctx,
198 struct pipe_resource *resource)
200 struct r600_resource *rres = (struct r600_resource*)resource;
205 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres,
207 r600_get_sampler_view_priority(rres));
210 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
211 struct si_sampler_views *views)
213 uint64_t mask = views->desc.enabled_mask;
215 /* Add buffers to the CS. */
217 int i = u_bit_scan64(&mask);
219 si_sampler_view_add_buffer(sctx, views->views[i]->texture);
222 if (!views->desc.buffer)
224 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
225 RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
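/* Bind one sampler view slot: copy the view's image descriptor (and FMASK
 * descriptor if present) into the list, re-apply the sampler state in
 * dwords [12:15], or write NULL descriptors when unbinding. */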
228 static void si_set_sampler_view(struct si_context *sctx,
229 struct si_sampler_views *views,
230 unsigned slot, struct pipe_sampler_view *view)
232 struct si_sampler_view *rview = (struct si_sampler_view*)view;
234 if (view && view->texture && view->texture->target != PIPE_BUFFER &&
235 G_008F28_COMPRESSION_EN(rview->state[6]) &&
236 ((struct r600_texture*)view->texture)->dcc_offset == 0) {
237 rview->state[6] &= C_008F28_COMPRESSION_EN &
238 C_008F28_ALPHA_IS_ON_MSB;
239 } else if (views->views[slot] == view)
243 struct r600_texture *rtex = (struct r600_texture *)view->texture;
245 si_sampler_view_add_buffer(sctx, view->texture);
247 pipe_sampler_view_reference(&views->views[slot], view);
248 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
250 if (view->texture && view->texture->target != PIPE_BUFFER &&
252 memcpy(views->desc.list + slot*16 + 8,
253 rview->fmask_state, 8*4);
255 /* Disable FMASK and bind sampler state in [12:15]. */
256 memcpy(views->desc.list + slot*16 + 8,
257 null_texture_descriptor, 4*4);
259 if (views->sampler_states[slot])
260 memcpy(views->desc.list + slot*16 + 12,
261 views->sampler_states[slot], 4*4);
264 views->desc.enabled_mask |= 1llu << slot;
266 pipe_sampler_view_reference(&views->views[slot], NULL);
267 memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
268 /* Only clear the lower dwords of FMASK. */
269 memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
270 views->desc.enabled_mask &= ~(1llu << slot);
273 views->desc.list_dirty = true;
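/* Return whether the texture may contain compressed color data (CMASK, FMASK,
 * or DCC with dirty levels) that needs decompression before sampling. */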
276 static bool is_compressed_colortex(struct r600_texture *rtex)
278 return rtex->cmask.size || rtex->fmask.size ||
279 (rtex->dcc_offset && rtex->dirty_level_mask);
282 static void si_set_sampler_views(struct pipe_context *ctx,
283 unsigned shader, unsigned start,
285 struct pipe_sampler_view **views)
287 struct si_context *sctx = (struct si_context *)ctx;
288 struct si_textures_info *samplers = &sctx->samplers[shader];
291 if (!count || shader >= SI_NUM_SHADERS)
294 for (i = 0; i < count; i++) {
295 unsigned slot = start + i;
297 if (!views || !views[i]) {
298 samplers->depth_texture_mask &= ~(1llu << slot);
299 samplers->compressed_colortex_mask &= ~(1llu << slot);
300 si_set_sampler_view(sctx, &samplers->views, slot, NULL);
304 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
306 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
307 struct r600_texture *rtex =
308 (struct r600_texture*)views[i]->texture;
310 if (rtex->is_depth && !rtex->is_flushing_texture) {
311 samplers->depth_texture_mask |= 1llu << slot;
313 samplers->depth_texture_mask &= ~(1llu << slot);
315 if (is_compressed_colortex(rtex)) {
316 samplers->compressed_colortex_mask |= 1llu << slot;
318 samplers->compressed_colortex_mask &= ~(1llu << slot);
321 samplers->depth_texture_mask &= ~(1llu << slot);
322 samplers->compressed_colortex_mask &= ~(1llu << slot);
328 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
330 uint64_t mask = samplers->views.desc.enabled_mask;
333 int i = u_bit_scan64(&mask);
334 struct pipe_resource *res = samplers->views.views[i]->texture;
336 if (res && res->target != PIPE_BUFFER) {
337 struct r600_texture *rtex = (struct r600_texture *)res;
339 if (is_compressed_colortex(rtex)) {
340 samplers->compressed_colortex_mask |= 1llu << i;
342 samplers->compressed_colortex_mask &= ~(1llu << i);
351 si_release_image_views(struct si_images_info *images)
355 for (i = 0; i < SI_NUM_IMAGES; ++i) {
356 struct pipe_image_view *view = &images->views[i];
358 pipe_resource_reference(&view->resource, NULL);
361 si_release_descriptors(&images->desc);
365 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
367 uint mask = images->desc.enabled_mask;
369 /* Add buffers to the CS. */
371 int i = u_bit_scan(&mask);
372 struct pipe_image_view *view = &images->views[i];
374 assert(view->resource);
376 si_sampler_view_add_buffer(sctx, view->resource);
379 if (images->desc.buffer) {
380 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
383 RADEON_PRIO_DESCRIPTORS);
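/* Unbind an image slot: drop the resource reference, clear its
 * compressed-colortex bit, and write the NULL image descriptor. */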
388 si_disable_shader_image(struct si_images_info *images, unsigned slot)
390 if (images->desc.enabled_mask & (1llu << slot)) {
391 pipe_resource_reference(&images->views[slot].resource, NULL);
392 images->compressed_colortex_mask &= ~(1 << slot);
394 memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
395 images->desc.enabled_mask &= ~(1llu << slot);
396 images->desc.list_dirty = true;
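/* Bind shader images. Buffers get a buffer descriptor, textures get a texture
 * descriptor for the selected level and layers; DCC is disabled on textures
 * that are bound for writing. */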
401 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
402 unsigned start_slot, unsigned count,
403 struct pipe_image_view *views)
405 struct si_context *ctx = (struct si_context *)pipe;
406 struct si_screen *screen = ctx->screen;
407 struct si_images_info *images = &ctx->images[shader];
410 assert(shader < SI_NUM_SHADERS);
415 assert(start_slot + count <= SI_NUM_IMAGES);
417 for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
418 struct r600_resource *res;
420 if (!views || !views[i].resource) {
421 si_disable_shader_image(images, slot);
425 res = (struct r600_resource *)views[i].resource;
426 util_copy_image_view(&images->views[slot], &views[i]);
428 si_sampler_view_add_buffer(ctx, &res->b.b);
430 if (res->b.b.target == PIPE_BUFFER) {
431 si_make_buffer_descriptor(screen, res,
433 views[i].u.buf.first_element,
434 views[i].u.buf.last_element,
435 images->desc.list + slot * 8);
436 images->compressed_colortex_mask &= ~(1 << slot);
438 static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
439 struct r600_texture *tex = (struct r600_texture *)res;
441 unsigned width, height, depth;
443 assert(!tex->is_depth);
444 assert(tex->fmask.size == 0);
446 if (tex->dcc_offset &&
447 views[i].access & PIPE_IMAGE_ACCESS_WRITE)
448 r600_texture_disable_dcc(&screen->b, tex);
450 if (is_compressed_colortex(tex)) {
451 images->compressed_colortex_mask |= 1 << slot;
453 images->compressed_colortex_mask &= ~(1 << slot);
456 /* Always force the base level to the selected level.
458 * This is required for 3D textures, where otherwise
459 * selecting a single slice for non-layered bindings
460 * fails. It doesn't hurt the other targets.
462 level = views[i].u.tex.level;
463 width = u_minify(res->b.b.width0, level);
464 height = u_minify(res->b.b.height0, level);
465 depth = u_minify(res->b.b.depth0, level);
467 si_make_texture_descriptor(screen, tex, false, res->b.b.target,
468 views[i].format, swizzle,
470 views[i].u.tex.first_layer, views[i].u.tex.last_layer,
471 width, height, depth,
472 images->desc.list + slot * 8,
476 images->desc.enabled_mask |= 1llu << slot;
477 images->desc.list_dirty = true;
482 si_images_update_compressed_colortex_mask(struct si_images_info *images)
484 uint64_t mask = images->desc.enabled_mask;
487 int i = u_bit_scan64(&mask);
488 struct pipe_resource *res = images->views[i].resource;
490 if (res && res->target != PIPE_BUFFER) {
491 struct r600_texture *rtex = (struct r600_texture *)res;
493 if (is_compressed_colortex(rtex)) {
494 images->compressed_colortex_mask |= 1 << i;
496 images->compressed_colortex_mask &= ~(1 << i);
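/* Bind sampler states. Each state is copied into dwords [12:15] of its
 * image+sampler slot unless FMASK is currently bound there. */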
504 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
505 unsigned start, unsigned count, void **states)
507 struct si_context *sctx = (struct si_context *)ctx;
508 struct si_textures_info *samplers = &sctx->samplers[shader];
509 struct si_descriptors *desc = &samplers->views.desc;
510 struct si_sampler_state **sstates = (struct si_sampler_state**)states;
513 if (!count || shader >= SI_NUM_SHADERS)
516 for (i = 0; i < count; i++) {
517 unsigned slot = start + i;
520 sstates[i] == samplers->views.sampler_states[slot])
523 samplers->views.sampler_states[slot] = sstates[i];
525 /* If FMASK is bound, don't overwrite it.
526 * The sampler state will be set after FMASK is unbound.
528 if (samplers->views.views[slot] &&
529 samplers->views.views[slot]->texture &&
530 samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
531 ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
534 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
535 desc->list_dirty = true;
539 /* BUFFER RESOURCES */
541 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
542 unsigned num_buffers,
543 unsigned shader_userdata_index,
544 enum radeon_bo_usage shader_usage,
545 enum radeon_bo_priority priority,
548 buffers->shader_usage = shader_usage;
549 buffers->priority = priority;
550 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
552 si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
553 num_buffers, NULL, ce_offset);
556 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
560 for (i = 0; i < buffers->desc.num_elements; i++) {
561 pipe_resource_reference(&buffers->buffers[i], NULL);
564 FREE(buffers->buffers);
565 si_release_descriptors(&buffers->desc);
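/* Re-add all enabled buffer bindings and the descriptor buffer itself to a
 * newly started CS. */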
568 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
569 struct si_buffer_resources *buffers)
571 uint64_t mask = buffers->desc.enabled_mask;
573 /* Add buffers to the CS. */
575 int i = u_bit_scan64(&mask);
577 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
578 (struct r600_resource*)buffers->buffers[i],
579 buffers->shader_usage, buffers->priority);
582 if (!buffers->desc.buffer)
584 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
585 buffers->desc.buffer, RADEON_USAGE_READWRITE,
586 RADEON_PRIO_DESCRIPTORS);
591 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
593 struct si_descriptors *desc = &sctx->vertex_buffers;
594 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
597 for (i = 0; i < count; i++) {
598 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
600 if (vb >= Elements(sctx->vertex_buffer))
602 if (!sctx->vertex_buffer[vb].buffer)
605 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
606 (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
607 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
612 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
613 desc->buffer, RADEON_USAGE_READ,
614 RADEON_PRIO_DESCRIPTORS);
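/* Build and upload the vertex buffer descriptors (one 4-dword T# per vertex
 * element) when the vertex buffer bindings are dirty. */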
617 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
619 struct si_descriptors *desc = &sctx->vertex_buffers;
620 bool bound[SI_NUM_VERTEX_BUFFERS] = {};
621 unsigned i, count = sctx->vertex_elements->count;
625 if (!sctx->vertex_buffers_dirty)
627 if (!count || !sctx->vertex_elements)
630 /* Vertex buffer descriptors are the only ones which are uploaded
631 * directly through a staging buffer and don't go through
632 * the fine-grained upload path.
634 u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
635 (struct pipe_resource**)&desc->buffer, (void**)&ptr);
639 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
640 desc->buffer, RADEON_USAGE_READ,
641 RADEON_PRIO_DESCRIPTORS);
643 assert(count <= SI_NUM_VERTEX_BUFFERS);
645 for (i = 0; i < count; i++) {
646 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
647 struct pipe_vertex_buffer *vb;
648 struct r600_resource *rbuffer;
650 uint32_t *desc = &ptr[i*4];
652 if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
657 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
658 rbuffer = (struct r600_resource*)vb->buffer;
664 offset = vb->buffer_offset + ve->src_offset;
665 va = rbuffer->gpu_address + offset;
667 /* Fill in T# buffer resource description */
669 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
670 S_008F04_STRIDE(vb->stride);
672 if (sctx->b.chip_class <= CIK && vb->stride)
673 /* Round up by rounding down and adding 1 */
674 desc[2] = (vb->buffer->width0 - offset -
675 sctx->vertex_elements->format_size[i]) /
678 desc[2] = vb->buffer->width0 - offset;
680 desc[3] = sctx->vertex_elements->rsrc_word3[i];
682 if (!bound[ve->vertex_buffer_index]) {
683 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
684 (struct r600_resource*)vb->buffer,
685 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
686 bound[ve->vertex_buffer_index] = true;
690 /* Don't flush the const cache. It would have a very negative effect
691 * on performance (confirmed by testing). New descriptors are always
692 * uploaded to a fresh buffer, so I don't think flushing the const
693 * cache is needed. */
694 desc->pointer_dirty = true;
695 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
696 sctx->vertex_buffers_dirty = false;
701 /* CONSTANT BUFFERS */
703 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
704 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
708 u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
709 (struct pipe_resource**)rbuffer, &tmp);
711 util_memcpy_cpu_to_le32(tmp, ptr, size);
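/* Bind a constant buffer: upload user buffers if necessary and write the
 * 4-dword buffer descriptor for the slot. */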
714 static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
715 struct pipe_constant_buffer *input)
717 struct si_context *sctx = (struct si_context *)ctx;
718 struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
720 if (shader >= SI_NUM_SHADERS)
723 assert(slot < buffers->desc.num_elements);
724 pipe_resource_reference(&buffers->buffers[slot], NULL);
726 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
727 * with a NULL buffer). We need to use a dummy buffer instead. */
728 if (sctx->b.chip_class == CIK &&
729 (!input || (!input->buffer && !input->user_buffer)))
730 input = &sctx->null_const_buf;
732 if (input && (input->buffer || input->user_buffer)) {
733 struct pipe_resource *buffer = NULL;
736 /* Upload the user buffer if needed. */
737 if (input->user_buffer) {
738 unsigned buffer_offset;
740 si_upload_const_buffer(sctx,
741 (struct r600_resource**)&buffer, input->user_buffer,
742 input->buffer_size, &buffer_offset);
744 /* Just unbind on failure. */
745 si_set_constant_buffer(ctx, shader, slot, NULL);
748 va = r600_resource(buffer)->gpu_address + buffer_offset;
750 pipe_resource_reference(&buffer, input->buffer);
751 va = r600_resource(buffer)->gpu_address + input->buffer_offset;
754 /* Set the descriptor. */
755 uint32_t *desc = buffers->desc.list + slot*4;
757 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
759 desc[2] = input->buffer_size;
760 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
761 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
762 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
763 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
764 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
765 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
767 buffers->buffers[slot] = buffer;
768 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
769 (struct r600_resource*)buffer,
770 buffers->shader_usage, buffers->priority);
771 buffers->desc.enabled_mask |= 1llu << slot;
773 /* Clear the descriptor. */
774 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
775 buffers->desc.enabled_mask &= ~(1llu << slot);
778 buffers->desc.list_dirty = true;
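/* Bind shader storage buffers by writing their 4-dword buffer descriptors. */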
783 static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
784 unsigned start_slot, unsigned count,
785 struct pipe_shader_buffer *sbuffers)
787 struct si_context *sctx = (struct si_context *)ctx;
788 struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
791 assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
793 for (i = 0; i < count; ++i) {
794 struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
795 struct r600_resource *buf;
796 unsigned slot = start_slot + i;
797 uint32_t *desc = buffers->desc.list + slot * 4;
800 if (!sbuffer || !sbuffer->buffer) {
801 pipe_resource_reference(&buffers->buffers[slot], NULL);
802 memset(desc, 0, sizeof(uint32_t) * 4);
803 buffers->desc.enabled_mask &= ~(1llu << slot);
807 buf = (struct r600_resource *)sbuffer->buffer;
808 va = buf->gpu_address + sbuffer->buffer_offset;
811 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
813 desc[2] = sbuffer->buffer_size;
814 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
815 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
816 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
817 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
818 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
819 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
821 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
822 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
823 buffers->shader_usage, buffers->priority);
824 buffers->desc.enabled_mask |= 1llu << slot;
827 buffers->desc.list_dirty = true;
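/* Bind an internal ring buffer to a RW buffer slot. Stride, swizzle, element
 * size, index stride and ADD_TID are encoded directly in the descriptor. */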
832 void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
833 struct pipe_resource *buffer,
834 unsigned stride, unsigned num_records,
835 bool add_tid, bool swizzle,
836 unsigned element_size, unsigned index_stride, uint64_t offset)
838 struct si_context *sctx = (struct si_context *)ctx;
839 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
841 if (shader >= SI_NUM_SHADERS)
844 /* The stride field in the resource descriptor has 14 bits */
845 assert(stride < (1 << 14));
847 assert(slot < buffers->desc.num_elements);
848 pipe_resource_reference(&buffers->buffers[slot], NULL);
853 va = r600_resource(buffer)->gpu_address + offset;
855 switch (element_size) {
857 assert(!"Unsupported ring buffer element size");
873 switch (index_stride) {
875 assert(!"Unsupported ring buffer index stride");
891 if (sctx->b.chip_class >= VI && stride)
892 num_records *= stride;
894 /* Set the descriptor. */
895 uint32_t *desc = buffers->desc.list + slot*4;
897 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
898 S_008F04_STRIDE(stride) |
899 S_008F04_SWIZZLE_ENABLE(swizzle);
900 desc[2] = num_records;
901 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
902 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
903 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
904 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
905 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
906 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
907 S_008F0C_ELEMENT_SIZE(element_size) |
908 S_008F0C_INDEX_STRIDE(index_stride) |
909 S_008F0C_ADD_TID_ENABLE(add_tid);
911 pipe_resource_reference(&buffers->buffers[slot], buffer);
912 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
913 (struct r600_resource*)buffer,
914 buffers->shader_usage, buffers->priority);
915 buffers->desc.enabled_mask |= 1llu << slot;
917 /* Clear the descriptor. */
918 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
919 buffers->desc.enabled_mask &= ~(1llu << slot);
922 buffers->desc.list_dirty = true;
925 /* STREAMOUT BUFFERS */
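/* Bind streamout targets both in VGT and as shader resources, flushing or
 * invalidating caches as needed when switching targets. */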
927 static void si_set_streamout_targets(struct pipe_context *ctx,
928 unsigned num_targets,
929 struct pipe_stream_output_target **targets,
930 const unsigned *offsets)
932 struct si_context *sctx = (struct si_context *)ctx;
933 struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
934 unsigned old_num_targets = sctx->b.streamout.num_targets;
937 /* We are going to unbind the buffers. Mark which caches need to be flushed. */
938 if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
939 /* Since streamout uses vector writes which go through TC L2
940 * and most other clients can use TC L2 as well, we don't need
943 * The only case which requires flushing it is VGT DMA index
944 * fetching, which is a rare case. Thus, flag the TC L2
945 * dirtiness in the resource and handle it when index fetching
948 for (i = 0; i < sctx->b.streamout.num_targets; i++)
949 if (sctx->b.streamout.targets[i])
950 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
952 /* Invalidate the scalar cache in case a streamout buffer is
953 * going to be used as a constant buffer.
955 * Invalidate TC L1, because streamout bypasses it (done by
956 * setting GLC=1 in the store instruction), but it can contain
957 * outdated data of streamout buffers.
959 * VS_PARTIAL_FLUSH is required if the buffers are going to be
960 * used as an input immediately.
962 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
963 SI_CONTEXT_INV_VMEM_L1 |
964 SI_CONTEXT_VS_PARTIAL_FLUSH;
967 /* All readers of the streamout targets need to be finished before we can
968 * start writing to the targets.
971 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
973 /* Streamout buffers must be bound in 2 places:
974 * 1) in VGT by setting the VGT_STRMOUT registers
975 * 2) as shader resources
978 /* Set the VGT regs. */
979 r600_set_streamout_targets(ctx, num_targets, targets, offsets);
981 /* Set the shader resources. */
982 for (i = 0; i < num_targets; i++) {
983 bufidx = SI_SO_BUF_OFFSET + i;
986 struct pipe_resource *buffer = targets[i]->buffer;
987 uint64_t va = r600_resource(buffer)->gpu_address;
989 /* Set the descriptor.
991 * On VI, the format must be non-INVALID, otherwise
992 * the buffer will be considered not bound and store
993 * instructions will be no-ops.
995 uint32_t *desc = buffers->desc.list + bufidx*4;
997 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
998 desc[2] = 0xffffffff;
999 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1000 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1001 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1002 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1003 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1005 /* Set the resource. */
1006 pipe_resource_reference(&buffers->buffers[bufidx],
1008 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1009 (struct r600_resource*)buffer,
1010 buffers->shader_usage, buffers->priority);
1011 buffers->desc.enabled_mask |= 1llu << bufidx;
1013 /* Clear the descriptor and unset the resource. */
1014 memset(buffers->desc.list + bufidx*4, 0,
1015 sizeof(uint32_t) * 4);
1016 pipe_resource_reference(&buffers->buffers[bufidx],
1018 buffers->desc.enabled_mask &= ~(1llu << bufidx);
1021 for (; i < old_num_targets; i++) {
1022 bufidx = SI_SO_BUF_OFFSET + i;
1023 /* Clear the descriptor and unset the resource. */
1024 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
1025 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1026 buffers->desc.enabled_mask &= ~(1llu << bufidx);
1029 buffers->desc.list_dirty = true;
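/* Patch the 64-bit base address in a buffer descriptor after the backing
 * storage has been reallocated, preserving the offset within the buffer. */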
1032 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1033 uint32_t *desc, uint64_t old_buf_va,
1034 struct pipe_resource *new_buf)
1036 /* Retrieve the buffer offset from the descriptor. */
1037 uint64_t old_desc_va =
1038 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1040 assert(old_buf_va <= old_desc_va);
1041 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1043 /* Update the descriptor. */
1044 uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
1047 desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
1048 S_008F04_BASE_ADDRESS_HI(va >> 32);
1051 /* TEXTURE METADATA ENABLE/DISABLE */
1053 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1054 * while the texture is bound, possibly by a different context. In that case,
1055 * call this function to update compressed_colortex_masks.
1057 void si_update_compressed_colortex_masks(struct si_context *sctx)
1059 for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1060 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1061 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1065 /* BUFFER DISCARD/INVALIDATION */
1067 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1068 static void si_reset_buffer_resources(struct si_context *sctx,
1069 struct si_buffer_resources *buffers,
1070 struct pipe_resource *buf,
1073 uint64_t mask = buffers->desc.enabled_mask;
1076 unsigned i = u_bit_scan64(&mask);
1077 if (buffers->buffers[i] == buf) {
1078 si_desc_reset_buffer_offset(&sctx->b.b,
1079 buffers->desc.list + i*4,
1081 buffers->desc.list_dirty = true;
1083 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1084 (struct r600_resource *)buf,
1085 buffers->shader_usage,
1091 /* Reallocate a buffer and update all resource bindings where the buffer is
1094 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1095 * idle by discarding its contents. Apps usually tell us when to do this using
1096 * map_buffer flags, for example.
1098 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1100 struct si_context *sctx = (struct si_context*)ctx;
1101 struct r600_resource *rbuffer = r600_resource(buf);
1102 unsigned i, shader, alignment = rbuffer->buf->alignment;
1103 uint64_t old_va = rbuffer->gpu_address;
1104 unsigned num_elems = sctx->vertex_elements ?
1105 sctx->vertex_elements->count : 0;
1106 struct si_sampler_view *view;
1108 /* Reallocate the buffer in the same pipe_resource. */
1109 r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
1112 /* We changed the buffer; now we need to bind it where the old one
1113 * was bound. This consists of 2 things:
1114 * 1) Updating the resource descriptor and dirtying it.
1115 * 2) Adding a relocation to the CS, so that it's usable.
1118 /* Vertex buffers. */
1119 for (i = 0; i < num_elems; i++) {
1120 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1122 if (vb >= Elements(sctx->vertex_buffer))
1124 if (!sctx->vertex_buffer[vb].buffer)
1127 if (sctx->vertex_buffer[vb].buffer == buf) {
1128 sctx->vertex_buffers_dirty = true;
1133 /* Read/Write buffers. */
1134 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1135 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
1136 uint64_t mask = buffers->desc.enabled_mask;
1139 i = u_bit_scan64(&mask);
1140 if (buffers->buffers[i] == buf) {
1141 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1143 buffers->desc.list_dirty = true;
1145 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1146 rbuffer, buffers->shader_usage,
1149 if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
1150 /* Update the streamout state. */
1151 if (sctx->b.streamout.begin_emitted) {
1152 r600_emit_streamout_end(&sctx->b);
1154 sctx->b.streamout.append_bitmask =
1155 sctx->b.streamout.enabled_mask;
1156 r600_streamout_buffers_dirty(&sctx->b);
1162 /* Constant and shader buffers. */
1163 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1164 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1166 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1170 /* Texture buffers - update virtual addresses in sampler view descriptors. */
1171 LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1172 if (view->base.texture == buf) {
1173 si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1176 /* Texture buffers - update bindings. */
1177 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1178 struct si_sampler_views *views = &sctx->samplers[shader].views;
1179 uint64_t mask = views->desc.enabled_mask;
1182 unsigned i = u_bit_scan64(&mask);
1183 if (views->views[i]->texture == buf) {
1184 si_desc_reset_buffer_offset(ctx,
1188 views->desc.list_dirty = true;
1190 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1191 rbuffer, RADEON_USAGE_READ,
1192 RADEON_PRIO_SAMPLER_BUFFER);
1198 for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1199 struct si_images_info *images = &sctx->images[shader];
1200 unsigned mask = images->desc.enabled_mask;
1203 unsigned i = u_bit_scan(&mask);
1205 if (images->views[i].resource == buf) {
1206 si_desc_reset_buffer_offset(
1207 ctx, images->desc.list + i * 8 + 4,
1209 images->desc.list_dirty = true;
1211 radeon_add_to_buffer_list(
1212 &sctx->b, &sctx->b.gfx, rbuffer,
1213 RADEON_USAGE_READWRITE,
1214 RADEON_PRIO_SAMPLER_BUFFER);
1220 /* SHADER USER DATA */
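/* Mark all descriptor pointers of one shader stage as dirty so that they are
 * re-emitted into the user data SGPRs. */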
1222 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1225 sctx->const_buffers[shader].desc.pointer_dirty = true;
1226 sctx->rw_buffers[shader].desc.pointer_dirty = true;
1227 sctx->shader_buffers[shader].desc.pointer_dirty = true;
1228 sctx->samplers[shader].views.desc.pointer_dirty = true;
1229 sctx->images[shader].desc.pointer_dirty = true;
1231 if (shader == PIPE_SHADER_VERTEX)
1232 sctx->vertex_buffers.pointer_dirty = true;
1234 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1237 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1241 for (i = 0; i < SI_NUM_SHADERS; i++) {
1242 si_mark_shader_pointers_dirty(sctx, i);
1246 /* Set a base register address for user data constants in the given shader.
1247 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1249 static void si_set_user_data_base(struct si_context *sctx,
1250 unsigned shader, uint32_t new_base)
1252 uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1254 if (*base != new_base) {
1258 si_mark_shader_pointers_dirty(sctx, shader);
1262 /* This must be called when these shaders are changed from non-NULL to NULL
1265 * - tessellation control shader
1266 * - tessellation evaluation shader
1268 void si_shader_change_notify(struct si_context *sctx)
1270 /* VS can be bound as VS, ES, or LS. */
1271 if (sctx->tes_shader.cso)
1272 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1273 R_00B530_SPI_SHADER_USER_DATA_LS_0);
1274 else if (sctx->gs_shader.cso)
1275 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1276 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1278 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1279 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1281 /* TES can be bound as ES, VS, or not bound. */
1282 if (sctx->tes_shader.cso) {
1283 if (sctx->gs_shader.cso)
1284 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1285 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1287 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1288 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1290 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
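/* Emit a SET_SH_REG packet writing the GPU address of a descriptor list into
 * the user data SGPRs at sh_base + shader_userdata_offset. */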
1294 static void si_emit_shader_pointer(struct si_context *sctx,
1295 struct si_descriptors *desc,
1296 unsigned sh_base, bool keep_dirty)
1298 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1301 if (!desc->pointer_dirty || !desc->buffer)
1304 va = desc->buffer->gpu_address +
1305 desc->buffer_offset;
1307 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1308 radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1309 radeon_emit(cs, va);
1310 radeon_emit(cs, va >> 32);
1312 desc->pointer_dirty = keep_dirty;
1315 void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
1318 uint32_t *sh_base = sctx->shader_userdata.sh_base;
1320 if (sctx->gs_shader.cso) {
1321 /* The VS copy shader needs these for clipping, streamout, and rings. */
1322 unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
1323 unsigned i = PIPE_SHADER_VERTEX;
1325 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
1326 si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);
1328 if (sctx->tes_shader.cso) {
1329 /* The TESSEVAL shader needs this for the ESGS ring buffer. */
1330 si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
1331 R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1333 } else if (sctx->tes_shader.cso) {
1334 /* The TESSEVAL shader needs this for streamout. */
1335 si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
1336 R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1339 for (i = 0; i < SI_NUM_SHADERS; i++) {
1340 unsigned base = sh_base[i];
1345 if (i != PIPE_SHADER_TESS_EVAL)
1346 si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);
1348 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
1349 si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
1350 si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
1351 si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
1353 si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1356 /* INIT/DEINIT/UPLOAD */
1358 void si_init_all_descriptors(struct si_context *sctx)
1361 unsigned ce_offset = 0;
1363 for (i = 0; i < SI_NUM_SHADERS; i++) {
1364 si_init_buffer_resources(&sctx->const_buffers[i],
1365 SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1366 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
1368 si_init_buffer_resources(&sctx->rw_buffers[i],
1369 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1370 RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
1372 si_init_buffer_resources(&sctx->shader_buffers[i],
1373 SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
1374 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
1377 si_init_descriptors(&sctx->samplers[i].views.desc,
1378 SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1379 null_texture_descriptor, &ce_offset);
1381 si_init_descriptors(&sctx->images[i].desc,
1382 SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1383 null_image_descriptor, &ce_offset);
1386 si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1387 4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
1389 assert(ce_offset <= 32768);
1391 /* Set pipe_context functions. */
1392 sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1393 sctx->b.b.set_shader_images = si_set_shader_images;
1394 sctx->b.b.set_constant_buffer = si_set_constant_buffer;
1395 sctx->b.b.set_shader_buffers = si_set_shader_buffers;
1396 sctx->b.b.set_sampler_views = si_set_sampler_views;
1397 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1398 sctx->b.invalidate_buffer = si_invalidate_buffer;
1400 /* Shader user data. */
1401 si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1402 si_emit_shader_userdata);
1404 /* Set default and immutable mappings. */
1405 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1406 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1407 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1408 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1411 bool si_upload_shader_descriptors(struct si_context *sctx)
1415 for (i = 0; i < SI_NUM_SHADERS; i++) {
1416 if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
1417 !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
1418 !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc) ||
1419 !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc) ||
1420 !si_upload_descriptors(sctx, &sctx->images[i].desc))
1423 return si_upload_vertex_buffer_descriptors(sctx);
1426 void si_release_all_descriptors(struct si_context *sctx)
1430 for (i = 0; i < SI_NUM_SHADERS; i++) {
1431 si_release_buffer_resources(&sctx->const_buffers[i]);
1432 si_release_buffer_resources(&sctx->rw_buffers[i]);
1433 si_release_buffer_resources(&sctx->shader_buffers[i]);
1434 si_release_sampler_views(&sctx->samplers[i].views);
1435 si_release_image_views(&sctx->images[i]);
1437 si_release_descriptors(&sctx->vertex_buffers);
1440 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1444 for (i = 0; i < SI_NUM_SHADERS; i++) {
1445 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1446 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
1447 si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
1448 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1449 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1451 si_vertex_buffers_begin_new_cs(sctx);
1452 si_shader_userdata_begin_new_cs(sctx);