/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <marek.olsak@amd.com>
 */
/* Resource binding slots and sampler states (each described with 8 or
 * 4 dwords) are stored in lists in memory which is accessed by shaders
 * using scalar load instructions.
 *
 * This file is responsible for managing such lists. It keeps a copy of all
 * descriptors in CPU memory and re-uploads a whole list if some slots have
 * been changed.
 *
 * This code is also responsible for updating shader pointers to those lists.
 *
 * Note that CP DMA can't be used for updating the lists, because a GPU hang
 * could leave the list in a mid-IB state and the next IB would get wrong
 * descriptors and the whole context would be unusable at that point.
 * (Note: The register shadowing can't be used due to the same reason)
 *
 * Also, uploading descriptors to newly allocated memory doesn't require
 * a KCACHE flush.
 *
 *
 * Possible scenarios for one 16 dword image+sampler slot:
 *
 *       | Image        | w/ FMASK   | Buffer      | NULL
 * [ 0: 3] Image[0:3]   | Image[0:3] | Null[0:3]   | Null[0:3]
 * [ 4: 7] Image[4:7]   | Image[4:7] | Buffer[0:3] | 0
 * [ 8:11] Null[0:3]    | Fmask[0:3] | Null[0:3]   | Null[0:3]
 * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3]| Sampler[0:3]
 *
 * FMASK implies MSAA, therefore no sampler state.
 * Sampler states are never unbound except when FMASK is bound.
 */
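/* Addressing within a list (a sketch based on how the slots are written
 * below): the descriptor for slot N starts at dword N * element_dw_size,
 * i.e. at byte offset N * element_dw_size * 4.  For example, an
 * image+sampler slot (element_dw_size = 16) for slot 2 occupies
 * desc->list[32..47].
 */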
58 #include "radeon/r600_cs.h"
60 #include "si_shader.h"
63 #include "util/u_format.h"
64 #include "util/u_math.h"
65 #include "util/u_memory.h"
66 #include "util/u_suballoc.h"
67 #include "util/u_upload_mgr.h"
/* NULL image and buffer descriptor for textures (alpha = 1) and images
 * (alpha = 0).
 *
 * For images, all fields must be zero except for the swizzle, which
 * supports arbitrary combinations of 0s and 1s. The texture type must be
 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
 *
 * For buffers, all fields must be zero. If they are not, the hw hangs.
 *
 * This is the only reason why the buffer descriptor must be in words [4:7].
 */
static uint32_t null_texture_descriptor[8] = {
	0,
	0,
	0,
	S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
	S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
	/* the rest must contain zeros, which is also used by the buffer
	 * descriptor */
};

static uint32_t null_image_descriptor[8] = {
	0,
	0,
	0,
	S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
	/* the rest must contain zeros, which is also used by the buffer
	 * descriptor */
};
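/* Initialize a descriptor list: allocate the CPU-side copy, mark all
 * elements dirty, and, when a CE offset pointer is given, reserve a
 * 32-byte aligned range of CE RAM for the list.  If null_descriptor is
 * non-NULL, every element is pre-filled with it.
 */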
100 static void si_init_descriptors(struct si_descriptors *desc,
101 unsigned shader_userdata_index,
102 unsigned element_dw_size,
103 unsigned num_elements,
104 const uint32_t *null_descriptor,
109 assert(num_elements <= sizeof(desc->enabled_mask)*8);
111 desc->list = CALLOC(num_elements, element_dw_size * 4);
112 desc->element_dw_size = element_dw_size;
113 desc->num_elements = num_elements;
114 desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
115 desc->shader_userdata_offset = shader_userdata_index * 4;
118 desc->ce_offset = *ce_offset;
120 /* make sure that ce_offset stays 32 byte aligned */
121 *ce_offset += align(element_dw_size * num_elements * 4, 32);
124 /* Initialize the array to NULL descriptors if the element size is 8. */
125 if (null_descriptor) {
126 assert(element_dw_size % 8 == 0);
127 for (i = 0; i < num_elements * element_dw_size / 8; i++)
128 memcpy(desc->list + i * 8, null_descriptor,
133 static void si_release_descriptors(struct si_descriptors *desc)
pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
FREE(desc->list);
}
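/* Ask the constant engine to dump the current contents of a CE RAM range
 * into a freshly suballocated GPU buffer (DUMP_CONST_RAM), returning the
 * buffer and offset so the shader pointers can be pointed at it.
 */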
139 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
140 unsigned *out_offset, struct r600_resource **out_buf) {
143 u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
144 (struct pipe_resource**)out_buf);
148 va = (*out_buf)->gpu_address + *out_offset;
150 radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
151 radeon_emit(sctx->ce_ib, ce_offset);
152 radeon_emit(sctx->ce_ib, size / 4);
153 radeon_emit(sctx->ce_ib, va);
154 radeon_emit(sctx->ce_ib, va >> 32);
156 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
157 RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
sctx->ce_need_synchronization = true;

return true;
}
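/* Reload a descriptor list from its last uploaded GPU buffer back into
 * CE RAM (LOAD_CONST_RAM).  Used when the CE RAM contents may be stale,
 * e.g. at the beginning of a new command stream.
 */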
163 static void si_ce_reinitialize_descriptors(struct si_context *sctx,
164 struct si_descriptors *desc)
167 struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
168 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
169 uint64_t va = buffer->gpu_address + desc->buffer_offset;
170 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
175 list_size = align(list_size, 32);
177 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
179 radeon_emit(ib, va >> 32);
180 radeon_emit(ib, list_size / 4);
181 radeon_emit(ib, desc->ce_offset);
183 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
184 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
186 desc->ce_ram_dirty = false;
189 void si_ce_reinitialize_all_descriptors(struct si_context *sctx)
191 for (int i = 0; i < SI_NUM_SHADERS; i++) {
192 si_ce_reinitialize_descriptors(sctx, &sctx->const_buffers[i].desc);
193 si_ce_reinitialize_descriptors(sctx, &sctx->shader_buffers[i].desc);
194 si_ce_reinitialize_descriptors(sctx, &sctx->samplers[i].views.desc);
195 si_ce_reinitialize_descriptors(sctx, &sctx->images[i].desc);
197 si_ce_reinitialize_descriptors(sctx, &sctx->rw_buffers.desc);
200 void si_ce_enable_loads(struct radeon_winsys_cs *ib)
202 radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
203 radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
204 CONTEXT_CONTROL_LOAD_CE_RAM(1));
radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
}
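/* Upload a dirty descriptor list to GPU-visible memory.
 *
 * With a CE IB, only the dirty consecutive element ranges are rewritten in
 * CE RAM (WRITE_CONST_RAM) and the whole list is then dumped to a new
 * suballocation.  Without CE, the whole list is copied through the upload
 * manager.  In both cases the shader pointer is marked dirty afterwards.
 */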
208 static bool si_upload_descriptors(struct si_context *sctx,
209 struct si_descriptors *desc,
210 struct r600_atom * atom)
212 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
214 if (!desc->dirty_mask)
218 uint32_t const* list = (uint32_t const*)desc->list;
220 if (desc->ce_ram_dirty)
221 si_ce_reinitialize_descriptors(sctx, desc);
223 while(desc->dirty_mask) {
225 u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
228 begin *= desc->element_dw_size;
229 count *= desc->element_dw_size;
231 radeon_emit(sctx->ce_ib,
232 PKT3(PKT3_WRITE_CONST_RAM, count, 0));
233 radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
234 radeon_emit_array(sctx->ce_ib, list + begin, count);
237 if (!si_ce_upload(sctx, desc->ce_offset, list_size,
238 &desc->buffer_offset, &desc->buffer))
243 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
244 &desc->buffer_offset,
245 (struct pipe_resource**)&desc->buffer, &ptr);
247 return false; /* skip the draw call */
249 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
251 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
252 RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
254 desc->pointer_dirty = true;
255 desc->dirty_mask = 0;
258 si_mark_atom_dirty(sctx, atom);
265 static void si_release_sampler_views(struct si_sampler_views *views)
269 for (i = 0; i < ARRAY_SIZE(views->views); i++) {
270 pipe_sampler_view_reference(&views->views[i], NULL);
272 si_release_descriptors(&views->desc);
275 static void si_sampler_view_add_buffer(struct si_context *sctx,
276 struct pipe_resource *resource,
277 enum radeon_bo_usage usage)
279 struct r600_resource *rres = (struct r600_resource*)resource;
284 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres, usage,
285 r600_get_sampler_view_priority(rres));
288 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
289 struct si_sampler_views *views)
291 unsigned mask = views->desc.enabled_mask;
293 /* Add buffers to the CS. */
295 int i = u_bit_scan(&mask);
297 si_sampler_view_add_buffer(sctx, views->views[i]->texture,
301 views->desc.ce_ram_dirty = true;
303 if (!views->desc.buffer)
305 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
}
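/* Bind one sampler view to a 16-dword image+sampler slot, following the
 * layout described at the top of this file: image descriptor in [0:7],
 * FMASK (or a null descriptor) in [8:11], and the sampler state in
 * [12:15] when no FMASK is bound.
 */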
309 static void si_set_sampler_view(struct si_context *sctx,
310 struct si_sampler_views *views,
311 unsigned slot, struct pipe_sampler_view *view)
313 struct si_sampler_view *rview = (struct si_sampler_view*)view;
315 if (view && view->texture && view->texture->target != PIPE_BUFFER &&
316 G_008F28_COMPRESSION_EN(rview->state[6]) &&
317 ((struct r600_texture*)view->texture)->dcc_offset == 0) {
318 rview->state[6] &= C_008F28_COMPRESSION_EN &
319 C_008F28_ALPHA_IS_ON_MSB;
320 } else if (views->views[slot] == view)
324 struct r600_texture *rtex = (struct r600_texture *)view->texture;
326 si_sampler_view_add_buffer(sctx, view->texture,
329 pipe_sampler_view_reference(&views->views[slot], view);
330 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
332 if (view->texture && view->texture->target != PIPE_BUFFER &&
334 memcpy(views->desc.list + slot*16 + 8,
335 rview->fmask_state, 8*4);
337 /* Disable FMASK and bind sampler state in [12:15]. */
338 memcpy(views->desc.list + slot*16 + 8,
339 null_texture_descriptor, 4*4);
341 if (views->sampler_states[slot])
342 memcpy(views->desc.list + slot*16 + 12,
343 views->sampler_states[slot], 4*4);
346 views->desc.enabled_mask |= 1u << slot;
348 pipe_sampler_view_reference(&views->views[slot], NULL);
349 memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
350 /* Only clear the lower dwords of FMASK. */
351 memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
352 views->desc.enabled_mask &= ~(1u << slot);
355 views->desc.dirty_mask |= 1u << slot;
358 static bool is_compressed_colortex(struct r600_texture *rtex)
360 return rtex->cmask.size || rtex->fmask.size ||
361 (rtex->dcc_offset && rtex->dirty_level_mask);
364 static void si_set_sampler_views(struct pipe_context *ctx,
365 unsigned shader, unsigned start,
367 struct pipe_sampler_view **views)
369 struct si_context *sctx = (struct si_context *)ctx;
370 struct si_textures_info *samplers = &sctx->samplers[shader];
373 if (!count || shader >= SI_NUM_SHADERS)
376 for (i = 0; i < count; i++) {
377 unsigned slot = start + i;
379 if (!views || !views[i]) {
380 samplers->depth_texture_mask &= ~(1u << slot);
381 samplers->compressed_colortex_mask &= ~(1u << slot);
382 si_set_sampler_view(sctx, &samplers->views, slot, NULL);
386 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
388 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
389 struct r600_texture *rtex =
390 (struct r600_texture*)views[i]->texture;
392 if (rtex->is_depth && !rtex->is_flushing_texture) {
393 samplers->depth_texture_mask |= 1u << slot;
395 samplers->depth_texture_mask &= ~(1u << slot);
397 if (is_compressed_colortex(rtex)) {
398 samplers->compressed_colortex_mask |= 1u << slot;
400 samplers->compressed_colortex_mask &= ~(1u << slot);
403 samplers->depth_texture_mask &= ~(1u << slot);
404 samplers->compressed_colortex_mask &= ~(1u << slot);
410 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
412 unsigned mask = samplers->views.desc.enabled_mask;
415 int i = u_bit_scan(&mask);
416 struct pipe_resource *res = samplers->views.views[i]->texture;
418 if (res && res->target != PIPE_BUFFER) {
419 struct r600_texture *rtex = (struct r600_texture *)res;
421 if (is_compressed_colortex(rtex)) {
422 samplers->compressed_colortex_mask |= 1u << i;
424 samplers->compressed_colortex_mask &= ~(1u << i);
433 si_release_image_views(struct si_images_info *images)
437 for (i = 0; i < SI_NUM_IMAGES; ++i) {
438 struct pipe_image_view *view = &images->views[i];
440 pipe_resource_reference(&view->resource, NULL);
443 si_release_descriptors(&images->desc);
447 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
449 uint mask = images->desc.enabled_mask;
451 /* Add buffers to the CS. */
453 int i = u_bit_scan(&mask);
454 struct pipe_image_view *view = &images->views[i];
456 assert(view->resource);
458 si_sampler_view_add_buffer(sctx, view->resource,
459 RADEON_USAGE_READWRITE);
462 images->desc.ce_ram_dirty = true;
464 if (images->desc.buffer) {
465 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
468 RADEON_PRIO_DESCRIPTORS);
473 si_disable_shader_image(struct si_images_info *images, unsigned slot)
475 if (images->desc.enabled_mask & (1u << slot)) {
476 pipe_resource_reference(&images->views[slot].resource, NULL);
477 images->compressed_colortex_mask &= ~(1 << slot);
479 memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
480 images->desc.enabled_mask &= ~(1u << slot);
		images->desc.dirty_mask |= 1u << slot;
	}
}
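/* Record the byte range covered by a writable buffer image view in the
 * resource's valid_buffer_range, using the view format's block size as
 * the element stride.
 */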
486 si_mark_image_range_valid(struct pipe_image_view *view)
488 struct r600_resource *res = (struct r600_resource *)view->resource;
489 const struct util_format_description *desc;
492 assert(res && res->b.b.target == PIPE_BUFFER);
494 desc = util_format_description(view->format);
495 stride = desc->block.bits / 8;
497 util_range_add(&res->valid_buffer_range,
498 stride * (view->u.buf.first_element),
	       stride * (view->u.buf.last_element + 1));
}
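/* Bind images to shader image slots: buffer resources get a 4-dword
 * buffer descriptor, textures get an 8-dword image descriptor with the
 * base level forced to the selected level, and DCC is disabled for
 * writable textures.
 */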
503 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
504 unsigned start_slot, unsigned count,
505 struct pipe_image_view *views)
507 struct si_context *ctx = (struct si_context *)pipe;
508 struct si_screen *screen = ctx->screen;
509 struct si_images_info *images = &ctx->images[shader];
512 assert(shader < SI_NUM_SHADERS);
517 assert(start_slot + count <= SI_NUM_IMAGES);
519 for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
520 struct r600_resource *res;
522 if (!views || !views[i].resource) {
523 si_disable_shader_image(images, slot);
527 res = (struct r600_resource *)views[i].resource;
528 util_copy_image_view(&images->views[slot], &views[i]);
530 si_sampler_view_add_buffer(ctx, &res->b.b,
531 RADEON_USAGE_READWRITE);
533 if (res->b.b.target == PIPE_BUFFER) {
534 if (views[i].access & PIPE_IMAGE_ACCESS_WRITE)
535 si_mark_image_range_valid(&views[i]);
537 si_make_buffer_descriptor(screen, res,
539 views[i].u.buf.first_element,
540 views[i].u.buf.last_element,
541 images->desc.list + slot * 8);
542 images->compressed_colortex_mask &= ~(1 << slot);
544 static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
545 struct r600_texture *tex = (struct r600_texture *)res;
547 unsigned width, height, depth;
549 assert(!tex->is_depth);
550 assert(tex->fmask.size == 0);
552 if (tex->dcc_offset &&
553 views[i].access & PIPE_IMAGE_ACCESS_WRITE)
554 r600_texture_disable_dcc(&screen->b, tex);
556 if (is_compressed_colortex(tex)) {
557 images->compressed_colortex_mask |= 1 << slot;
559 images->compressed_colortex_mask &= ~(1 << slot);
562 /* Always force the base level to the selected level.
564 * This is required for 3D textures, where otherwise
565 * selecting a single slice for non-layered bindings
566 * fails. It doesn't hurt the other targets.
568 level = views[i].u.tex.level;
569 width = u_minify(res->b.b.width0, level);
570 height = u_minify(res->b.b.height0, level);
571 depth = u_minify(res->b.b.depth0, level);
573 si_make_texture_descriptor(screen, tex, false, res->b.b.target,
574 views[i].format, swizzle,
576 views[i].u.tex.first_layer, views[i].u.tex.last_layer,
577 width, height, depth,
578 images->desc.list + slot * 8,
582 images->desc.enabled_mask |= 1u << slot;
583 images->desc.dirty_mask |= 1u << slot;
588 si_images_update_compressed_colortex_mask(struct si_images_info *images)
590 unsigned mask = images->desc.enabled_mask;
593 int i = u_bit_scan(&mask);
594 struct pipe_resource *res = images->views[i].resource;
596 if (res && res->target != PIPE_BUFFER) {
597 struct r600_texture *rtex = (struct r600_texture *)res;
599 if (is_compressed_colortex(rtex)) {
600 images->compressed_colortex_mask |= 1 << i;
602 images->compressed_colortex_mask &= ~(1 << i);
610 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
611 unsigned start, unsigned count, void **states)
613 struct si_context *sctx = (struct si_context *)ctx;
614 struct si_textures_info *samplers = &sctx->samplers[shader];
615 struct si_descriptors *desc = &samplers->views.desc;
616 struct si_sampler_state **sstates = (struct si_sampler_state**)states;
619 if (!count || shader >= SI_NUM_SHADERS)
622 for (i = 0; i < count; i++) {
623 unsigned slot = start + i;
626 sstates[i] == samplers->views.sampler_states[slot])
629 samplers->views.sampler_states[slot] = sstates[i];
631 /* If FMASK is bound, don't overwrite it.
632 * The sampler state will be set after FMASK is unbound.
if (samplers->views.views[slot] &&
    samplers->views.views[slot]->texture &&
    samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
    ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
640 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
641 desc->dirty_mask |= 1u << slot;
645 /* BUFFER RESOURCES */
647 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
648 unsigned num_buffers,
649 unsigned shader_userdata_index,
650 enum radeon_bo_usage shader_usage,
651 enum radeon_bo_priority priority,
654 buffers->shader_usage = shader_usage;
655 buffers->priority = priority;
656 buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
658 si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
659 num_buffers, NULL, ce_offset);
662 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
666 for (i = 0; i < buffers->desc.num_elements; i++) {
667 pipe_resource_reference(&buffers->buffers[i], NULL);
670 FREE(buffers->buffers);
671 si_release_descriptors(&buffers->desc);
674 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
675 struct si_buffer_resources *buffers)
677 unsigned mask = buffers->desc.enabled_mask;
679 /* Add buffers to the CS. */
681 int i = u_bit_scan(&mask);
683 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
684 (struct r600_resource*)buffers->buffers[i],
685 buffers->shader_usage, buffers->priority);
688 buffers->desc.ce_ram_dirty = true;
690 if (!buffers->desc.buffer)
692 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
693 buffers->desc.buffer, RADEON_USAGE_READWRITE,
694 RADEON_PRIO_DESCRIPTORS);
699 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
701 struct si_descriptors *desc = &sctx->vertex_buffers;
702 int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
705 for (i = 0; i < count; i++) {
706 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
708 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
710 if (!sctx->vertex_buffer[vb].buffer)
713 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
714 (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
715 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
720 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
721 desc->buffer, RADEON_USAGE_READ,
RADEON_PRIO_DESCRIPTORS);
}
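/* Build one 4-dword buffer resource descriptor (T#) per vertex element
 * and upload them all into a fresh buffer through the upload manager;
 * vertex buffers bypass the CE/fine-grained path used by the other lists.
 */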
725 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
727 struct si_descriptors *desc = &sctx->vertex_buffers;
728 bool bound[SI_NUM_VERTEX_BUFFERS] = {};
729 unsigned i, count = sctx->vertex_elements->count;
733 if (!sctx->vertex_buffers_dirty)
735 if (!count || !sctx->vertex_elements)
738 /* Vertex buffer descriptors are the only ones which are uploaded
739 * directly through a staging buffer and don't go through
740 * the fine-grained upload path.
742 u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
743 (struct pipe_resource**)&desc->buffer, (void**)&ptr);
747 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
748 desc->buffer, RADEON_USAGE_READ,
749 RADEON_PRIO_DESCRIPTORS);
751 assert(count <= SI_NUM_VERTEX_BUFFERS);
753 for (i = 0; i < count; i++) {
754 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
755 struct pipe_vertex_buffer *vb;
756 struct r600_resource *rbuffer;
758 uint32_t *desc = &ptr[i*4];
760 if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
765 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
766 rbuffer = (struct r600_resource*)vb->buffer;
772 offset = vb->buffer_offset + ve->src_offset;
773 va = rbuffer->gpu_address + offset;
775 /* Fill in T# buffer resource description */
777 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
778 S_008F04_STRIDE(vb->stride);
if (sctx->b.chip_class <= CIK && vb->stride)
	/* Round up by rounding down and adding 1 */
	desc[2] = (vb->buffer->width0 - offset -
		   sctx->vertex_elements->format_size[i]) /
		  vb->stride + 1;
else
	desc[2] = vb->buffer->width0 - offset;
788 desc[3] = sctx->vertex_elements->rsrc_word3[i];
790 if (!bound[ve->vertex_buffer_index]) {
791 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
792 (struct r600_resource*)vb->buffer,
793 RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
794 bound[ve->vertex_buffer_index] = true;
798 /* Don't flush the const cache. It would have a very negative effect
799 * on performance (confirmed by testing). New descriptors are always
800 * uploaded to a fresh new buffer, so I don't think flushing the const
801 * cache is needed. */
802 desc->pointer_dirty = true;
803 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
804 sctx->vertex_buffers_dirty = false;
809 /* CONSTANT BUFFERS */
811 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
812 const uint8_t *ptr, unsigned size, uint32_t *const_offset)
816 u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
817 (struct pipe_resource**)rbuffer, &tmp);
819 util_memcpy_cpu_to_le32(tmp, ptr, size);
822 static void si_set_constant_buffer(struct si_context *sctx,
823 struct si_buffer_resources *buffers,
824 uint slot, struct pipe_constant_buffer *input)
826 assert(slot < buffers->desc.num_elements);
827 pipe_resource_reference(&buffers->buffers[slot], NULL);
829 /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
830 * with a NULL buffer). We need to use a dummy buffer instead. */
831 if (sctx->b.chip_class == CIK &&
832 (!input || (!input->buffer && !input->user_buffer)))
833 input = &sctx->null_const_buf;
835 if (input && (input->buffer || input->user_buffer)) {
836 struct pipe_resource *buffer = NULL;
839 /* Upload the user buffer if needed. */
840 if (input->user_buffer) {
841 unsigned buffer_offset;
843 si_upload_const_buffer(sctx,
844 (struct r600_resource**)&buffer, input->user_buffer,
845 input->buffer_size, &buffer_offset);
847 /* Just unbind on failure. */
848 si_set_constant_buffer(sctx, buffers, slot, NULL);
851 va = r600_resource(buffer)->gpu_address + buffer_offset;
853 pipe_resource_reference(&buffer, input->buffer);
854 va = r600_resource(buffer)->gpu_address + input->buffer_offset;
857 /* Set the descriptor. */
858 uint32_t *desc = buffers->desc.list + slot*4;
860 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
862 desc[2] = input->buffer_size;
863 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
864 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
865 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
866 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
867 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
868 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
870 buffers->buffers[slot] = buffer;
871 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
872 (struct r600_resource*)buffer,
873 buffers->shader_usage, buffers->priority);
874 buffers->desc.enabled_mask |= 1u << slot;
876 /* Clear the descriptor. */
877 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
878 buffers->desc.enabled_mask &= ~(1u << slot);
881 buffers->desc.dirty_mask |= 1u << slot;
884 void si_set_rw_buffer(struct si_context *sctx,
885 uint slot, struct pipe_constant_buffer *input)
887 si_set_constant_buffer(sctx, &sctx->rw_buffers, slot, input);
890 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
891 uint shader, uint slot,
892 struct pipe_constant_buffer *input)
894 struct si_context *sctx = (struct si_context *)ctx;
896 if (shader >= SI_NUM_SHADERS)
899 si_set_constant_buffer(sctx, &sctx->const_buffers[shader], slot, input);
904 static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
905 unsigned start_slot, unsigned count,
906 struct pipe_shader_buffer *sbuffers)
908 struct si_context *sctx = (struct si_context *)ctx;
909 struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
912 assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
914 for (i = 0; i < count; ++i) {
915 struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
916 struct r600_resource *buf;
917 unsigned slot = start_slot + i;
918 uint32_t *desc = buffers->desc.list + slot * 4;
921 if (!sbuffer || !sbuffer->buffer) {
922 pipe_resource_reference(&buffers->buffers[slot], NULL);
923 memset(desc, 0, sizeof(uint32_t) * 4);
924 buffers->desc.enabled_mask &= ~(1u << slot);
925 buffers->desc.dirty_mask |= 1u << slot;
929 buf = (struct r600_resource *)sbuffer->buffer;
930 va = buf->gpu_address + sbuffer->buffer_offset;
933 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
935 desc[2] = sbuffer->buffer_size;
936 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
937 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
938 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
939 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
940 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
941 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
943 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
944 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
945 buffers->shader_usage, buffers->priority);
946 buffers->desc.enabled_mask |= 1u << slot;
		buffers->desc.dirty_mask |= 1u << slot;
	}
}
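/* Bind an internal ring buffer (e.g. for ESGS/GSVS data) to an RW buffer
 * slot.  stride, swizzle, element_size, index_stride and add_tid are
 * encoded directly into the 4-dword buffer descriptor; on VI, num_records
 * is converted from elements to bytes for swizzled buffers.
 */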
953 void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
954 struct pipe_resource *buffer,
955 unsigned stride, unsigned num_records,
956 bool add_tid, bool swizzle,
957 unsigned element_size, unsigned index_stride, uint64_t offset)
959 struct si_context *sctx = (struct si_context *)ctx;
960 struct si_buffer_resources *buffers = &sctx->rw_buffers;
962 /* The stride field in the resource descriptor has 14 bits */
963 assert(stride < (1 << 14));
965 assert(slot < buffers->desc.num_elements);
966 pipe_resource_reference(&buffers->buffers[slot], NULL);
971 va = r600_resource(buffer)->gpu_address + offset;
973 switch (element_size) {
975 assert(!"Unsupported ring buffer element size");
991 switch (index_stride) {
993 assert(!"Unsupported ring buffer index stride");
1009 if (sctx->b.chip_class >= VI && stride)
1010 num_records *= stride;
1012 /* Set the descriptor. */
1013 uint32_t *desc = buffers->desc.list + slot*4;
1015 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1016 S_008F04_STRIDE(stride) |
1017 S_008F04_SWIZZLE_ENABLE(swizzle);
1018 desc[2] = num_records;
1019 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1020 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1021 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1022 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1023 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1024 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1025 S_008F0C_ELEMENT_SIZE(element_size) |
1026 S_008F0C_INDEX_STRIDE(index_stride) |
1027 S_008F0C_ADD_TID_ENABLE(add_tid);
1029 pipe_resource_reference(&buffers->buffers[slot], buffer);
1030 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1031 (struct r600_resource*)buffer,
1032 buffers->shader_usage, buffers->priority);
1033 buffers->desc.enabled_mask |= 1u << slot;
1035 /* Clear the descriptor. */
1036 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
1037 buffers->desc.enabled_mask &= ~(1u << slot);
1040 buffers->desc.dirty_mask |= 1u << slot;
1043 /* STREAMOUT BUFFERS */
1045 static void si_set_streamout_targets(struct pipe_context *ctx,
1046 unsigned num_targets,
1047 struct pipe_stream_output_target **targets,
1048 const unsigned *offsets)
1050 struct si_context *sctx = (struct si_context *)ctx;
1051 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1052 unsigned old_num_targets = sctx->b.streamout.num_targets;
1055 /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1056 if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
/* Since streamout uses vector writes which go through TC L2
 * and most other clients can use TC L2 as well, we don't need
 * to flush it.
 *
 * The only cases which require flushing it are VGT DMA index
 * fetching (on <= CIK) and indirect draw data, which are rare
 * cases. Thus, flag the TC L2 dirtiness in the resource and
 * handle it at draw call time.
 */
1066 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1067 if (sctx->b.streamout.targets[i])
1068 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1070 /* Invalidate the scalar cache in case a streamout buffer is
1071 * going to be used as a constant buffer.
1073 * Invalidate TC L1, because streamout bypasses it (done by
1074 * setting GLC=1 in the store instruction), but it can contain
1075 * outdated data of streamout buffers.
1077 * VS_PARTIAL_FLUSH is required if the buffers are going to be
1078 * used as an input immediately.
1080 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1081 SI_CONTEXT_INV_VMEM_L1 |
1082 SI_CONTEXT_VS_PARTIAL_FLUSH;
1085 /* All readers of the streamout targets need to be finished before we can
1086 * start writing to the targets.
1089 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1090 SI_CONTEXT_CS_PARTIAL_FLUSH;
1092 /* Streamout buffers must be bound in 2 places:
1093 * 1) in VGT by setting the VGT_STRMOUT registers
1094 * 2) as shader resources
1097 /* Set the VGT regs. */
1098 r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1100 /* Set the shader resources.*/
1101 for (i = 0; i < num_targets; i++) {
1102 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1105 struct pipe_resource *buffer = targets[i]->buffer;
1106 uint64_t va = r600_resource(buffer)->gpu_address;
1108 /* Set the descriptor.
1110 * On VI, the format must be non-INVALID, otherwise
1111 * the buffer will be considered not bound and store
1112 * instructions will be no-ops.
1114 uint32_t *desc = buffers->desc.list + bufidx*4;
1116 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1117 desc[2] = 0xffffffff;
1118 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1119 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1120 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1121 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1122 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1124 /* Set the resource. */
1125 pipe_resource_reference(&buffers->buffers[bufidx],
1127 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1128 (struct r600_resource*)buffer,
1129 buffers->shader_usage, buffers->priority);
1130 buffers->desc.enabled_mask |= 1u << bufidx;
1132 /* Clear the descriptor and unset the resource. */
1133 memset(buffers->desc.list + bufidx*4, 0,
1134 sizeof(uint32_t) * 4);
1135 pipe_resource_reference(&buffers->buffers[bufidx],
1137 buffers->desc.enabled_mask &= ~(1u << bufidx);
1139 buffers->desc.dirty_mask |= 1u << bufidx;
1141 for (; i < old_num_targets; i++) {
1142 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1143 /* Clear the descriptor and unset the resource. */
1144 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
1145 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1146 buffers->desc.enabled_mask &= ~(1u << bufidx);
1147 buffers->desc.dirty_mask |= 1u << bufidx;
1151 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1152 uint32_t *desc, uint64_t old_buf_va,
1153 struct pipe_resource *new_buf)
1155 /* Retrieve the buffer offset from the descriptor. */
1156 uint64_t old_desc_va =
1157 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1159 assert(old_buf_va <= old_desc_va);
1160 uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1162 /* Update the descriptor. */
1163 uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
1166 desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
1167 S_008F04_BASE_ADDRESS_HI(va >> 32);
1170 /* INTERNAL CONST BUFFERS */
1172 static void si_set_polygon_stipple(struct pipe_context *ctx,
1173 const struct pipe_poly_stipple *state)
1175 struct si_context *sctx = (struct si_context *)ctx;
1176 struct pipe_constant_buffer cb = {};
1177 unsigned stipple[32];
1180 for (i = 0; i < 32; i++)
1181 stipple[i] = util_bitreverse(state->stipple[i]);
1183 cb.user_buffer = stipple;
1184 cb.buffer_size = sizeof(stipple);
1186 si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
1189 /* TEXTURE METADATA ENABLE/DISABLE */
1191 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1192 * while the texture is bound, possibly by a different context. In that case,
1193 * call this function to update compressed_colortex_masks.
1195 void si_update_compressed_colortex_masks(struct si_context *sctx)
1197 for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1198 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1199 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1203 /* BUFFER DISCARD/INVALIDATION */
1205 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1206 static void si_reset_buffer_resources(struct si_context *sctx,
1207 struct si_buffer_resources *buffers,
1208 struct pipe_resource *buf,
1211 unsigned mask = buffers->desc.enabled_mask;
1214 unsigned i = u_bit_scan(&mask);
1215 if (buffers->buffers[i] == buf) {
1216 si_desc_reset_buffer_offset(&sctx->b.b,
1217 buffers->desc.list + i*4,
1219 buffers->desc.dirty_mask |= 1u << i;
1221 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1222 (struct r600_resource *)buf,
1223 buffers->shader_usage,
/* Reallocate a buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents. Apps usually tell us when to do this using
 * map_buffer flags, for example.
 */
1236 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1238 struct si_context *sctx = (struct si_context*)ctx;
1239 struct r600_resource *rbuffer = r600_resource(buf);
1240 unsigned i, shader, alignment = rbuffer->buf->alignment;
1241 uint64_t old_va = rbuffer->gpu_address;
1242 unsigned num_elems = sctx->vertex_elements ?
1243 sctx->vertex_elements->count : 0;
1244 struct si_sampler_view *view;
1246 /* Reallocate the buffer in the same pipe_resource. */
1247 r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
1250 /* We changed the buffer, now we need to bind it where the old one
1251 * was bound. This consists of 2 things:
1252 * 1) Updating the resource descriptor and dirtying it.
1253 * 2) Adding a relocation to the CS, so that it's usable.
1256 /* Vertex buffers. */
1257 for (i = 0; i < num_elems; i++) {
1258 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1260 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1262 if (!sctx->vertex_buffer[vb].buffer)
1265 if (sctx->vertex_buffer[vb].buffer == buf) {
1266 sctx->vertex_buffers_dirty = true;
1271 /* Streamout buffers. (other internal buffers can't be invalidated) */
1272 for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1273 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1275 if (buffers->buffers[i] != buf)
1278 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1280 buffers->desc.dirty_mask |= 1u << i;
1282 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1283 rbuffer, buffers->shader_usage,
1286 /* Update the streamout state. */
1287 if (sctx->b.streamout.begin_emitted)
1288 r600_emit_streamout_end(&sctx->b);
1289 sctx->b.streamout.append_bitmask =
1290 sctx->b.streamout.enabled_mask;
1291 r600_streamout_buffers_dirty(&sctx->b);
1294 /* Constant and shader buffers. */
1295 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1296 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1298 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1302 /* Texture buffers - update virtual addresses in sampler view descriptors. */
1303 LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1304 if (view->base.texture == buf) {
1305 si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1308 /* Texture buffers - update bindings. */
1309 for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1310 struct si_sampler_views *views = &sctx->samplers[shader].views;
1311 unsigned mask = views->desc.enabled_mask;
1314 unsigned i = u_bit_scan(&mask);
1315 if (views->views[i]->texture == buf) {
1316 si_desc_reset_buffer_offset(ctx,
1320 views->desc.dirty_mask |= 1u << i;
1322 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1323 rbuffer, RADEON_USAGE_READ,
1324 RADEON_PRIO_SAMPLER_BUFFER);
1330 for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1331 struct si_images_info *images = &sctx->images[shader];
1332 unsigned mask = images->desc.enabled_mask;
1335 unsigned i = u_bit_scan(&mask);
1337 if (images->views[i].resource == buf) {
1338 if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
1339 si_mark_image_range_valid(&images->views[i]);
1341 si_desc_reset_buffer_offset(
1342 ctx, images->desc.list + i * 8 + 4,
1344 images->desc.dirty_mask |= 1u << i;
1346 radeon_add_to_buffer_list(
1347 &sctx->b, &sctx->b.gfx, rbuffer,
1348 RADEON_USAGE_READWRITE,
1349 RADEON_PRIO_SAMPLER_BUFFER);
1355 /* SHADER USER DATA */
1357 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1360 sctx->const_buffers[shader].desc.pointer_dirty = true;
1361 sctx->shader_buffers[shader].desc.pointer_dirty = true;
1362 sctx->samplers[shader].views.desc.pointer_dirty = true;
1363 sctx->images[shader].desc.pointer_dirty = true;
1365 if (shader == PIPE_SHADER_VERTEX)
1366 sctx->vertex_buffers.pointer_dirty = true;
1368 si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1371 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1375 for (i = 0; i < SI_NUM_SHADERS; i++) {
1376 si_mark_shader_pointers_dirty(sctx, i);
1378 sctx->rw_buffers.desc.pointer_dirty = true;
1381 /* Set a base register address for user data constants in the given shader.
1382 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1384 static void si_set_user_data_base(struct si_context *sctx,
1385 unsigned shader, uint32_t new_base)
1387 uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1389 if (*base != new_base) {
1393 si_mark_shader_pointers_dirty(sctx, shader);
/* This must be called when these shaders are changed from non-NULL to NULL
 * and vice versa:
 * - geometry shader
 * - tessellation control shader
 * - tessellation evaluation shader
 */
1403 void si_shader_change_notify(struct si_context *sctx)
1405 /* VS can be bound as VS, ES, or LS. */
1406 if (sctx->tes_shader.cso)
1407 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1408 R_00B530_SPI_SHADER_USER_DATA_LS_0);
1409 else if (sctx->gs_shader.cso)
1410 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1411 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1413 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1414 R_00B130_SPI_SHADER_USER_DATA_VS_0);
1416 /* TES can be bound as ES, VS, or not bound. */
1417 if (sctx->tes_shader.cso) {
1418 if (sctx->gs_shader.cso)
1419 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1420 R_00B330_SPI_SHADER_USER_DATA_ES_0);
1422 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1423 R_00B130_SPI_SHADER_USER_DATA_VS_0);
		si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
	}
}
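/* Emit a SET_SH_REG packet writing the 64-bit GPU address of a descriptor
 * list into the user-data SGPR pair at sh_base + shader_userdata_offset.
 */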
1429 static void si_emit_shader_pointer(struct si_context *sctx,
1430 struct si_descriptors *desc,
1431 unsigned sh_base, bool keep_dirty)
1433 struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1436 if (!desc->pointer_dirty || !desc->buffer)
1439 va = desc->buffer->gpu_address +
1440 desc->buffer_offset;
1442 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1443 radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1444 radeon_emit(cs, va);
1445 radeon_emit(cs, va >> 32);
1447 desc->pointer_dirty = keep_dirty;
1450 void si_emit_graphics_shader_userdata(struct si_context *sctx,
1451 struct r600_atom *atom)
1454 uint32_t *sh_base = sctx->shader_userdata.sh_base;
1456 if (sctx->rw_buffers.desc.pointer_dirty) {
1457 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1458 R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
1459 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1460 R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1461 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1462 R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
1463 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1464 R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1465 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1466 R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
1467 sctx->rw_buffers.desc.pointer_dirty = false;
1470 for (i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
1471 unsigned base = sh_base[i];
1476 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
1477 si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
1478 si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
1479 si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
1481 si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1484 void si_emit_compute_shader_userdata(struct si_context *sctx)
1486 unsigned base = R_00B900_COMPUTE_USER_DATA_0;
1488 si_emit_shader_pointer(sctx, &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc,
1490 si_emit_shader_pointer(sctx, &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc,
1492 si_emit_shader_pointer(sctx, &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc,
1494 si_emit_shader_pointer(sctx, &sctx->images[PIPE_SHADER_COMPUTE].desc,
1498 /* INIT/DEINIT/UPLOAD */
1500 void si_init_all_descriptors(struct si_context *sctx)
1503 unsigned ce_offset = 0;
1505 for (i = 0; i < SI_NUM_SHADERS; i++) {
1506 si_init_buffer_resources(&sctx->const_buffers[i],
1507 SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1508 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
1510 si_init_buffer_resources(&sctx->shader_buffers[i],
1511 SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
1512 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
1515 si_init_descriptors(&sctx->samplers[i].views.desc,
1516 SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1517 null_texture_descriptor, &ce_offset);
1519 si_init_descriptors(&sctx->images[i].desc,
1520 SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1521 null_image_descriptor, &ce_offset);
1524 si_init_buffer_resources(&sctx->rw_buffers,
1525 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1526 RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
1528 si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1529 4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
1531 assert(ce_offset <= 32768);
1533 /* Set pipe_context functions. */
1534 sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1535 sctx->b.b.set_shader_images = si_set_shader_images;
1536 sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
1537 sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
1538 sctx->b.b.set_shader_buffers = si_set_shader_buffers;
1539 sctx->b.b.set_sampler_views = si_set_sampler_views;
1540 sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1541 sctx->b.invalidate_buffer = si_invalidate_buffer;
1543 /* Shader user data. */
1544 si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1545 si_emit_graphics_shader_userdata);
1547 /* Set default and immutable mappings. */
1548 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1549 si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1550 si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1551 si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1554 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
1558 for (i = 0; i < SI_NUM_SHADERS; i++) {
1559 if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc,
1560 &sctx->shader_userdata.atom) ||
1561 !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc,
1562 &sctx->shader_userdata.atom) ||
1563 !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc,
1564 &sctx->shader_userdata.atom) ||
1565 !si_upload_descriptors(sctx, &sctx->images[i].desc,
1566 &sctx->shader_userdata.atom))
1569 return si_upload_descriptors(sctx, &sctx->rw_buffers.desc,
1570 &sctx->shader_userdata.atom) &&
1571 si_upload_vertex_buffer_descriptors(sctx);
1574 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
/* Does not update rw_buffers as that is not needed for compute shaders
 * and the input buffer is using the same SGPRs anyway.
 */
1579 return si_upload_descriptors(sctx,
1580 &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
1581 si_upload_descriptors(sctx,
1582 &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
1583 si_upload_descriptors(sctx,
1584 &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc, NULL) &&
1585 si_upload_descriptors(sctx,
1586 &sctx->images[PIPE_SHADER_COMPUTE].desc, NULL);
1589 void si_release_all_descriptors(struct si_context *sctx)
1593 for (i = 0; i < SI_NUM_SHADERS; i++) {
1594 si_release_buffer_resources(&sctx->const_buffers[i]);
1595 si_release_buffer_resources(&sctx->shader_buffers[i]);
1596 si_release_sampler_views(&sctx->samplers[i].views);
1597 si_release_image_views(&sctx->images[i]);
1599 si_release_buffer_resources(&sctx->rw_buffers);
1600 si_release_descriptors(&sctx->vertex_buffers);
1603 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1607 for (i = 0; i < SI_NUM_SHADERS; i++) {
1608 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1609 si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
1610 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1611 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1613 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
1614 si_vertex_buffers_begin_new_cs(sctx);
1615 si_shader_userdata_begin_new_cs(sctx);