android-x86/external-mesa.git: src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Marek Olšák <marek.olsak@amd.com>
25  */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28  * 4 dwords) are stored in lists in memory which is accessed by shaders
29  * using scalar load instructions.
30  *
31  * This file is responsible for managing such lists. It keeps a copy of all
32  * descriptors in CPU memory and re-uploads a whole list if some slots have
33  * been changed.
34  *
35  * This code is also responsible for updating shader pointers to those lists.
36  *
37  * Note that CP DMA can't be used for updating the lists, because a GPU hang
38  * could leave the list in a mid-IB state, so the next IB would read wrong
39  * descriptors and the whole context would become unusable at that point.
40  * (Register shadowing can't be used for the same reason.)
41  *
42  * Also, uploading descriptors to newly allocated memory doesn't require
43  * a KCACHE flush.
44  *
45  *
46  * Possible scenarios for one 16 dword image+sampler slot:
47  *
48  *       | Image        | w/ FMASK   | Buffer       | NULL
49  * [ 0: 3] Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
50  * [ 4: 7] Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
51  * [ 8:11] Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
52  * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53  *
54  * FMASK implies MSAA, therefore no sampler state.
55  * Sampler states are never unbound except when FMASK is bound.
56  */
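/* For example, with this layout the offsets used by si_set_sampler_view()
 * below follow directly: for slot i, the image descriptor is written at
 * dword i*16 of the list, the FMASK (or NULL) descriptor at i*16 + 8, and
 * the sampler state at i*16 + 12. The shader gets a pointer to the uploaded
 * list through user data SGPRs and reads these ranges with scalar loads.
 */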
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "si_shader.h"
61 #include "sid.h"
62
63 #include "util/u_memory.h"
64 #include "util/u_upload_mgr.h"
65
66
67 /* NULL image and buffer descriptor for textures (alpha = 1) and images
68  * (alpha = 0).
69  *
70  * For images, all fields must be zero except for the swizzle, which
71  * supports arbitrary combinations of 0s and 1s. The texture type must be
72  * set to any valid type (e.g. 1D); if it isn't set, the hw hangs.
73  *
74  * For buffers, all fields must be zero. If they are not, the hw hangs.
75  *
76  * This is the only reason why the buffer descriptor must be in words [4:7].
77  */
78 static uint32_t null_texture_descriptor[8] = {
79         0,
80         0,
81         0,
82         S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
83         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
84         /* the rest must contain zeros; the buffer descriptor relies on
85          * this as well */
86 };
87
88 static uint32_t null_image_descriptor[8] = {
89         0,
90         0,
91         0,
92         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
93         /* the rest must contain zeros; the buffer descriptor relies on
94          * this as well */
95 };
96
97 static void si_init_descriptors(struct si_descriptors *desc,
98                                 unsigned shader_userdata_index,
99                                 unsigned element_dw_size,
100                                 unsigned num_elements,
101                                 const uint32_t *null_descriptor)
102 {
103         int i;
104
105         assert(num_elements <= sizeof(desc->enabled_mask)*8);
106
107         desc->list = CALLOC(num_elements, element_dw_size * 4);
108         desc->element_dw_size = element_dw_size;
109         desc->num_elements = num_elements;
110         desc->list_dirty = true; /* upload the list before the next draw */
111         desc->shader_userdata_offset = shader_userdata_index * 4;
112
113         /* Initialize the array to NULL descriptors if one is given (the element size must be a multiple of 8). */
114         if (null_descriptor) {
115                 assert(element_dw_size % 8 == 0);
116                 for (i = 0; i < num_elements * element_dw_size / 8; i++)
117                         memcpy(desc->list + i * 8, null_descriptor,
118                                8 * 4);
119         }
120 }
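/* Note: in this file element_dw_size is 4 for buffer and vertex buffer
 * descriptors, 8 for image descriptors and 16 for combined
 * image+FMASK+sampler slots; see si_init_all_descriptors() at the end.
 */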
121
122 static void si_release_descriptors(struct si_descriptors *desc)
123 {
124         pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
125         FREE(desc->list);
126 }
127
128 static bool si_upload_descriptors(struct si_context *sctx,
129                                   struct si_descriptors *desc)
130 {
131         unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
132         void *ptr;
133
134         if (!desc->list_dirty)
135                 return true;
136
137         u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
138                        &desc->buffer_offset,
139                        (struct pipe_resource**)&desc->buffer, &ptr);
140         if (!desc->buffer)
141                 return false; /* skip the draw call */
142
143         util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
144
145         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
146                               RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
147
148         desc->list_dirty = false;
149         desc->pointer_dirty = true;
150         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
151         return true;
152 }
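/* Since every upload goes into a freshly allocated range, the new descriptors
 * cannot alias anything the shader caches already hold, so no KCACHE flush is
 * needed here (see the comment at the top of the file); only the shader
 * pointer must be re-emitted, hence pointer_dirty and dirtying the
 * shader_userdata atom.
 */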
153
154 /* SAMPLER VIEWS */
155
156 static void si_release_sampler_views(struct si_sampler_views *views)
157 {
158         int i;
159
160         for (i = 0; i < Elements(views->views); i++) {
161                 pipe_sampler_view_reference(&views->views[i], NULL);
162         }
163         si_release_descriptors(&views->desc);
164 }
165
166 static void si_sampler_view_add_buffer(struct si_context *sctx,
167                                        struct pipe_resource *resource)
168 {
169         struct r600_resource *rres = (struct r600_resource*)resource;
170
171         if (!resource)
172                 return;
173
174         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres,
175                                   RADEON_USAGE_READ,
176                                   r600_get_sampler_view_priority(rres));
177 }
178
179 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
180                                           struct si_sampler_views *views)
181 {
182         uint64_t mask = views->desc.enabled_mask;
183
184         /* Add buffers to the CS. */
185         while (mask) {
186                 int i = u_bit_scan64(&mask);
187
188                 si_sampler_view_add_buffer(sctx, views->views[i]->texture);
189         }
190
191         if (!views->desc.buffer)
192                 return;
193         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
194                               RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
195 }
196
197 static void si_set_sampler_view(struct si_context *sctx,
198                                 struct si_sampler_views *views,
199                                 unsigned slot, struct pipe_sampler_view *view)
200 {
201         struct si_sampler_view *rview = (struct si_sampler_view*)view;
202
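        /* If the view was created with DCC compression enabled but the
         * texture's DCC has since been disabled (dcc_offset == 0), patch the
         * cached descriptor so sampling no longer tries to read the missing
         * DCC metadata.
         */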
203         if (view && view->texture && view->texture->target != PIPE_BUFFER &&
204             G_008F28_COMPRESSION_EN(rview->state[6]) &&
205             ((struct r600_texture*)view->texture)->dcc_offset == 0) {
206                 rview->state[6] &= C_008F28_COMPRESSION_EN &
207                                    C_008F28_ALPHA_IS_ON_MSB;
208         } else if (views->views[slot] == view)
209                 return;
210
211         if (view) {
212                 struct r600_texture *rtex = (struct r600_texture *)view->texture;
213
214                 si_sampler_view_add_buffer(sctx, view->texture);
215
216                 pipe_sampler_view_reference(&views->views[slot], view);
217                 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
218
219                 if (view->texture && view->texture->target != PIPE_BUFFER &&
220                     rtex->fmask.size) {
221                         memcpy(views->desc.list + slot*16 + 8,
222                                rview->fmask_state, 8*4);
223                 } else {
224                         /* Disable FMASK and bind sampler state in [12:15]. */
225                         memcpy(views->desc.list + slot*16 + 8,
226                                null_texture_descriptor, 4*4);
227
228                         if (views->sampler_states[slot])
229                                 memcpy(views->desc.list + slot*16 + 12,
230                                        views->sampler_states[slot], 4*4);
231                 }
232
233                 views->desc.enabled_mask |= 1llu << slot;
234         } else {
235                 pipe_sampler_view_reference(&views->views[slot], NULL);
236                 memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
237                 /* Only clear the lower dwords of FMASK. */
238                 memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
239                 views->desc.enabled_mask &= ~(1llu << slot);
240         }
241
242         views->desc.list_dirty = true;
243 }
244
245 static bool is_compressed_colortex(struct r600_texture *rtex)
246 {
247         return rtex->cmask.size || rtex->fmask.size ||
248                (rtex->dcc_offset && rtex->dirty_level_mask);
249 }
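/* "Compressed colortex" means the texture carries CMASK, FMASK or dirty DCC
 * metadata that has to be decompressed/resolved before the texture can be
 * sampled; the masks built from this predicate are consumed elsewhere in the
 * driver before draws.
 */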
250
251 static void si_set_sampler_views(struct pipe_context *ctx,
252                                  unsigned shader, unsigned start,
253                                  unsigned count,
254                                  struct pipe_sampler_view **views)
255 {
256         struct si_context *sctx = (struct si_context *)ctx;
257         struct si_textures_info *samplers = &sctx->samplers[shader];
258         int i;
259
260         if (!count || shader >= SI_NUM_SHADERS)
261                 return;
262
263         for (i = 0; i < count; i++) {
264                 unsigned slot = start + i;
265
266                 if (!views || !views[i]) {
267                         samplers->depth_texture_mask &= ~(1 << slot);
268                         samplers->compressed_colortex_mask &= ~(1 << slot);
269                         si_set_sampler_view(sctx, &samplers->views, slot, NULL);
270                         continue;
271                 }
272
273                 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
274
275                 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
276                         struct r600_texture *rtex =
277                                 (struct r600_texture*)views[i]->texture;
278
279                         if (rtex->is_depth && !rtex->is_flushing_texture) {
280                                 samplers->depth_texture_mask |= 1 << slot;
281                         } else {
282                                 samplers->depth_texture_mask &= ~(1 << slot);
283                         }
284                         if (is_compressed_colortex(rtex)) {
285                                 samplers->compressed_colortex_mask |= 1 << slot;
286                         } else {
287                                 samplers->compressed_colortex_mask &= ~(1 << slot);
288                         }
289                 } else {
290                         samplers->depth_texture_mask &= ~(1 << slot);
291                         samplers->compressed_colortex_mask &= ~(1 << slot);
292                 }
293         }
294 }
295
296 static void
297 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
298 {
299         uint64_t mask = samplers->views.desc.enabled_mask;
300
301         while (mask) {
302                 int i = u_bit_scan64(&mask);
303                 struct pipe_resource *res = samplers->views.views[i]->texture;
304
305                 if (res && res->target != PIPE_BUFFER) {
306                         struct r600_texture *rtex = (struct r600_texture *)res;
307
308                         if (is_compressed_colortex(rtex)) {
309                                 samplers->compressed_colortex_mask |= 1 << i;
310                         } else {
311                                 samplers->compressed_colortex_mask &= ~(1 << i);
312                         }
313                 }
314         }
315 }
316
317 /* IMAGE VIEWS */
318
319 static void
320 si_release_image_views(struct si_images_info *images)
321 {
322         unsigned i;
323
324         for (i = 0; i < SI_NUM_IMAGES; ++i) {
325                 struct pipe_image_view *view = &images->views[i];
326
327                 pipe_resource_reference(&view->resource, NULL);
328         }
329
330         si_release_descriptors(&images->desc);
331 }
332
333 static void
334 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
335 {
336         uint mask = images->desc.enabled_mask;
337
338         /* Add buffers to the CS. */
339         while (mask) {
340                 int i = u_bit_scan(&mask);
341                 struct pipe_image_view *view = &images->views[i];
342
343                 assert(view->resource);
344
345                 si_sampler_view_add_buffer(sctx, view->resource);
346         }
347
348         if (images->desc.buffer) {
349                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
350                                           images->desc.buffer,
351                                           RADEON_USAGE_READ,
352                                           RADEON_PRIO_DESCRIPTORS);
353         }
354 }
355
356 static void
357 si_disable_shader_image(struct si_images_info *images, unsigned slot)
358 {
359         if (images->desc.enabled_mask & (1llu << slot)) {
360                 pipe_resource_reference(&images->views[slot].resource, NULL);
361                 images->compressed_colortex_mask &= ~(1 << slot);
362
363                 memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
364                 images->desc.enabled_mask &= ~(1llu << slot);
365                 images->desc.list_dirty = true;
366         }
367 }
368
369 static void
370 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
371                      unsigned start_slot, unsigned count,
372                      struct pipe_image_view *views)
373 {
374         struct si_context *ctx = (struct si_context *)pipe;
375         struct si_screen *screen = ctx->screen;
376         struct si_images_info *images = &ctx->images[shader];
377         unsigned i, slot;
378
379         assert(shader < SI_NUM_SHADERS);
380
381         if (!count)
382                 return;
383
384         assert(start_slot + count <= SI_NUM_IMAGES);
385
386         for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
387                 struct r600_resource *res;
388
389                 if (!views || !views[i].resource) {
390                         si_disable_shader_image(images, slot);
391                         continue;
392                 }
393
394                 res = (struct r600_resource *)views[i].resource;
395                 util_copy_image_view(&images->views[slot], &views[i]);
396
397                 si_sampler_view_add_buffer(ctx, &res->b.b);
398
399                 if (res->b.b.target == PIPE_BUFFER) {
400                         si_make_buffer_descriptor(screen, res,
401                                                   views[i].format,
402                                                   views[i].u.buf.first_element,
403                                                   views[i].u.buf.last_element,
404                                                   images->desc.list + slot * 8);
405                         images->compressed_colortex_mask &= ~(1 << slot);
406                 } else {
407                         static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
408                         struct r600_texture *tex = (struct r600_texture *)res;
409                         unsigned level;
410                         unsigned width, height, depth;
411
412                         assert(!tex->is_depth);
413                         assert(tex->fmask.size == 0);
414
415                         if (tex->dcc_offset &&
416                             views[i].access & PIPE_IMAGE_ACCESS_WRITE)
417                                 r600_texture_disable_dcc(&screen->b, tex);
418
419                         if (is_compressed_colortex(tex)) {
420                                 images->compressed_colortex_mask |= 1 << slot;
421                         } else {
422                                 images->compressed_colortex_mask &= ~(1 << slot);
423                         }
424
425                         /* Always force the base level to the selected level.
426                          *
427                          * This is required for 3D textures, where otherwise
428                          * selecting a single slice for non-layered bindings
429                          * fails. It doesn't hurt the other targets.
430                          */
431                         level = views[i].u.tex.level;
432                         width = u_minify(res->b.b.width0, level);
433                         height = u_minify(res->b.b.height0, level);
434                         depth = u_minify(res->b.b.depth0, level);
435
436                         si_make_texture_descriptor(screen, tex, false, res->b.b.target,
437                                                    views[i].format, swizzle,
438                                                    level, 0, 0,
439                                                    views[i].u.tex.first_layer, views[i].u.tex.last_layer,
440                                                    width, height, depth,
441                                                    images->desc.list + slot * 8,
442                                                    NULL);
443                 }
444
445                 images->desc.enabled_mask |= 1llu << slot;
446                 images->desc.list_dirty = true;
447         }
448 }
449
450 static void
451 si_images_update_compressed_colortex_mask(struct si_images_info *images)
452 {
453         uint64_t mask = images->desc.enabled_mask;
454
455         while (mask) {
456                 int i = u_bit_scan64(&mask);
457                 struct pipe_resource *res = images->views[i].resource;
458
459                 if (res && res->target != PIPE_BUFFER) {
460                         struct r600_texture *rtex = (struct r600_texture *)res;
461
462                         if (is_compressed_colortex(rtex)) {
463                                 images->compressed_colortex_mask |= 1 << i;
464                         } else {
465                                 images->compressed_colortex_mask &= ~(1 << i);
466                         }
467                 }
468         }
469 }
470
471 /* SAMPLER STATES */
472
473 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
474                                    unsigned start, unsigned count, void **states)
475 {
476         struct si_context *sctx = (struct si_context *)ctx;
477         struct si_textures_info *samplers = &sctx->samplers[shader];
478         struct si_descriptors *desc = &samplers->views.desc;
479         struct si_sampler_state **sstates = (struct si_sampler_state**)states;
480         int i;
481
482         if (!count || shader >= SI_NUM_SHADERS)
483                 return;
484
485         for (i = 0; i < count; i++) {
486                 unsigned slot = start + i;
487
488                 if (!sstates[i] ||
489                     sstates[i] == samplers->views.sampler_states[slot])
490                         continue;
491
492                 samplers->views.sampler_states[slot] = sstates[i];
493
494                 /* If FMASK is bound, don't overwrite it.
495                  * The sampler state will be set after FMASK is unbound.
496                  */
497                 if (samplers->views.views[slot] &&
498                     samplers->views.views[slot]->texture &&
499                     samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
500                     ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
501                         continue;
502
503                 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
504                 desc->list_dirty = true;
505         }
506 }
507
508 /* BUFFER RESOURCES */
509
510 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
511                                      unsigned num_buffers,
512                                      unsigned shader_userdata_index,
513                                      enum radeon_bo_usage shader_usage,
514                                      enum radeon_bo_priority priority)
515 {
516         buffers->shader_usage = shader_usage;
517         buffers->priority = priority;
518         buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
519
520         si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
521                             num_buffers, NULL);
522 }
523
524 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
525 {
526         int i;
527
528         for (i = 0; i < buffers->desc.num_elements; i++) {
529                 pipe_resource_reference(&buffers->buffers[i], NULL);
530         }
531
532         FREE(buffers->buffers);
533         si_release_descriptors(&buffers->desc);
534 }
535
536 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
537                                              struct si_buffer_resources *buffers)
538 {
539         uint64_t mask = buffers->desc.enabled_mask;
540
541         /* Add buffers to the CS. */
542         while (mask) {
543                 int i = u_bit_scan64(&mask);
544
545                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
546                                       (struct r600_resource*)buffers->buffers[i],
547                                       buffers->shader_usage, buffers->priority);
548         }
549
550         if (!buffers->desc.buffer)
551                 return;
552         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
553                               buffers->desc.buffer, RADEON_USAGE_READWRITE,
554                               RADEON_PRIO_DESCRIPTORS);
555 }
556
557 /* VERTEX BUFFERS */
558
559 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
560 {
561         struct si_descriptors *desc = &sctx->vertex_buffers;
562         int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
563         int i;
564
565         for (i = 0; i < count; i++) {
566                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
567
568                 if (vb >= Elements(sctx->vertex_buffer))
569                         continue;
570                 if (!sctx->vertex_buffer[vb].buffer)
571                         continue;
572
573                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
574                                       (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
575                                       RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
576         }
577
578         if (!desc->buffer)
579                 return;
580         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
581                               desc->buffer, RADEON_USAGE_READ,
582                               RADEON_PRIO_DESCRIPTORS);
583 }
584
585 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
586 {
587         struct si_descriptors *desc = &sctx->vertex_buffers;
588         bool bound[SI_NUM_VERTEX_BUFFERS] = {};
589         unsigned i, count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
590         uint64_t va;
591         uint32_t *ptr;
592
593         if (!sctx->vertex_buffers_dirty)
594                 return true;
595         if (!count)
596                 return true;
597
598         /* Vertex buffer descriptors are the only ones which are uploaded
599          * directly through a staging buffer and don't go through
600          * the fine-grained upload path.
601          */
602         u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
603                        (struct pipe_resource**)&desc->buffer, (void**)&ptr);
604         if (!desc->buffer)
605                 return false;
606
607         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
608                               desc->buffer, RADEON_USAGE_READ,
609                               RADEON_PRIO_DESCRIPTORS);
610
611         assert(count <= SI_NUM_VERTEX_BUFFERS);
612
613         for (i = 0; i < count; i++) {
614                 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
615                 struct pipe_vertex_buffer *vb;
616                 struct r600_resource *rbuffer;
617                 unsigned offset;
618                 uint32_t *desc = &ptr[i*4];
619
620                 if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
621                         memset(desc, 0, 16);
622                         continue;
623                 }
624
625                 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
626                 rbuffer = (struct r600_resource*)vb->buffer;
627                 if (!rbuffer) {
628                         memset(desc, 0, 16);
629                         continue;
630                 }
631
632                 offset = vb->buffer_offset + ve->src_offset;
633                 va = rbuffer->gpu_address + offset;
634
635                 /* Fill in T# buffer resource description */
636                 desc[0] = va;
637                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
638                           S_008F04_STRIDE(vb->stride);
639
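                /* dword 2 is NUM_RECORDS: on SI-CIK it is programmed as the
                 * number of complete elements that fit after the offset,
                 * while on VI (or when stride == 0) it is programmed as a
                 * byte size (si_set_ring_buffer does the equivalent on VI by
                 * multiplying num_records by the stride).
                 */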
640                 if (sctx->b.chip_class <= CIK && vb->stride)
641                         /* Round up by rounding down and adding 1 */
642                         desc[2] = (vb->buffer->width0 - offset -
643                                    sctx->vertex_elements->format_size[i]) /
644                                   vb->stride + 1;
645                 else
646                         desc[2] = vb->buffer->width0 - offset;
647
648                 desc[3] = sctx->vertex_elements->rsrc_word3[i];
649
650                 if (!bound[ve->vertex_buffer_index]) {
651                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
652                                               (struct r600_resource*)vb->buffer,
653                                               RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
654                         bound[ve->vertex_buffer_index] = true;
655                 }
656         }
657
658         /* Don't flush the const cache. It would have a very negative effect
659          * on performance (confirmed by testing). New descriptors are always
660          * uploaded to a fresh new buffer, so I don't think flushing the const
661          * cache is needed. */
662         desc->pointer_dirty = true;
663         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
664         sctx->vertex_buffers_dirty = false;
665         return true;
666 }
667
668
669 /* CONSTANT BUFFERS */
670
671 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
672                             const uint8_t *ptr, unsigned size, uint32_t *const_offset)
673 {
674         void *tmp;
675
676         u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
677                        (struct pipe_resource**)rbuffer, &tmp);
678         if (*rbuffer)
679                 util_memcpy_cpu_to_le32(tmp, ptr, size);
680 }
681
682 static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
683                                    struct pipe_constant_buffer *input)
684 {
685         struct si_context *sctx = (struct si_context *)ctx;
686         struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
687
688         if (shader >= SI_NUM_SHADERS)
689                 return;
690
691         assert(slot < buffers->desc.num_elements);
692         pipe_resource_reference(&buffers->buffers[slot], NULL);
693
694         /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
695          * with a NULL buffer). We need to use a dummy buffer instead. */
696         if (sctx->b.chip_class == CIK &&
697             (!input || (!input->buffer && !input->user_buffer)))
698                 input = &sctx->null_const_buf;
699
700         if (input && (input->buffer || input->user_buffer)) {
701                 struct pipe_resource *buffer = NULL;
702                 uint64_t va;
703
704                 /* Upload the user buffer if needed. */
705                 if (input->user_buffer) {
706                         unsigned buffer_offset;
707
708                         si_upload_const_buffer(sctx,
709                                                (struct r600_resource**)&buffer, input->user_buffer,
710                                                input->buffer_size, &buffer_offset);
711                         if (!buffer) {
712                                 /* Just unbind on failure. */
713                                 si_set_constant_buffer(ctx, shader, slot, NULL);
714                                 return;
715                         }
716                         va = r600_resource(buffer)->gpu_address + buffer_offset;
717                 } else {
718                         pipe_resource_reference(&buffer, input->buffer);
719                         va = r600_resource(buffer)->gpu_address + input->buffer_offset;
720                 }
721
722                 /* Set the descriptor. */
723                 uint32_t *desc = buffers->desc.list + slot*4;
724                 desc[0] = va;
725                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
726                           S_008F04_STRIDE(0);
727                 desc[2] = input->buffer_size;
728                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
729                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
730                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
731                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
732                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
733                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
734
735                 buffers->buffers[slot] = buffer;
736                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
737                                       (struct r600_resource*)buffer,
738                                       buffers->shader_usage, buffers->priority);
739                 buffers->desc.enabled_mask |= 1llu << slot;
740         } else {
741                 /* Clear the descriptor. */
742                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
743                 buffers->desc.enabled_mask &= ~(1llu << slot);
744         }
745
746         buffers->desc.list_dirty = true;
747 }
748
749 /* RING BUFFERS */
750
751 void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
752                         struct pipe_resource *buffer,
753                         unsigned stride, unsigned num_records,
754                         bool add_tid, bool swizzle,
755                         unsigned element_size, unsigned index_stride, uint64_t offset)
756 {
757         struct si_context *sctx = (struct si_context *)ctx;
758         struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
759
760         if (shader >= SI_NUM_SHADERS)
761                 return;
762
763         /* The stride field in the resource descriptor has 14 bits */
764         assert(stride < (1 << 14));
765
766         assert(slot < buffers->desc.num_elements);
767         pipe_resource_reference(&buffers->buffers[slot], NULL);
768
769         if (buffer) {
770                 uint64_t va;
771
772                 va = r600_resource(buffer)->gpu_address + offset;
773
774                 switch (element_size) {
775                 default:
776                         assert(!"Unsupported ring buffer element size");
777                 case 0:
778                 case 2:
779                         element_size = 0;
780                         break;
781                 case 4:
782                         element_size = 1;
783                         break;
784                 case 8:
785                         element_size = 2;
786                         break;
787                 case 16:
788                         element_size = 3;
789                         break;
790                 }
791
792                 switch (index_stride) {
793                 default:
794                         assert(!"Unsupported ring buffer index stride");
795                 case 0:
796                 case 8:
797                         index_stride = 0;
798                         break;
799                 case 16:
800                         index_stride = 1;
801                         break;
802                 case 32:
803                         index_stride = 2;
804                         break;
805                 case 64:
806                         index_stride = 3;
807                         break;
808                 }
809
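                /* On VI, NUM_RECORDS is programmed in bytes rather than in
                 * elements, hence the multiplication below. element_size and
                 * index_stride now hold the hardware field encodings (0-3)
                 * that go into dword 3.
                 */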
810                 if (sctx->b.chip_class >= VI && stride)
811                         num_records *= stride;
812
813                 /* Set the descriptor. */
814                 uint32_t *desc = buffers->desc.list + slot*4;
815                 desc[0] = va;
816                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
817                           S_008F04_STRIDE(stride) |
818                           S_008F04_SWIZZLE_ENABLE(swizzle);
819                 desc[2] = num_records;
820                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
821                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
822                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
823                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
824                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
825                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
826                           S_008F0C_ELEMENT_SIZE(element_size) |
827                           S_008F0C_INDEX_STRIDE(index_stride) |
828                           S_008F0C_ADD_TID_ENABLE(add_tid);
829
830                 pipe_resource_reference(&buffers->buffers[slot], buffer);
831                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
832                                       (struct r600_resource*)buffer,
833                                       buffers->shader_usage, buffers->priority);
834                 buffers->desc.enabled_mask |= 1llu << slot;
835         } else {
836                 /* Clear the descriptor. */
837                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
838                 buffers->desc.enabled_mask &= ~(1llu << slot);
839         }
840
841         buffers->desc.list_dirty = true;
842 }
843
844 /* STREAMOUT BUFFERS */
845
846 static void si_set_streamout_targets(struct pipe_context *ctx,
847                                      unsigned num_targets,
848                                      struct pipe_stream_output_target **targets,
849                                      const unsigned *offsets)
850 {
851         struct si_context *sctx = (struct si_context *)ctx;
852         struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
853         unsigned old_num_targets = sctx->b.streamout.num_targets;
854         unsigned i, bufidx;
855
856         /* We are going to unbind the buffers. Mark which caches need to be flushed. */
857         if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
858                 /* Since streamout uses vector writes which go through TC L2
859                  * and most other clients can use TC L2 as well, we don't need
860                  * to flush it.
861                  *
862                  * The only case which requires flushing it is VGT DMA index
863                  * fetching, which is a rare case. Thus, flag the TC L2
864                  * dirtiness in the resource and handle it when index fetching
865                  * is used.
866                  */
867                 for (i = 0; i < sctx->b.streamout.num_targets; i++)
868                         if (sctx->b.streamout.targets[i])
869                                 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
870
871                 /* Invalidate the scalar cache in case a streamout buffer is
872                  * going to be used as a constant buffer.
873                  *
874                  * Invalidate TC L1, because streamout bypasses it (done by
875                  * setting GLC=1 in the store instruction), but it can contain
876                  * outdated data of streamout buffers.
877                  *
878                  * VS_PARTIAL_FLUSH is required if the buffers are going to be
879                  * used as an input immediately.
880                  */
881                 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
882                                  SI_CONTEXT_INV_VMEM_L1 |
883                                  SI_CONTEXT_VS_PARTIAL_FLUSH;
884         }
885
886         /* Streamout buffers must be bound in 2 places:
887          * 1) in VGT by setting the VGT_STRMOUT registers
888          * 2) as shader resources
889          */
890
891         /* Set the VGT regs. */
892         r600_set_streamout_targets(ctx, num_targets, targets, offsets);
893
894         /* Set the shader resources. */
895         for (i = 0; i < num_targets; i++) {
896                 bufidx = SI_SO_BUF_OFFSET + i;
897
898                 if (targets[i]) {
899                         struct pipe_resource *buffer = targets[i]->buffer;
900                         uint64_t va = r600_resource(buffer)->gpu_address;
901
902                         /* Set the descriptor.
903                          *
904                          * On VI, the format must be non-INVALID, otherwise
905                          * the buffer will be considered not bound and store
906                          * instructions will be no-ops.
907                          */
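                        /* NUM_RECORDS is set to the maximum (0xffffffff); the
                         * streamout buffer size itself is handled by the VGT
                         * streamout state set above.
                         */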
908                         uint32_t *desc = buffers->desc.list + bufidx*4;
909                         desc[0] = va;
910                         desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
911                         desc[2] = 0xffffffff;
912                         desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
913                                   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
914                                   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
915                                   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
916                                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
917
918                         /* Set the resource. */
919                         pipe_resource_reference(&buffers->buffers[bufidx],
920                                                 buffer);
921                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
922                                               (struct r600_resource*)buffer,
923                                               buffers->shader_usage, buffers->priority);
924                         buffers->desc.enabled_mask |= 1llu << bufidx;
925                 } else {
926                         /* Clear the descriptor and unset the resource. */
927                         memset(buffers->desc.list + bufidx*4, 0,
928                                sizeof(uint32_t) * 4);
929                         pipe_resource_reference(&buffers->buffers[bufidx],
930                                                 NULL);
931                         buffers->desc.enabled_mask &= ~(1llu << bufidx);
932                 }
933         }
934         for (; i < old_num_targets; i++) {
935                 bufidx = SI_SO_BUF_OFFSET + i;
936                 /* Clear the descriptor and unset the resource. */
937                 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
938                 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
939                 buffers->desc.enabled_mask &= ~(1llu << bufidx);
940         }
941
942         buffers->desc.list_dirty = true;
943 }
944
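/* Patch a buffer descriptor in place after the underlying storage has moved:
 * keep the original offset within the buffer, but rebase it onto the new
 * buffer's GPU address. Used by si_invalidate_buffer() below.
 */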
945 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
946                                         uint32_t *desc, uint64_t old_buf_va,
947                                         struct pipe_resource *new_buf)
948 {
949         /* Retrieve the buffer offset from the descriptor. */
950         uint64_t old_desc_va =
951                 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
952
953         assert(old_buf_va <= old_desc_va);
954         uint64_t offset_within_buffer = old_desc_va - old_buf_va;
955
956         /* Update the descriptor. */
957         uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
958
959         desc[0] = va;
960         desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
961                   S_008F04_BASE_ADDRESS_HI(va >> 32);
962 }
963
964 /* TEXTURE METADATA ENABLE/DISABLE */
965
966 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
967  * while the texture is bound, possibly by a different context. In that case,
968  * call this function to update compressed_colortex_masks.
969  */
970 void si_update_compressed_colortex_masks(struct si_context *sctx)
971 {
972         for (int i = 0; i < SI_NUM_SHADERS; ++i) {
973                 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
974                 si_images_update_compressed_colortex_mask(&sctx->images[i]);
975         }
976 }
977
978 /* BUFFER DISCARD/INVALIDATION */
979
980 /* Reallocate a buffer and update all resource bindings where the buffer is
981  * bound.
982  *
983  * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
984  * idle by discarding its contents. Apps usually tell us when to do this using
985  * map_buffer flags, for example.
986  */
987 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
988 {
989         struct si_context *sctx = (struct si_context*)ctx;
990         struct r600_resource *rbuffer = r600_resource(buf);
991         unsigned i, shader, alignment = rbuffer->buf->alignment;
992         uint64_t old_va = rbuffer->gpu_address;
993         unsigned num_elems = sctx->vertex_elements ?
994                                        sctx->vertex_elements->count : 0;
995         struct si_sampler_view *view;
996
997         /* Reallocate the buffer in the same pipe_resource. */
998         r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
999                            alignment, TRUE);
1000
1001         /* We changed the buffer, now we need to bind it where the old one
1002          * was bound. This consists of 2 things:
1003          *   1) Updating the resource descriptor and dirtying it.
1004          *   2) Adding a relocation to the CS, so that it's usable.
1005          */
1006
1007         /* Vertex buffers. */
1008         for (i = 0; i < num_elems; i++) {
1009                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1010
1011                 if (vb >= Elements(sctx->vertex_buffer))
1012                         continue;
1013                 if (!sctx->vertex_buffer[vb].buffer)
1014                         continue;
1015
1016                 if (sctx->vertex_buffer[vb].buffer == buf) {
1017                         sctx->vertex_buffers_dirty = true;
1018                         break;
1019                 }
1020         }
1021
1022         /* Read/Write buffers. */
1023         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1024                 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
1025                 uint64_t mask = buffers->desc.enabled_mask;
1026
1027                 while (mask) {
1028                         i = u_bit_scan64(&mask);
1029                         if (buffers->buffers[i] == buf) {
1030                                 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1031                                                             old_va, buf);
1032                                 buffers->desc.list_dirty = true;
1033
1034                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1035                                                       rbuffer, buffers->shader_usage,
1036                                                       buffers->priority);
1037
1038                                 if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
1039                                         /* Update the streamout state. */
1040                                         if (sctx->b.streamout.begin_emitted) {
1041                                                 r600_emit_streamout_end(&sctx->b);
1042                                         }
1043                                         sctx->b.streamout.append_bitmask =
1044                                                 sctx->b.streamout.enabled_mask;
1045                                         r600_streamout_buffers_dirty(&sctx->b);
1046                                 }
1047                         }
1048                 }
1049         }
1050
1051         /* Constant buffers. */
1052         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1053                 struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
1054                 uint64_t mask = buffers->desc.enabled_mask;
1055
1056                 while (mask) {
1057                         unsigned i = u_bit_scan64(&mask);
1058                         if (buffers->buffers[i] == buf) {
1059                                 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1060                                                             old_va, buf);
1061                                 buffers->desc.list_dirty = true;
1062
1063                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1064                                                       rbuffer, buffers->shader_usage,
1065                                                       buffers->priority);
1066                         }
1067                 }
1068         }
1069
1070         /* Texture buffers - update virtual addresses in sampler view descriptors. */
1071         LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1072                 if (view->base.texture == buf) {
1073                         si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1074                 }
1075         }
1076         /* Texture buffers - update bindings. */
1077         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1078                 struct si_sampler_views *views = &sctx->samplers[shader].views;
1079                 uint64_t mask = views->desc.enabled_mask;
1080
1081                 while (mask) {
1082                         unsigned i = u_bit_scan64(&mask);
1083                         if (views->views[i]->texture == buf) {
1084                                 si_desc_reset_buffer_offset(ctx,
1085                                                             views->desc.list +
1086                                                             i * 16 + 4,
1087                                                             old_va, buf);
1088                                 views->desc.list_dirty = true;
1089
1090                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1091                                                       rbuffer, RADEON_USAGE_READ,
1092                                                       RADEON_PRIO_SAMPLER_BUFFER);
1093                         }
1094                 }
1095         }
1096
1097         /* Shader images */
1098         for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1099                 struct si_images_info *images = &sctx->images[shader];
1100                 unsigned mask = images->desc.enabled_mask;
1101
1102                 while (mask) {
1103                         unsigned i = u_bit_scan(&mask);
1104
1105                         if (images->views[i].resource == buf) {
1106                                 si_desc_reset_buffer_offset(
1107                                         ctx, images->desc.list + i * 8 + 4,
1108                                         old_va, buf);
1109                                 images->desc.list_dirty = true;
1110
1111                                 radeon_add_to_buffer_list(
1112                                         &sctx->b, &sctx->b.gfx, rbuffer,
1113                                         RADEON_USAGE_READWRITE,
1114                                         RADEON_PRIO_SAMPLER_BUFFER);
1115                         }
1116                 }
1117         }
1118 }
1119
1120 /* SHADER USER DATA */
1121
1122 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1123                                           unsigned shader)
1124 {
1125         sctx->const_buffers[shader].desc.pointer_dirty = true;
1126         sctx->rw_buffers[shader].desc.pointer_dirty = true;
1127         sctx->samplers[shader].views.desc.pointer_dirty = true;
1128
1129         if (shader == PIPE_SHADER_VERTEX)
1130                 sctx->vertex_buffers.pointer_dirty = true;
1131
1132         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1133 }
1134
1135 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1136 {
1137         int i;
1138
1139         for (i = 0; i < SI_NUM_SHADERS; i++) {
1140                 si_mark_shader_pointers_dirty(sctx, i);
1141         }
1142 }
1143
1144 /* Set a base register address for user data constants in the given shader.
1145  * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1146  */
1147 static void si_set_user_data_base(struct si_context *sctx,
1148                                   unsigned shader, uint32_t new_base)
1149 {
1150         uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1151
1152         if (*base != new_base) {
1153                 *base = new_base;
1154
1155                 if (new_base)
1156                         si_mark_shader_pointers_dirty(sctx, shader);
1157         }
1158 }
1159
1160 /* This must be called when these shaders are changed from non-NULL to NULL
1161  * and vice versa:
1162  * - geometry shader
1163  * - tessellation control shader
1164  * - tessellation evaluation shader
1165  */
1166 void si_shader_change_notify(struct si_context *sctx)
1167 {
1168         /* VS can be bound as VS, ES, or LS. */
1169         if (sctx->tes_shader.cso)
1170                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1171                                       R_00B530_SPI_SHADER_USER_DATA_LS_0);
1172         else if (sctx->gs_shader.cso)
1173                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1174                                       R_00B330_SPI_SHADER_USER_DATA_ES_0);
1175         else
1176                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1177                                       R_00B130_SPI_SHADER_USER_DATA_VS_0);
1178
1179         /* TES can be bound as ES, VS, or not bound. */
1180         if (sctx->tes_shader.cso) {
1181                 if (sctx->gs_shader.cso)
1182                         si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1183                                               R_00B330_SPI_SHADER_USER_DATA_ES_0);
1184                 else
1185                         si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1186                                               R_00B130_SPI_SHADER_USER_DATA_VS_0);
1187         } else {
1188                 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
1189         }
1190 }
1191
1192 static void si_emit_shader_pointer(struct si_context *sctx,
1193                                    struct si_descriptors *desc,
1194                                    unsigned sh_base, bool keep_dirty)
1195 {
1196         struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1197         uint64_t va;
1198
1199         if (!desc->pointer_dirty || !desc->buffer)
1200                 return;
1201
1202         va = desc->buffer->gpu_address +
1203              desc->buffer_offset;
1204
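        /* Write the 64-bit list address into two consecutive user data
         * registers (SET_SH_REG with a count of 2: low dword, then high dword).
         */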
1205         radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1206         radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1207         radeon_emit(cs, va);
1208         radeon_emit(cs, va >> 32);
1209
1210         desc->pointer_dirty = keep_dirty;
1211 }
1212
1213 void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
1214 {
1215         unsigned i;
1216         uint32_t *sh_base = sctx->shader_userdata.sh_base;
1217
1218         if (sctx->gs_shader.cso) {
1219                 /* The VS copy shader needs these for clipping, streamout, and rings. */
1220                 unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
1221                 unsigned i = PIPE_SHADER_VERTEX;
1222
1223                 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
1224                 si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);
1225
1226                 if (sctx->tes_shader.cso) {
1227                         /* The TESSEVAL shader needs this for the ESGS ring buffer. */
1228                         si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
1229                                                R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1230                 }
1231         } else if (sctx->tes_shader.cso) {
1232                 /* The TESSEVAL shader needs this for streamout. */
1233                 si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
1234                                        R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1235         }
1236
1237         for (i = 0; i < SI_NUM_SHADERS; i++) {
1238                 unsigned base = sh_base[i];
1239
1240                 if (!base)
1241                         continue;
1242
1243                 if (i != PIPE_SHADER_TESS_EVAL)
1244                         si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);
1245
1246                 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
1247                 si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
1248                 si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
1249         }
1250         si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1251 }
1252
1253 /* INIT/DEINIT/UPLOAD */
1254
1255 void si_init_all_descriptors(struct si_context *sctx)
1256 {
1257         int i;
1258
1259         for (i = 0; i < SI_NUM_SHADERS; i++) {
1260                 si_init_buffer_resources(&sctx->const_buffers[i],
1261                                          SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1262                                          RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
1263                 si_init_buffer_resources(&sctx->rw_buffers[i],
1264                                          SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1265                                          RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT);
1266
1267                 si_init_descriptors(&sctx->samplers[i].views.desc,
1268                                     SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1269                                     null_texture_descriptor);
1270
1271                 si_init_descriptors(&sctx->images[i].desc,
1272                                     SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1273                                     null_image_descriptor);
1274         }
1275
1276         si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1277                             4, SI_NUM_VERTEX_BUFFERS, NULL);
1278
1279         /* Set pipe_context functions. */
1280         sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1281         sctx->b.b.set_shader_images = si_set_shader_images;
1282         sctx->b.b.set_constant_buffer = si_set_constant_buffer;
1283         sctx->b.b.set_sampler_views = si_set_sampler_views;
1284         sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1285         sctx->b.invalidate_buffer = si_invalidate_buffer;
1286
1287         /* Shader user data. */
1288         si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1289                      si_emit_shader_userdata);
1290
1291         /* Set default and immutable mappings. */
1292         si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1293         si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1294         si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1295         si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1296 }
1297
1298 bool si_upload_shader_descriptors(struct si_context *sctx)
1299 {
1300         int i;
1301
1302         for (i = 0; i < SI_NUM_SHADERS; i++) {
1303                 if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
1304                     !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
1305                     !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc) ||
1306                     !si_upload_descriptors(sctx, &sctx->images[i].desc))
1307                         return false;
1308         }
1309         return si_upload_vertex_buffer_descriptors(sctx);
1310 }
1311
1312 void si_release_all_descriptors(struct si_context *sctx)
1313 {
1314         int i;
1315
1316         for (i = 0; i < SI_NUM_SHADERS; i++) {
1317                 si_release_buffer_resources(&sctx->const_buffers[i]);
1318                 si_release_buffer_resources(&sctx->rw_buffers[i]);
1319                 si_release_sampler_views(&sctx->samplers[i].views);
1320                 si_release_image_views(&sctx->images[i]);
1321         }
1322         si_release_descriptors(&sctx->vertex_buffers);
1323 }
1324
1325 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1326 {
1327         int i;
1328
1329         for (i = 0; i < SI_NUM_SHADERS; i++) {
1330                 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1331                 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
1332                 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1333                 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1334         }
1335         si_vertex_buffers_begin_new_cs(sctx);
1336         si_shader_userdata_begin_new_cs(sctx);
1337 }