radeonsi: Add CE uploader.
[android-x86/external-mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Marek Olšák <marek.olsak@amd.com>
25  */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28  * 4 dwords) are stored in lists in memory which is accessed by shaders
29  * using scalar load instructions.
30  *
31  * This file is responsible for managing such lists. It keeps a copy of all
32  * descriptors in CPU memory and re-uploads a whole list if some slots have
33  * been changed.
34  *
35  * This code is also responsible for updating shader pointers to those lists.
36  *
37  * Note that CP DMA can't be used for updating the lists, because a GPU hang
38  * could leave the list in a mid-IB state and the next IB would get wrong
39  * descriptors and the whole context would be unusable at that point.
40  * (Note: Register shadowing can't be used for the same reason.)
41  *
42  * Also, uploading descriptors to newly allocated memory doesn't require
43  * a KCACHE flush.
44  *
45  *
46  * Possible scenarios for one 16 dword image+sampler slot:
47  *
48  *       | Image        | w/ FMASK   | Buffer       | NULL
49  * [ 0: 3] Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
50  * [ 4: 7] Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
51  * [ 8:11] Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
52  * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53  *
54  * FMASK implies MSAA, therefore no sampler state.
55  * Sampler states are never unbound except when FMASK is bound.
56  */
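
/*
 * For example, with the layout above, the sampler view bound to slot N
 * starts at desc->list + N*16: the image descriptor goes to slot dwords
 * [0:7], FMASK (when present) to [8:15], and otherwise [8:11] hold a NULL
 * descriptor and [12:15] the sampler state. si_set_sampler_view() and
 * si_bind_sampler_states() below index the list exactly this way.
 */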
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "si_shader.h"
61 #include "sid.h"
62
63 #include "util/u_memory.h"
64 #include "util/u_suballoc.h"
65 #include "util/u_upload_mgr.h"
66
67
68 /* NULL image and buffer descriptor for textures (alpha = 1) and images
69  * (alpha = 0).
70  *
71  * For images, all fields must be zero except for the swizzle, which
72  * supports arbitrary combinations of 0s and 1s. The texture type must be
73  * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
74  *
75  * For buffers, all fields must be zero. If they are not, the hw hangs.
76  *
77  * This is the only reason why the buffer descriptor must be in words [4:7].
78  */
79 static uint32_t null_texture_descriptor[8] = {
80         0,
81         0,
82         0,
83         S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
84         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
85         /* the rest must contain zeros, which is also used by the buffer
86          * descriptor */
87 };
88
89 static uint32_t null_image_descriptor[8] = {
90         0,
91         0,
92         0,
93         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
94         /* the rest must contain zeros, which is also used by the buffer
95          * descriptor */
96 };
97
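/* Allocate and describe the CPU copy of a descriptor list.
 *
 * If ce_offset is non-NULL, a 32-byte aligned range of CE RAM is reserved
 * for the list and *ce_offset is advanced past it. If null_descriptor is
 * given, every 8-dword sub-descriptor is pre-filled with it so that unbound
 * slots are safe for the hardware to read.
 */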
98 static void si_init_descriptors(struct si_descriptors *desc,
99                                 unsigned shader_userdata_index,
100                                 unsigned element_dw_size,
101                                 unsigned num_elements,
102                                 const uint32_t *null_descriptor,
103                                 unsigned *ce_offset)
104 {
105         int i;
106
107         assert(num_elements <= sizeof(desc->enabled_mask)*8);
108
109         desc->list = CALLOC(num_elements, element_dw_size * 4);
110         desc->element_dw_size = element_dw_size;
111         desc->num_elements = num_elements;
112         desc->list_dirty = true; /* upload the list before the next draw */
113         desc->shader_userdata_offset = shader_userdata_index * 4;
114
115         if (ce_offset) {
116                 desc->ce_offset = *ce_offset;
117
118                 /* make sure that ce_offset stays 32 byte aligned */
119                 *ce_offset += align(element_dw_size * num_elements * 4, 32);
120         }
121
122         /* Initialize the array to NULL descriptors (element size must be a multiple of 8). */
123         if (null_descriptor) {
124                 assert(element_dw_size % 8 == 0);
125                 for (i = 0; i < num_elements * element_dw_size / 8; i++)
126                         memcpy(desc->list + i * 8, null_descriptor,
127                                8 * 4);
128         }
129 }
130
131 static void si_release_descriptors(struct si_descriptors *desc)
132 {
133         pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
134         FREE(desc->list);
135 }
136
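/* Dump "size" bytes of CE RAM, starting at ce_offset, into a freshly
 * suballocated buffer so that shaders can read the descriptors from memory.
 *
 * Returns false on suballocation failure (the caller should then skip the
 * draw). ce_need_synchronization is set so that the draw engine can be made
 * to wait for the CE copy before the data is used (handled outside this file).
 */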
137 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
138                          unsigned *out_offset, struct r600_resource **out_buf) {
139         uint64_t va;
140
141         u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
142                              (struct pipe_resource**)out_buf);
143         if (!*out_buf)
144                 return false;
145
146         va = (*out_buf)->gpu_address + *out_offset;
147
148         radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
149         radeon_emit(sctx->ce_ib, ce_offset);
150         radeon_emit(sctx->ce_ib, size / 4);
151         radeon_emit(sctx->ce_ib, va);
152         radeon_emit(sctx->ce_ib, va >> 32);
153
154         sctx->ce_need_synchronization = true;
155         return true;
156 }
157
158
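/* Re-upload a dirty descriptor list into a fresh upload-manager allocation.
 *
 * The whole CPU copy is written to new memory (so no cache flush is
 * needed), the buffer is added to the gfx CS, and the shader userdata atom
 * is marked dirty so the new pointer is emitted before the next draw.
 * Returns false on allocation failure, in which case the draw call should
 * be skipped.
 */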
159 static bool si_upload_descriptors(struct si_context *sctx,
160                                   struct si_descriptors *desc)
161 {
162         unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
163         void *ptr;
164
165         if (!desc->list_dirty)
166                 return true;
167
168         u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
169                        &desc->buffer_offset,
170                        (struct pipe_resource**)&desc->buffer, &ptr);
171         if (!desc->buffer)
172                 return false; /* skip the draw call */
173
174         util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
175
176         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
177                               RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
178
179         desc->list_dirty = false;
180         desc->pointer_dirty = true;
181         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
182         return true;
183 }
184
185 /* SAMPLER VIEWS */
186
187 static void si_release_sampler_views(struct si_sampler_views *views)
188 {
189         int i;
190
191         for (i = 0; i < Elements(views->views); i++) {
192                 pipe_sampler_view_reference(&views->views[i], NULL);
193         }
194         si_release_descriptors(&views->desc);
195 }
196
197 static void si_sampler_view_add_buffer(struct si_context *sctx,
198                                        struct pipe_resource *resource)
199 {
200         struct r600_resource *rres = (struct r600_resource*)resource;
201
202         if (!resource)
203                 return;
204
205         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres,
206                                   RADEON_USAGE_READ,
207                                   r600_get_sampler_view_priority(rres));
208 }
209
210 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
211                                           struct si_sampler_views *views)
212 {
213         uint64_t mask = views->desc.enabled_mask;
214
215         /* Add buffers to the CS. */
216         while (mask) {
217                 int i = u_bit_scan64(&mask);
218
219                 si_sampler_view_add_buffer(sctx, views->views[i]->texture);
220         }
221
222         if (!views->desc.buffer)
223                 return;
224         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
225                               RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
226 }
227
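/* Write one sampler view into the 16-dword slot layout described at the top
 * of this file.
 *
 * If the cached view state still has COMPRESSION_EN set but the texture no
 * longer has DCC (dcc_offset == 0), the compression bits are cleared first.
 * FMASK words are copied for MSAA textures; otherwise the FMASK dwords are
 * set to the NULL descriptor and the currently bound sampler state is
 * written to dwords [12:15].
 */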
228 static void si_set_sampler_view(struct si_context *sctx,
229                                 struct si_sampler_views *views,
230                                 unsigned slot, struct pipe_sampler_view *view)
231 {
232         struct si_sampler_view *rview = (struct si_sampler_view*)view;
233
234         if (view && view->texture && view->texture->target != PIPE_BUFFER &&
235             G_008F28_COMPRESSION_EN(rview->state[6]) &&
236             ((struct r600_texture*)view->texture)->dcc_offset == 0) {
237                 rview->state[6] &= C_008F28_COMPRESSION_EN &
238                                    C_008F28_ALPHA_IS_ON_MSB;
239         } else if (views->views[slot] == view)
240                 return;
241
242         if (view) {
243                 struct r600_texture *rtex = (struct r600_texture *)view->texture;
244
245                 si_sampler_view_add_buffer(sctx, view->texture);
246
247                 pipe_sampler_view_reference(&views->views[slot], view);
248                 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
249
250                 if (view->texture && view->texture->target != PIPE_BUFFER &&
251                     rtex->fmask.size) {
252                         memcpy(views->desc.list + slot*16 + 8,
253                                rview->fmask_state, 8*4);
254                 } else {
255                         /* Disable FMASK and bind sampler state in [12:15]. */
256                         memcpy(views->desc.list + slot*16 + 8,
257                                null_texture_descriptor, 4*4);
258
259                         if (views->sampler_states[slot])
260                                 memcpy(views->desc.list + slot*16 + 12,
261                                        views->sampler_states[slot], 4*4);
262                 }
263
264                 views->desc.enabled_mask |= 1llu << slot;
265         } else {
266                 pipe_sampler_view_reference(&views->views[slot], NULL);
267                 memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
268                 /* Only clear the lower dwords of FMASK. */
269                 memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
270                 views->desc.enabled_mask &= ~(1llu << slot);
271         }
272
273         views->desc.list_dirty = true;
274 }
275
276 static bool is_compressed_colortex(struct r600_texture *rtex)
277 {
278         return rtex->cmask.size || rtex->fmask.size ||
279                (rtex->dcc_offset && rtex->dirty_level_mask);
280 }
281
282 static void si_set_sampler_views(struct pipe_context *ctx,
283                                  unsigned shader, unsigned start,
284                                  unsigned count,
285                                  struct pipe_sampler_view **views)
286 {
287         struct si_context *sctx = (struct si_context *)ctx;
288         struct si_textures_info *samplers = &sctx->samplers[shader];
289         int i;
290
291         if (!count || shader >= SI_NUM_SHADERS)
292                 return;
293
294         for (i = 0; i < count; i++) {
295                 unsigned slot = start + i;
296
297                 if (!views || !views[i]) {
298                         samplers->depth_texture_mask &= ~(1llu << slot);
299                         samplers->compressed_colortex_mask &= ~(1llu << slot);
300                         si_set_sampler_view(sctx, &samplers->views, slot, NULL);
301                         continue;
302                 }
303
304                 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
305
306                 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
307                         struct r600_texture *rtex =
308                                 (struct r600_texture*)views[i]->texture;
309
310                         if (rtex->is_depth && !rtex->is_flushing_texture) {
311                                 samplers->depth_texture_mask |= 1llu << slot;
312                         } else {
313                                 samplers->depth_texture_mask &= ~(1llu << slot);
314                         }
315                         if (is_compressed_colortex(rtex)) {
316                                 samplers->compressed_colortex_mask |= 1llu << slot;
317                         } else {
318                                 samplers->compressed_colortex_mask &= ~(1llu << slot);
319                         }
320                 } else {
321                         samplers->depth_texture_mask &= ~(1llu << slot);
322                         samplers->compressed_colortex_mask &= ~(1llu << slot);
323                 }
324         }
325 }
326
327 static void
328 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
329 {
330         uint64_t mask = samplers->views.desc.enabled_mask;
331
332         while (mask) {
333                 int i = u_bit_scan64(&mask);
334                 struct pipe_resource *res = samplers->views.views[i]->texture;
335
336                 if (res && res->target != PIPE_BUFFER) {
337                         struct r600_texture *rtex = (struct r600_texture *)res;
338
339                         if (is_compressed_colortex(rtex)) {
340                                 samplers->compressed_colortex_mask |= 1llu << i;
341                         } else {
342                                 samplers->compressed_colortex_mask &= ~(1llu << i);
343                         }
344                 }
345         }
346 }
347
348 /* IMAGE VIEWS */
349
350 static void
351 si_release_image_views(struct si_images_info *images)
352 {
353         unsigned i;
354
355         for (i = 0; i < SI_NUM_IMAGES; ++i) {
356                 struct pipe_image_view *view = &images->views[i];
357
358                 pipe_resource_reference(&view->resource, NULL);
359         }
360
361         si_release_descriptors(&images->desc);
362 }
363
364 static void
365 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
366 {
367         uint mask = images->desc.enabled_mask;
368
369         /* Add buffers to the CS. */
370         while (mask) {
371                 int i = u_bit_scan(&mask);
372                 struct pipe_image_view *view = &images->views[i];
373
374                 assert(view->resource);
375
376                 si_sampler_view_add_buffer(sctx, view->resource);
377         }
378
379         if (images->desc.buffer) {
380                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
381                                           images->desc.buffer,
382                                           RADEON_USAGE_READ,
383                                           RADEON_PRIO_DESCRIPTORS);
384         }
385 }
386
387 static void
388 si_disable_shader_image(struct si_images_info *images, unsigned slot)
389 {
390         if (images->desc.enabled_mask & (1llu << slot)) {
391                 pipe_resource_reference(&images->views[slot].resource, NULL);
392                 images->compressed_colortex_mask &= ~(1 << slot);
393
394                 memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
395                 images->desc.enabled_mask &= ~(1llu << slot);
396                 images->desc.list_dirty = true;
397         }
398 }
399
400 static void
401 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
402                      unsigned start_slot, unsigned count,
403                      struct pipe_image_view *views)
404 {
405         struct si_context *ctx = (struct si_context *)pipe;
406         struct si_screen *screen = ctx->screen;
407         struct si_images_info *images = &ctx->images[shader];
408         unsigned i, slot;
409
410         assert(shader < SI_NUM_SHADERS);
411
412         if (!count)
413                 return;
414
415         assert(start_slot + count <= SI_NUM_IMAGES);
416
417         for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
418                 struct r600_resource *res;
419
420                 if (!views || !views[i].resource) {
421                         si_disable_shader_image(images, slot);
422                         continue;
423                 }
424
425                 res = (struct r600_resource *)views[i].resource;
426                 util_copy_image_view(&images->views[slot], &views[i]);
427
428                 si_sampler_view_add_buffer(ctx, &res->b.b);
429
430                 if (res->b.b.target == PIPE_BUFFER) {
431                         si_make_buffer_descriptor(screen, res,
432                                                   views[i].format,
433                                                   views[i].u.buf.first_element,
434                                                   views[i].u.buf.last_element,
435                                                   images->desc.list + slot * 8);
436                         images->compressed_colortex_mask &= ~(1 << slot);
437                 } else {
438                         static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
439                         struct r600_texture *tex = (struct r600_texture *)res;
440                         unsigned level;
441                         unsigned width, height, depth;
442
443                         assert(!tex->is_depth);
444                         assert(tex->fmask.size == 0);
445
446                         if (tex->dcc_offset &&
447                             views[i].access & PIPE_IMAGE_ACCESS_WRITE)
448                                 r600_texture_disable_dcc(&screen->b, tex);
449
450                         if (is_compressed_colortex(tex)) {
451                                 images->compressed_colortex_mask |= 1 << slot;
452                         } else {
453                                 images->compressed_colortex_mask &= ~(1 << slot);
454                         }
455
456                         /* Always force the base level to the selected level.
457                          *
458                          * This is required for 3D textures, where otherwise
459                          * selecting a single slice for non-layered bindings
460                          * fails. It doesn't hurt the other targets.
461                          */
462                         level = views[i].u.tex.level;
463                         width = u_minify(res->b.b.width0, level);
464                         height = u_minify(res->b.b.height0, level);
465                         depth = u_minify(res->b.b.depth0, level);
466
467                         si_make_texture_descriptor(screen, tex, false, res->b.b.target,
468                                                    views[i].format, swizzle,
469                                                    level, 0, 0,
470                                                    views[i].u.tex.first_layer, views[i].u.tex.last_layer,
471                                                    width, height, depth,
472                                                    images->desc.list + slot * 8,
473                                                    NULL);
474                 }
475
476                 images->desc.enabled_mask |= 1llu << slot;
477                 images->desc.list_dirty = true;
478         }
479 }
480
481 static void
482 si_images_update_compressed_colortex_mask(struct si_images_info *images)
483 {
484         uint64_t mask = images->desc.enabled_mask;
485
486         while (mask) {
487                 int i = u_bit_scan64(&mask);
488                 struct pipe_resource *res = images->views[i].resource;
489
490                 if (res && res->target != PIPE_BUFFER) {
491                         struct r600_texture *rtex = (struct r600_texture *)res;
492
493                         if (is_compressed_colortex(rtex)) {
494                                 images->compressed_colortex_mask |= 1 << i;
495                         } else {
496                                 images->compressed_colortex_mask &= ~(1 << i);
497                         }
498                 }
499         }
500 }
501
502 /* SAMPLER STATES */
503
504 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
505                                    unsigned start, unsigned count, void **states)
506 {
507         struct si_context *sctx = (struct si_context *)ctx;
508         struct si_textures_info *samplers = &sctx->samplers[shader];
509         struct si_descriptors *desc = &samplers->views.desc;
510         struct si_sampler_state **sstates = (struct si_sampler_state**)states;
511         int i;
512
513         if (!count || shader >= SI_NUM_SHADERS)
514                 return;
515
516         for (i = 0; i < count; i++) {
517                 unsigned slot = start + i;
518
519                 if (!sstates[i] ||
520                     sstates[i] == samplers->views.sampler_states[slot])
521                         continue;
522
523                 samplers->views.sampler_states[slot] = sstates[i];
524
525                 /* If FMASK is bound, don't overwrite it.
526                  * The sampler state will be set after FMASK is unbound.
527                  */
528                 if (samplers->views.views[slot] &&
529                     samplers->views.views[slot]->texture &&
530                     samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
531                     ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
532                         continue;
533
534                 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
535                 desc->list_dirty = true;
536         }
537 }
538
539 /* BUFFER RESOURCES */
540
541 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
542                                      unsigned num_buffers,
543                                      unsigned shader_userdata_index,
544                                      enum radeon_bo_usage shader_usage,
545                                      enum radeon_bo_priority priority,
546                                      unsigned *ce_offset)
547 {
548         buffers->shader_usage = shader_usage;
549         buffers->priority = priority;
550         buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
551
552         si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
553                             num_buffers, NULL, ce_offset);
554 }
555
556 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
557 {
558         int i;
559
560         for (i = 0; i < buffers->desc.num_elements; i++) {
561                 pipe_resource_reference(&buffers->buffers[i], NULL);
562         }
563
564         FREE(buffers->buffers);
565         si_release_descriptors(&buffers->desc);
566 }
567
568 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
569                                              struct si_buffer_resources *buffers)
570 {
571         uint64_t mask = buffers->desc.enabled_mask;
572
573         /* Add buffers to the CS. */
574         while (mask) {
575                 int i = u_bit_scan64(&mask);
576
577                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
578                                       (struct r600_resource*)buffers->buffers[i],
579                                       buffers->shader_usage, buffers->priority);
580         }
581
582         if (!buffers->desc.buffer)
583                 return;
584         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
585                               buffers->desc.buffer, RADEON_USAGE_READWRITE,
586                               RADEON_PRIO_DESCRIPTORS);
587 }
588
589 /* VERTEX BUFFERS */
590
591 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
592 {
593         struct si_descriptors *desc = &sctx->vertex_buffers;
594         int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
595         int i;
596
597         for (i = 0; i < count; i++) {
598                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
599
600                 if (vb >= Elements(sctx->vertex_buffer))
601                         continue;
602                 if (!sctx->vertex_buffer[vb].buffer)
603                         continue;
604
605                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
606                                       (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
607                                       RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
608         }
609
610         if (!desc->buffer)
611                 return;
612         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
613                               desc->buffer, RADEON_USAGE_READ,
614                               RADEON_PRIO_DESCRIPTORS);
615 }
616
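/* Build and upload one 4-dword buffer descriptor (T#) per vertex element.
 *
 * Missing or out-of-range vertex buffers get an all-zero descriptor. On
 * CIK and older chips with a non-zero stride, num_records is computed so
 * that the last complete vertex still fits in the buffer; otherwise the
 * remaining buffer size in bytes is used.
 */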
617 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
618 {
619         struct si_descriptors *desc = &sctx->vertex_buffers;
620         bool bound[SI_NUM_VERTEX_BUFFERS] = {};
621         unsigned i, count = sctx->vertex_elements->count;
622         uint64_t va;
623         uint32_t *ptr;
624
625         if (!sctx->vertex_buffers_dirty)
626                 return true;
627         if (!count || !sctx->vertex_elements)
628                 return true;
629
630         /* Vertex buffer descriptors are the only ones which are uploaded
631          * directly through a staging buffer and don't go through
632          * the fine-grained upload path.
633          */
634         u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
635                        (struct pipe_resource**)&desc->buffer, (void**)&ptr);
636         if (!desc->buffer)
637                 return false;
638
639         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
640                               desc->buffer, RADEON_USAGE_READ,
641                               RADEON_PRIO_DESCRIPTORS);
642
643         assert(count <= SI_NUM_VERTEX_BUFFERS);
644
645         for (i = 0; i < count; i++) {
646                 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
647                 struct pipe_vertex_buffer *vb;
648                 struct r600_resource *rbuffer;
649                 unsigned offset;
650                 uint32_t *desc = &ptr[i*4];
651
652                 if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
653                         memset(desc, 0, 16);
654                         continue;
655                 }
656
657                 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
658                 rbuffer = (struct r600_resource*)vb->buffer;
659                 if (!rbuffer) {
660                         memset(desc, 0, 16);
661                         continue;
662                 }
663
664                 offset = vb->buffer_offset + ve->src_offset;
665                 va = rbuffer->gpu_address + offset;
666
667                 /* Fill in T# buffer resource description */
668                 desc[0] = va;
669                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
670                           S_008F04_STRIDE(vb->stride);
671
672                 if (sctx->b.chip_class <= CIK && vb->stride)
673                         /* Round up by rounding down and adding 1 */
674                         desc[2] = (vb->buffer->width0 - offset -
675                                    sctx->vertex_elements->format_size[i]) /
676                                   vb->stride + 1;
677                 else
678                         desc[2] = vb->buffer->width0 - offset;
679
680                 desc[3] = sctx->vertex_elements->rsrc_word3[i];
681
682                 if (!bound[ve->vertex_buffer_index]) {
683                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
684                                               (struct r600_resource*)vb->buffer,
685                                               RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
686                         bound[ve->vertex_buffer_index] = true;
687                 }
688         }
689
690         /* Don't flush the const cache. It would have a very negative effect
691          * on performance (confirmed by testing). New descriptors are always
692          * uploaded to a fresh new buffer, so I don't think flushing the const
693          * cache is needed. */
694         desc->pointer_dirty = true;
695         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
696         sctx->vertex_buffers_dirty = false;
697         return true;
698 }
699
700
701 /* CONSTANT BUFFERS */
702
703 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
704                             const uint8_t *ptr, unsigned size, uint32_t *const_offset)
705 {
706         void *tmp;
707
708         u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
709                        (struct pipe_resource**)rbuffer, &tmp);
710         if (rbuffer)
711                 util_memcpy_cpu_to_le32(tmp, ptr, size);
712 }
713
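/* Bind one constant buffer to a shader stage.
 *
 * User-memory constants are first copied to GPU memory with
 * si_upload_const_buffer(). On CIK, unbinding is replaced by binding the
 * dummy null_const_buf, because S_BUFFER_LOAD is buggy with a NULL buffer
 * (see the comment below).
 */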
714 static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
715                                    struct pipe_constant_buffer *input)
716 {
717         struct si_context *sctx = (struct si_context *)ctx;
718         struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
719
720         if (shader >= SI_NUM_SHADERS)
721                 return;
722
723         assert(slot < buffers->desc.num_elements);
724         pipe_resource_reference(&buffers->buffers[slot], NULL);
725
726         /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
727          * with a NULL buffer). We need to use a dummy buffer instead. */
728         if (sctx->b.chip_class == CIK &&
729             (!input || (!input->buffer && !input->user_buffer)))
730                 input = &sctx->null_const_buf;
731
732         if (input && (input->buffer || input->user_buffer)) {
733                 struct pipe_resource *buffer = NULL;
734                 uint64_t va;
735
736                 /* Upload the user buffer if needed. */
737                 if (input->user_buffer) {
738                         unsigned buffer_offset;
739
740                         si_upload_const_buffer(sctx,
741                                                (struct r600_resource**)&buffer, input->user_buffer,
742                                                input->buffer_size, &buffer_offset);
743                         if (!buffer) {
744                                 /* Just unbind on failure. */
745                                 si_set_constant_buffer(ctx, shader, slot, NULL);
746                                 return;
747                         }
748                         va = r600_resource(buffer)->gpu_address + buffer_offset;
749                 } else {
750                         pipe_resource_reference(&buffer, input->buffer);
751                         va = r600_resource(buffer)->gpu_address + input->buffer_offset;
752                 }
753
754                 /* Set the descriptor. */
755                 uint32_t *desc = buffers->desc.list + slot*4;
756                 desc[0] = va;
757                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
758                           S_008F04_STRIDE(0);
759                 desc[2] = input->buffer_size;
760                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
761                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
762                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
763                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
764                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
765                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
766
767                 buffers->buffers[slot] = buffer;
768                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
769                                       (struct r600_resource*)buffer,
770                                       buffers->shader_usage, buffers->priority);
771                 buffers->desc.enabled_mask |= 1llu << slot;
772         } else {
773                 /* Clear the descriptor. */
774                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
775                 buffers->desc.enabled_mask &= ~(1llu << slot);
776         }
777
778         buffers->desc.list_dirty = true;
779 }
780
781 /* SHADER BUFFERS */
782
783 static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
784                                   unsigned start_slot, unsigned count,
785                                   struct pipe_shader_buffer *sbuffers)
786 {
787         struct si_context *sctx = (struct si_context *)ctx;
788         struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
789         unsigned i;
790
791         assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
792
793         for (i = 0; i < count; ++i) {
794                 struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
795                 struct r600_resource *buf;
796                 unsigned slot = start_slot + i;
797                 uint32_t *desc = buffers->desc.list + slot * 4;
798                 uint64_t va;
799
800                 if (!sbuffer || !sbuffer->buffer) {
801                         pipe_resource_reference(&buffers->buffers[slot], NULL);
802                         memset(desc, 0, sizeof(uint32_t) * 4);
803                         buffers->desc.enabled_mask &= ~(1llu << slot);
804                         continue;
805                 }
806
807                 buf = (struct r600_resource *)sbuffer->buffer;
808                 va = buf->gpu_address + sbuffer->buffer_offset;
809
810                 desc[0] = va;
811                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
812                           S_008F04_STRIDE(0);
813                 desc[2] = sbuffer->buffer_size;
814                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
815                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
816                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
817                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
818                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
819                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
820
821                 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
822                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
823                                       buffers->shader_usage, buffers->priority);
824                 buffers->desc.enabled_mask |= 1llu << slot;
825         }
826
827         buffers->desc.list_dirty = true;
828 }
829
830 /* RING BUFFERS */
831
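/* Bind a ring buffer descriptor in the RW buffer list.
 *
 * element_size and index_stride are translated from their API values to the
 * hardware encodings, and on VI num_records is multiplied by the stride.
 */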
832 void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
833                         struct pipe_resource *buffer,
834                         unsigned stride, unsigned num_records,
835                         bool add_tid, bool swizzle,
836                         unsigned element_size, unsigned index_stride, uint64_t offset)
837 {
838         struct si_context *sctx = (struct si_context *)ctx;
839         struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
840
841         if (shader >= SI_NUM_SHADERS)
842                 return;
843
844         /* The stride field in the resource descriptor has 14 bits */
845         assert(stride < (1 << 14));
846
847         assert(slot < buffers->desc.num_elements);
848         pipe_resource_reference(&buffers->buffers[slot], NULL);
849
850         if (buffer) {
851                 uint64_t va;
852
853                 va = r600_resource(buffer)->gpu_address + offset;
854
855                 switch (element_size) {
856                 default:
857                         assert(!"Unsupported ring buffer element size");
858                 case 0:
859                 case 2:
860                         element_size = 0;
861                         break;
862                 case 4:
863                         element_size = 1;
864                         break;
865                 case 8:
866                         element_size = 2;
867                         break;
868                 case 16:
869                         element_size = 3;
870                         break;
871                 }
872
873                 switch (index_stride) {
874                 default:
875                         assert(!"Unsupported ring buffer index stride");
876                 case 0:
877                 case 8:
878                         index_stride = 0;
879                         break;
880                 case 16:
881                         index_stride = 1;
882                         break;
883                 case 32:
884                         index_stride = 2;
885                         break;
886                 case 64:
887                         index_stride = 3;
888                         break;
889                 }
890
891                 if (sctx->b.chip_class >= VI && stride)
892                         num_records *= stride;
893
894                 /* Set the descriptor. */
895                 uint32_t *desc = buffers->desc.list + slot*4;
896                 desc[0] = va;
897                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
898                           S_008F04_STRIDE(stride) |
899                           S_008F04_SWIZZLE_ENABLE(swizzle);
900                 desc[2] = num_records;
901                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
902                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
903                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
904                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
905                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
906                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
907                           S_008F0C_ELEMENT_SIZE(element_size) |
908                           S_008F0C_INDEX_STRIDE(index_stride) |
909                           S_008F0C_ADD_TID_ENABLE(add_tid);
910
911                 pipe_resource_reference(&buffers->buffers[slot], buffer);
912                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
913                                       (struct r600_resource*)buffer,
914                                       buffers->shader_usage, buffers->priority);
915                 buffers->desc.enabled_mask |= 1llu << slot;
916         } else {
917                 /* Clear the descriptor. */
918                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
919                 buffers->desc.enabled_mask &= ~(1llu << slot);
920         }
921
922         buffers->desc.list_dirty = true;
923 }
924
925 /* STREAMOUT BUFFERS */
926
927 static void si_set_streamout_targets(struct pipe_context *ctx,
928                                      unsigned num_targets,
929                                      struct pipe_stream_output_target **targets,
930                                      const unsigned *offsets)
931 {
932         struct si_context *sctx = (struct si_context *)ctx;
933         struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
934         unsigned old_num_targets = sctx->b.streamout.num_targets;
935         unsigned i, bufidx;
936
937         /* We are going to unbind the buffers. Mark which caches need to be flushed. */
938         if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
939                 /* Since streamout uses vector writes which go through TC L2
940                  * and most other clients can use TC L2 as well, we don't need
941                  * to flush it.
942                  *
943                  * The only case which requires flushing it is VGT DMA index
944                  * fetching, which is a rare case. Thus, flag the TC L2
945                  * dirtiness in the resource and handle it when index fetching
946                  * is used.
947                  */
948                 for (i = 0; i < sctx->b.streamout.num_targets; i++)
949                         if (sctx->b.streamout.targets[i])
950                                 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
951
952                 /* Invalidate the scalar cache in case a streamout buffer is
953                  * going to be used as a constant buffer.
954                  *
955                  * Invalidate TC L1, because streamout bypasses it (done by
956                  * setting GLC=1 in the store instruction), but it can contain
957                  * outdated data of streamout buffers.
958                  *
959                  * VS_PARTIAL_FLUSH is required if the buffers are going to be
960                  * used as an input immediately.
961                  */
962                 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
963                                  SI_CONTEXT_INV_VMEM_L1 |
964                                  SI_CONTEXT_VS_PARTIAL_FLUSH;
965         }
966
967         /* All readers of the streamout targets need to be finished before we can
968          * start writing to the targets.
969          */
970         if (num_targets)
971                 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
972
973         /* Streamout buffers must be bound in 2 places:
974          * 1) in VGT by setting the VGT_STRMOUT registers
975          * 2) as shader resources
976          */
977
978         /* Set the VGT regs. */
979         r600_set_streamout_targets(ctx, num_targets, targets, offsets);
980
981         /* Set the shader resources. */
982         for (i = 0; i < num_targets; i++) {
983                 bufidx = SI_SO_BUF_OFFSET + i;
984
985                 if (targets[i]) {
986                         struct pipe_resource *buffer = targets[i]->buffer;
987                         uint64_t va = r600_resource(buffer)->gpu_address;
988
989                         /* Set the descriptor.
990                          *
991                          * On VI, the format must be non-INVALID, otherwise
992                          * the buffer will be considered not bound and store
993                          * instructions will be no-ops.
994                          */
995                         uint32_t *desc = buffers->desc.list + bufidx*4;
996                         desc[0] = va;
997                         desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
998                         desc[2] = 0xffffffff;
999                         desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1000                                   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1001                                   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1002                                   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1003                                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1004
1005                         /* Set the resource. */
1006                         pipe_resource_reference(&buffers->buffers[bufidx],
1007                                                 buffer);
1008                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1009                                               (struct r600_resource*)buffer,
1010                                               buffers->shader_usage, buffers->priority);
1011                         buffers->desc.enabled_mask |= 1llu << bufidx;
1012                 } else {
1013                         /* Clear the descriptor and unset the resource. */
1014                         memset(buffers->desc.list + bufidx*4, 0,
1015                                sizeof(uint32_t) * 4);
1016                         pipe_resource_reference(&buffers->buffers[bufidx],
1017                                                 NULL);
1018                         buffers->desc.enabled_mask &= ~(1llu << bufidx);
1019                 }
1020         }
1021         for (; i < old_num_targets; i++) {
1022                 bufidx = SI_SO_BUF_OFFSET + i;
1023                 /* Clear the descriptor and unset the resource. */
1024                 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
1025                 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1026                 buffers->desc.enabled_mask &= ~(1llu << bufidx);
1027         }
1028
1029         buffers->desc.list_dirty = true;
1030 }
1031
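/* Patch the base address in an existing buffer descriptor after the
 * underlying buffer has been reallocated at a new virtual address,
 * preserving the binding's offset within the buffer. */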
1032 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1033                                         uint32_t *desc, uint64_t old_buf_va,
1034                                         struct pipe_resource *new_buf)
1035 {
1036         /* Retrieve the buffer offset from the descriptor. */
1037         uint64_t old_desc_va =
1038                 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1039
1040         assert(old_buf_va <= old_desc_va);
1041         uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1042
1043         /* Update the descriptor. */
1044         uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
1045
1046         desc[0] = va;
1047         desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
1048                   S_008F04_BASE_ADDRESS_HI(va >> 32);
1049 }
1050
1051 /* TEXTURE METADATA ENABLE/DISABLE */
1052
1053 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1054  * while the texture is bound, possibly by a different context. In that case,
1055  * call this function to update compressed_colortex_masks.
1056  */
1057 void si_update_compressed_colortex_masks(struct si_context *sctx)
1058 {
1059         for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1060                 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1061                 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1062         }
1063 }
1064
1065 /* BUFFER DISCARD/INVALIDATION */
1066
1067 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1068 static void si_reset_buffer_resources(struct si_context *sctx,
1069                                       struct si_buffer_resources *buffers,
1070                                       struct pipe_resource *buf,
1071                                       uint64_t old_va)
1072 {
1073         uint64_t mask = buffers->desc.enabled_mask;
1074
1075         while (mask) {
1076                 unsigned i = u_bit_scan64(&mask);
1077                 if (buffers->buffers[i] == buf) {
1078                         si_desc_reset_buffer_offset(&sctx->b.b,
1079                                                     buffers->desc.list + i*4,
1080                                                     old_va, buf);
1081                         buffers->desc.list_dirty = true;
1082
1083                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1084                                                 (struct r600_resource *)buf,
1085                                                 buffers->shader_usage,
1086                                                 buffers->priority);
1087                 }
1088         }
1089 }
1090
1091 /* Reallocate a buffer and update all resource bindings where the buffer is
1092  * bound.
1093  *
1094  * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1095  * idle by discarding its contents. Apps usually tell us when to do this using
1096  * map_buffer flags, for example.
1097  */
1098 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1099 {
1100         struct si_context *sctx = (struct si_context*)ctx;
1101         struct r600_resource *rbuffer = r600_resource(buf);
1102         unsigned i, shader, alignment = rbuffer->buf->alignment;
1103         uint64_t old_va = rbuffer->gpu_address;
1104         unsigned num_elems = sctx->vertex_elements ?
1105                                        sctx->vertex_elements->count : 0;
1106         struct si_sampler_view *view;
1107
1108         /* Reallocate the buffer in the same pipe_resource. */
1109         r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
1110                            alignment, TRUE);
1111
1112         /* We changed the buffer, now we need to bind it where the old one
1113          * was bound. This consists of 2 things:
1114          *   1) Updating the resource descriptor and dirtying it.
1115          *   2) Adding a relocation to the CS, so that it's usable.
1116          */
1117
1118         /* Vertex buffers. */
1119         for (i = 0; i < num_elems; i++) {
1120                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1121
1122                 if (vb >= Elements(sctx->vertex_buffer))
1123                         continue;
1124                 if (!sctx->vertex_buffer[vb].buffer)
1125                         continue;
1126
1127                 if (sctx->vertex_buffer[vb].buffer == buf) {
1128                         sctx->vertex_buffers_dirty = true;
1129                         break;
1130                 }
1131         }
1132
1133         /* Read/Write buffers. */
1134         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1135                 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
1136                 uint64_t mask = buffers->desc.enabled_mask;
1137
1138                 while (mask) {
1139                         i = u_bit_scan64(&mask);
1140                         if (buffers->buffers[i] == buf) {
1141                                 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1142                                                             old_va, buf);
1143                                 buffers->desc.list_dirty = true;
1144
1145                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1146                                                       rbuffer, buffers->shader_usage,
1147                                                       buffers->priority);
1148
1149                                 if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
1150                                         /* Update the streamout state. */
1151                                         if (sctx->b.streamout.begin_emitted) {
1152                                                 r600_emit_streamout_end(&sctx->b);
1153                                         }
1154                                         sctx->b.streamout.append_bitmask =
1155                                                 sctx->b.streamout.enabled_mask;
1156                                         r600_streamout_buffers_dirty(&sctx->b);
1157                                 }
1158                         }
1159                 }
1160         }
1161
1162         /* Constant and shader buffers. */
1163         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1164                 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1165                                           buf, old_va);
1166                 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1167                                           buf, old_va);
1168         }
1169
1170         /* Texture buffers - update virtual addresses in sampler view descriptors. */
1171         LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1172                 if (view->base.texture == buf) {
1173                         si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1174                 }
1175         }
1176         /* Texture buffers - update bindings. */
1177         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1178                 struct si_sampler_views *views = &sctx->samplers[shader].views;
1179                 uint64_t mask = views->desc.enabled_mask;
1180
1181                 while (mask) {
1182                         unsigned i = u_bit_scan64(&mask);
1183                         if (views->views[i]->texture == buf) {
1184                                 si_desc_reset_buffer_offset(ctx,
1185                                                             views->desc.list +
1186                                                             i * 16 + 4,
1187                                                             old_va, buf);
1188                                 views->desc.list_dirty = true;
1189
1190                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1191                                                       rbuffer, RADEON_USAGE_READ,
1192                                                       RADEON_PRIO_SAMPLER_BUFFER);
1193                         }
1194                 }
1195         }
1196
1197         /* Shader images */
1198         for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1199                 struct si_images_info *images = &sctx->images[shader];
1200                 unsigned mask = images->desc.enabled_mask;
1201
1202                 while (mask) {
1203                         unsigned i = u_bit_scan(&mask);
1204
1205                         if (images->views[i].resource == buf) {
1206                                 si_desc_reset_buffer_offset(
1207                                         ctx, images->desc.list + i * 8 + 4,
1208                                         old_va, buf);
1209                                 images->desc.list_dirty = true;
1210
1211                                 radeon_add_to_buffer_list(
1212                                         &sctx->b, &sctx->b.gfx, rbuffer,
1213                                         RADEON_USAGE_READWRITE,
1214                                         RADEON_PRIO_SAMPLER_BUFFER);
1215                         }
1216                 }
1217         }
1218 }
1219
1220 /* SHADER USER DATA */
1221
1222 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1223                                           unsigned shader)
1224 {
1225         sctx->const_buffers[shader].desc.pointer_dirty = true;
1226         sctx->rw_buffers[shader].desc.pointer_dirty = true;
1227         sctx->shader_buffers[shader].desc.pointer_dirty = true;
1228         sctx->samplers[shader].views.desc.pointer_dirty = true;
1229         sctx->images[shader].desc.pointer_dirty = true;
1230
1231         if (shader == PIPE_SHADER_VERTEX)
1232                 sctx->vertex_buffers.pointer_dirty = true;
1233
1234         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1235 }

static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_mark_shader_pointers_dirty(sctx, i);
	}
}

/* Set a base register address for user data constants in the given shader.
 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
 */
static void si_set_user_data_base(struct si_context *sctx,
				  unsigned shader, uint32_t new_base)
{
	uint32_t *base = &sctx->shader_userdata.sh_base[shader];

	if (*base != new_base) {
		*base = new_base;

		if (new_base)
			si_mark_shader_pointers_dirty(sctx, shader);
	}
}

/* This must be called when these shaders are changed from non-NULL to NULL
 * and vice versa:
 * - geometry shader
 * - tessellation control shader
 * - tessellation evaluation shader
 */
void si_shader_change_notify(struct si_context *sctx)
{
	/* VS can be bound as VS, ES, or LS. */
	if (sctx->tes_shader.cso)
		si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
				      R_00B530_SPI_SHADER_USER_DATA_LS_0);
	else if (sctx->gs_shader.cso)
		si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
				      R_00B330_SPI_SHADER_USER_DATA_ES_0);
	else
		si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
				      R_00B130_SPI_SHADER_USER_DATA_VS_0);

	/* TES can be bound as ES, VS, or not bound. */
	if (sctx->tes_shader.cso) {
		if (sctx->gs_shader.cso)
			si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
					      R_00B330_SPI_SHADER_USER_DATA_ES_0);
		else
			si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
					      R_00B130_SPI_SHADER_USER_DATA_VS_0);
	} else {
		si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
	}
}
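
/* Summary of the mapping produced above (derived from the code; LS and ES
 * are the hardware stages that feed the tessellator and the GS):
 *
 *   Pipeline            VERTEX uses                TESS_EVAL uses
 *   VS only             USER_DATA_VS (0xB130)      unbound (0)
 *   VS + GS             USER_DATA_ES (0xB330)      unbound (0)
 *   VS + tess           USER_DATA_LS (0xB530)      USER_DATA_VS (0xB130)
 *   VS + tess + GS      USER_DATA_LS (0xB530)      USER_DATA_ES (0xB330)
 *
 * TESS_CTRL, GEOMETRY and FRAGMENT keep the fixed bases assigned in
 * si_init_all_descriptors().
 */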

static void si_emit_shader_pointer(struct si_context *sctx,
				   struct si_descriptors *desc,
				   unsigned sh_base, bool keep_dirty)
{
	struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
	uint64_t va;

	if (!desc->pointer_dirty || !desc->buffer)
		return;

	va = desc->buffer->gpu_address +
	     desc->buffer_offset;

	radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
	radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
	radeon_emit(cs, va);
	radeon_emit(cs, va >> 32);

	desc->pointer_dirty = keep_dirty;
}
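
/* The packet emitted above, dword by dword (the register offset depends on
 * sh_base and the descriptor set's shader_userdata_offset):
 *
 *   PKT3(PKT3_SET_SH_REG, 2, 0)     write 2 consecutive SH registers
 *   (reg - SI_SH_REG_OFFSET) >> 2   first register, as a dword offset
 *   va                              low 32 bits of the descriptor list address
 *   va >> 32                        high 32 bits of the descriptor list address
 *
 * keep_dirty=true is passed by callers that need to emit the same list again
 * at another user-data base within the same si_emit_shader_userdata() call;
 * the final emission passes false, which clears pointer_dirty.
 */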

void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
{
	unsigned i;
	uint32_t *sh_base = sctx->shader_userdata.sh_base;

	if (sctx->gs_shader.cso) {
		/* The VS copy shader needs these for clipping, streamout, and rings. */
		unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
		unsigned i = PIPE_SHADER_VERTEX;

		si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
		si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);

		if (sctx->tes_shader.cso) {
			/* The TESSEVAL shader needs this for the ESGS ring buffer. */
			si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
					       R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
		}
	} else if (sctx->tes_shader.cso) {
		/* The TESSEVAL shader needs this for streamout. */
		si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
				       R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
	}

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		unsigned base = sh_base[i];

		if (!base)
			continue;

		if (i != PIPE_SHADER_TESS_EVAL)
			si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);

		si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
		si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
		si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
		si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
	}
	si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
}

/* INIT/DEINIT/UPLOAD */
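/* Per-slot sizes used in si_init_all_descriptors() below, in dwords: sampler
 * views use the 16-dword image+FMASK+sampler layout described at the top of
 * this file, shader images use 8 dwords and vertex buffers use 4 dwords; the
 * constant, RW and shader buffer lists use 4-dword buffer descriptors (set up
 * by si_init_buffer_resources earlier in this file). Each CE-resident list
 * also reserves its share of CE RAM through ce_offset; vertex buffers pass
 * NULL instead, which presumably keeps their list out of CE RAM. The assert
 * checks that the total reservation stays within the available CE RAM
 * (32768 in the assert, i.e. 32 KiB assuming byte units).
 */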

void si_init_all_descriptors(struct si_context *sctx)
{
	int i;
	unsigned ce_offset = 0;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_init_buffer_resources(&sctx->const_buffers[i],
					 SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
					 RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
					 &ce_offset);
		si_init_buffer_resources(&sctx->rw_buffers[i],
					 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
					 RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
					 &ce_offset);
		si_init_buffer_resources(&sctx->shader_buffers[i],
					 SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
					 RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
					 &ce_offset);

		si_init_descriptors(&sctx->samplers[i].views.desc,
				    SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
				    null_texture_descriptor, &ce_offset);

		si_init_descriptors(&sctx->images[i].desc,
				    SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
				    null_image_descriptor, &ce_offset);
	}

	si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
			    4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);

	assert(ce_offset <= 32768);

	/* Set pipe_context functions. */
	sctx->b.b.bind_sampler_states = si_bind_sampler_states;
	sctx->b.b.set_shader_images = si_set_shader_images;
	sctx->b.b.set_constant_buffer = si_set_constant_buffer;
	sctx->b.b.set_shader_buffers = si_set_shader_buffers;
	sctx->b.b.set_sampler_views = si_set_sampler_views;
	sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
	sctx->b.invalidate_buffer = si_invalidate_buffer;

	/* Shader user data. */
	si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
		     si_emit_shader_userdata);

	/* Set default and immutable mappings. */
	si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
	si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
	si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
	si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}

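/* Upload every dirty descriptor list before a draw. Returns false if
 * allocating the upload space fails, so the caller can bail out rather than
 * draw with stale descriptors (the failure handling itself lives in the
 * callers).
 */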
bool si_upload_shader_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
		    !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
		    !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc) ||
		    !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc) ||
		    !si_upload_descriptors(sctx, &sctx->images[i].desc))
			return false;
	}
	return si_upload_vertex_buffer_descriptors(sctx);
}

void si_release_all_descriptors(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_release_buffer_resources(&sctx->const_buffers[i]);
		si_release_buffer_resources(&sctx->rw_buffers[i]);
		si_release_buffer_resources(&sctx->shader_buffers[i]);
		si_release_sampler_views(&sctx->samplers[i].views);
		si_release_image_views(&sctx->images[i]);
	}
	si_release_descriptors(&sctx->vertex_buffers);
}

void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
	int i;

	for (i = 0; i < SI_NUM_SHADERS; i++) {
		si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
		si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
		si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
		si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
		si_image_views_begin_new_cs(sctx, &sctx->images[i]);
	}
	si_vertex_buffers_begin_new_cs(sctx);
	si_shader_userdata_begin_new_cs(sctx);
}
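
/* A new command stream starts out with an empty buffer list and none of the
 * SET_SH_REG packets from previous IBs, so the begin_new_cs hooks above
 * re-add every bound descriptor buffer to the new CS and re-mark all shader
 * pointers dirty (via si_shader_userdata_begin_new_cs), forcing them to be
 * re-emitted on the next draw.
 */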