radeonsi: set descriptor dirty mask on shader buffer unbind
[android-x86/external-mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Marek Olšák <marek.olsak@amd.com>
25  */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28  * 4 dwords) are stored in lists in memory which is accessed by shaders
29  * using scalar load instructions.
30  *
31  * This file is responsible for managing such lists. It keeps a copy of all
32  * descriptors in CPU memory and re-uploads a whole list if some slots have
33  * been changed.
34  *
35  * This code is also responsible for updating shader pointers to those lists.
36  *
37  * Note that CP DMA can't be used for updating the lists, because a GPU hang
38  * could leave the list in a partially-updated (mid-IB) state; the next IB
39  * would then read wrong descriptors and the whole context would be unusable.
40  * (Register shadowing can't be used for the same reason.)
41  *
42  * Also, uploading descriptors to newly allocated memory doesn't require
43  * a KCACHE flush.
44  *
45  *
46  * Possible scenarios for one 16 dword image+sampler slot:
47  *
48  *       | Image        | w/ FMASK   | Buffer       | NULL
49  * [ 0: 3] Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
50  * [ 4: 7] Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
51  * [ 8:11] Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
52  * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53  *
54  * FMASK implies MSAA, therefore no sampler state.
55  * Sampler states are never unbound except when FMASK is bound.
56  */
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "si_shader.h"
61 #include "sid.h"
62
63 #include "util/u_math.h"
64 #include "util/u_memory.h"
65 #include "util/u_suballoc.h"
66 #include "util/u_upload_mgr.h"
67
68
69 /* NULL image and buffer descriptor for textures (alpha = 1) and images
70  * (alpha = 0).
71  *
72  * For images, all fields must be zero except for the swizzle, which may be
73  * any combination of 0s and 1s, and the texture type, which must be a valid
74  * type (e.g. 1D); if the texture type isn't set, the hw hangs.
75  *
76  * For buffers, all fields must be zero. If they are not, the hw hangs.
77  *
78  * This is the only reason why the buffer descriptor must be in words [4:7].
79  */
80 static uint32_t null_texture_descriptor[8] = {
81         0,
82         0,
83         0,
84         S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
85         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
86         /* the rest must contain zeros, which is also used by the buffer
87          * descriptor */
88 };
89
90 static uint32_t null_image_descriptor[8] = {
91         0,
92         0,
93         0,
94         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
95         /* the rest must contain zeros, which is also used by the buffer
96          * descriptor */
97 };
98
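/* Initialize a descriptor list: allocate the CPU-side copy, mark all slots
 * dirty, record the user-data SGPR offset, reserve CE RAM space (if a
 * ce_offset is given), and pre-fill the list with NULL descriptors when one
 * is provided. */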
99 static void si_init_descriptors(struct si_descriptors *desc,
100                                 unsigned shader_userdata_index,
101                                 unsigned element_dw_size,
102                                 unsigned num_elements,
103                                 const uint32_t *null_descriptor,
104                                 unsigned *ce_offset)
105 {
106         int i;
107
108         assert(num_elements <= sizeof(desc->enabled_mask)*8);
109
110         desc->list = CALLOC(num_elements, element_dw_size * 4);
111         desc->element_dw_size = element_dw_size;
112         desc->num_elements = num_elements;
113         desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
114         desc->shader_userdata_offset = shader_userdata_index * 4;
115
116         if (ce_offset) {
117                 desc->ce_offset = *ce_offset;
118
119                 /* make sure that ce_offset stays 32 byte aligned */
120                 *ce_offset += align(element_dw_size * num_elements * 4, 32);
121         }
122
123         /* Initialize the array to NULL descriptors if the element size is a multiple of 8 dwords. */
124         if (null_descriptor) {
125                 assert(element_dw_size % 8 == 0);
126                 for (i = 0; i < num_elements * element_dw_size / 8; i++)
127                         memcpy(desc->list + i * 8, null_descriptor,
128                                8 * 4);
129         }
130 }
131
132 static void si_release_descriptors(struct si_descriptors *desc)
133 {
134         pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
135         FREE(desc->list);
136 }
137
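/* Copy "size" bytes of CE RAM starting at "ce_offset" into a freshly
 * suballocated buffer and return its offset and resource. The copy is done
 * by the CE via a DUMP_CONST_RAM packet, so the draw IB has to synchronize
 * with the CE afterwards (ce_need_synchronization). */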
138 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
139                          unsigned *out_offset, struct r600_resource **out_buf) {
140         uint64_t va;
141
142         u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
143                              (struct pipe_resource**)out_buf);
144         if (!*out_buf)
145                 return false;
146
147         va = (*out_buf)->gpu_address + *out_offset;
148
149         radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
150         radeon_emit(sctx->ce_ib, ce_offset);
151         radeon_emit(sctx->ce_ib, size / 4);
152         radeon_emit(sctx->ce_ib, va);
153         radeon_emit(sctx->ce_ib, va >> 32);
154
155         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
156                                RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
157
158         sctx->ce_need_synchronization = true;
159         return true;
160 }
161
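/* Reload a descriptor list from its last uploaded buffer back into CE RAM
 * with LOAD_CONST_RAM. This is needed when the CE RAM contents may be stale,
 * e.g. at the beginning of a new CS (see the ce_ram_dirty flag). */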
162 static void si_reinitialize_ce_ram(struct si_context *sctx,
163                             struct si_descriptors *desc)
164 {
165         if (desc->buffer) {
166                 struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
167                 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
168                 uint64_t va = buffer->gpu_address + desc->buffer_offset;
169                 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
170
171                 if (!ib)
172                         ib = sctx->ce_ib;
173
174                 list_size = align(list_size, 32);
175
176                 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
177                 radeon_emit(ib, va);
178                 radeon_emit(ib, va >> 32);
179                 radeon_emit(ib, list_size / 4);
180                 radeon_emit(ib, desc->ce_offset);
181
182                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
183                                     RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
184         }
185         desc->ce_ram_dirty = false;
186 }
187
188 void si_ce_enable_loads(struct radeon_winsys_cs *ib)
189 {
190         radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
191         radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
192                         CONTEXT_CONTROL_LOAD_CE_RAM(1));
193         radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
194 }
195
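/* Upload the dirty part of a descriptor list. With CE, only the dirty slot
 * ranges are written into CE RAM and then dumped into a new suballocation;
 * without CE, the whole list is re-uploaded through the upload manager.
 * Returns false on allocation failure (the draw call should be skipped). */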
196 static bool si_upload_descriptors(struct si_context *sctx,
197                                   struct si_descriptors *desc,
198                                   struct r600_atom * atom)
199 {
200         unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
201
202         if (!desc->dirty_mask)
203                 return true;
204
205         if (sctx->ce_ib) {
206                 uint32_t const* list = (uint32_t const*)desc->list;
207
208                 if (desc->ce_ram_dirty)
209                         si_reinitialize_ce_ram(sctx, desc);
210
211                 while(desc->dirty_mask) {
212                         int begin, count;
213                         u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
214                                                      &count);
215
216                         begin *= desc->element_dw_size;
217                         count *= desc->element_dw_size;
218
219                         radeon_emit(sctx->ce_ib,
220                                     PKT3(PKT3_WRITE_CONST_RAM, count, 0));
221                         radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
222                         radeon_emit_array(sctx->ce_ib, list + begin, count);
223                 }
224
225                 if (!si_ce_upload(sctx, desc->ce_offset, list_size,
226                                            &desc->buffer_offset, &desc->buffer))
227                         return false;
228         } else {
229                 void *ptr;
230
231                 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
232                         &desc->buffer_offset,
233                         (struct pipe_resource**)&desc->buffer, &ptr);
234                 if (!desc->buffer)
235                         return false; /* skip the draw call */
236
237                 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
238
239                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
240                                     RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
241         }
242         desc->pointer_dirty = true;
243         desc->dirty_mask = 0;
244
245         if (atom)
246                 si_mark_atom_dirty(sctx, atom);
247
248         return true;
249 }
250
251 /* SAMPLER VIEWS */
252
253 static void si_release_sampler_views(struct si_sampler_views *views)
254 {
255         int i;
256
257         for (i = 0; i < ARRAY_SIZE(views->views); i++) {
258                 pipe_sampler_view_reference(&views->views[i], NULL);
259         }
260         si_release_descriptors(&views->desc);
261 }
262
263 static void si_sampler_view_add_buffer(struct si_context *sctx,
264                                        struct pipe_resource *resource,
265                                        enum radeon_bo_usage usage)
266 {
267         struct r600_resource *rres = (struct r600_resource*)resource;
268
269         if (!resource)
270                 return;
271
272         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres, usage,
273                                   r600_get_sampler_view_priority(rres));
274 }
275
276 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
277                                           struct si_sampler_views *views)
278 {
279         unsigned mask = views->desc.enabled_mask;
280
281         /* Add buffers to the CS. */
282         while (mask) {
283                 int i = u_bit_scan(&mask);
284
285                 si_sampler_view_add_buffer(sctx, views->views[i]->texture,
286                                            RADEON_USAGE_READ);
287         }
288
289         views->desc.ce_ram_dirty = true;
290
291         if (!views->desc.buffer)
292                 return;
293         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
294                               RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
295 }
296
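/* Bind one sampler view to a 16-dword image+sampler slot: dwords 0-7 receive
 * the view state, dwords 8-15 receive either the FMASK state or a NULL
 * descriptor plus the currently bound sampler state (see the slot layout
 * table at the top of this file). */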
297 static void si_set_sampler_view(struct si_context *sctx,
298                                 struct si_sampler_views *views,
299                                 unsigned slot, struct pipe_sampler_view *view)
300 {
301         struct si_sampler_view *rview = (struct si_sampler_view*)view;
302
303         if (view && view->texture && view->texture->target != PIPE_BUFFER &&
304             G_008F28_COMPRESSION_EN(rview->state[6]) &&
305             ((struct r600_texture*)view->texture)->dcc_offset == 0) {
306                 rview->state[6] &= C_008F28_COMPRESSION_EN &
307                                    C_008F28_ALPHA_IS_ON_MSB;
308         } else if (views->views[slot] == view)
309                 return;
310
311         if (view) {
312                 struct r600_texture *rtex = (struct r600_texture *)view->texture;
313
314                 si_sampler_view_add_buffer(sctx, view->texture,
315                                            RADEON_USAGE_READ);
316
317                 pipe_sampler_view_reference(&views->views[slot], view);
318                 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
319
320                 if (view->texture && view->texture->target != PIPE_BUFFER &&
321                     rtex->fmask.size) {
322                         memcpy(views->desc.list + slot*16 + 8,
323                                rview->fmask_state, 8*4);
324                 } else {
325                         /* Disable FMASK and bind sampler state in [12:15]. */
326                         memcpy(views->desc.list + slot*16 + 8,
327                                null_texture_descriptor, 4*4);
328
329                         if (views->sampler_states[slot])
330                                 memcpy(views->desc.list + slot*16 + 12,
331                                        views->sampler_states[slot], 4*4);
332                 }
333
334                 views->desc.enabled_mask |= 1u << slot;
335         } else {
336                 pipe_sampler_view_reference(&views->views[slot], NULL);
337                 memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
338                 /* Only clear the lower dwords of FMASK. */
339                 memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
340                 views->desc.enabled_mask &= ~(1u << slot);
341         }
342
343         views->desc.dirty_mask |= 1u << slot;
344 }
345
346 static bool is_compressed_colortex(struct r600_texture *rtex)
347 {
348         return rtex->cmask.size || rtex->fmask.size ||
349                (rtex->dcc_offset && rtex->dirty_level_mask);
350 }
351
352 static void si_set_sampler_views(struct pipe_context *ctx,
353                                  unsigned shader, unsigned start,
354                                  unsigned count,
355                                  struct pipe_sampler_view **views)
356 {
357         struct si_context *sctx = (struct si_context *)ctx;
358         struct si_textures_info *samplers = &sctx->samplers[shader];
359         int i;
360
361         if (!count || shader >= SI_NUM_SHADERS)
362                 return;
363
364         for (i = 0; i < count; i++) {
365                 unsigned slot = start + i;
366
367                 if (!views || !views[i]) {
368                         samplers->depth_texture_mask &= ~(1u << slot);
369                         samplers->compressed_colortex_mask &= ~(1u << slot);
370                         si_set_sampler_view(sctx, &samplers->views, slot, NULL);
371                         continue;
372                 }
373
374                 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
375
376                 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
377                         struct r600_texture *rtex =
378                                 (struct r600_texture*)views[i]->texture;
379
380                         if (rtex->is_depth && !rtex->is_flushing_texture) {
381                                 samplers->depth_texture_mask |= 1u << slot;
382                         } else {
383                                 samplers->depth_texture_mask &= ~(1u << slot);
384                         }
385                         if (is_compressed_colortex(rtex)) {
386                                 samplers->compressed_colortex_mask |= 1u << slot;
387                         } else {
388                                 samplers->compressed_colortex_mask &= ~(1u << slot);
389                         }
390                 } else {
391                         samplers->depth_texture_mask &= ~(1u << slot);
392                         samplers->compressed_colortex_mask &= ~(1u << slot);
393                 }
394         }
395 }
396
397 static void
398 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
399 {
400         unsigned mask = samplers->views.desc.enabled_mask;
401
402         while (mask) {
403                 int i = u_bit_scan(&mask);
404                 struct pipe_resource *res = samplers->views.views[i]->texture;
405
406                 if (res && res->target != PIPE_BUFFER) {
407                         struct r600_texture *rtex = (struct r600_texture *)res;
408
409                         if (is_compressed_colortex(rtex)) {
410                                 samplers->compressed_colortex_mask |= 1u << i;
411                         } else {
412                                 samplers->compressed_colortex_mask &= ~(1u << i);
413                         }
414                 }
415         }
416 }
417
418 /* IMAGE VIEWS */
419
420 static void
421 si_release_image_views(struct si_images_info *images)
422 {
423         unsigned i;
424
425         for (i = 0; i < SI_NUM_IMAGES; ++i) {
426                 struct pipe_image_view *view = &images->views[i];
427
428                 pipe_resource_reference(&view->resource, NULL);
429         }
430
431         si_release_descriptors(&images->desc);
432 }
433
434 static void
435 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
436 {
437         uint mask = images->desc.enabled_mask;
438
439         /* Add buffers to the CS. */
440         while (mask) {
441                 int i = u_bit_scan(&mask);
442                 struct pipe_image_view *view = &images->views[i];
443
444                 assert(view->resource);
445
446                 si_sampler_view_add_buffer(sctx, view->resource,
447                                            RADEON_USAGE_READWRITE);
448         }
449
450         images->desc.ce_ram_dirty = true;
451
452         if (images->desc.buffer) {
453                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
454                                           images->desc.buffer,
455                                           RADEON_USAGE_READ,
456                                           RADEON_PRIO_DESCRIPTORS);
457         }
458 }
459
460 static void
461 si_disable_shader_image(struct si_images_info *images, unsigned slot)
462 {
463         if (images->desc.enabled_mask & (1u << slot)) {
464                 pipe_resource_reference(&images->views[slot].resource, NULL);
465                 images->compressed_colortex_mask &= ~(1 << slot);
466
467                 memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
468                 images->desc.enabled_mask &= ~(1u << slot);
469                 images->desc.dirty_mask |= 1u << slot;
470         }
471 }
472
473 static void
474 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
475                      unsigned start_slot, unsigned count,
476                      struct pipe_image_view *views)
477 {
478         struct si_context *ctx = (struct si_context *)pipe;
479         struct si_screen *screen = ctx->screen;
480         struct si_images_info *images = &ctx->images[shader];
481         unsigned i, slot;
482
483         assert(shader < SI_NUM_SHADERS);
484
485         if (!count)
486                 return;
487
488         assert(start_slot + count <= SI_NUM_IMAGES);
489
490         for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
491                 struct r600_resource *res;
492
493                 if (!views || !views[i].resource) {
494                         si_disable_shader_image(images, slot);
495                         continue;
496                 }
497
498                 res = (struct r600_resource *)views[i].resource;
499                 util_copy_image_view(&images->views[slot], &views[i]);
500
501                 si_sampler_view_add_buffer(ctx, &res->b.b,
502                                            RADEON_USAGE_READWRITE);
503
504                 if (res->b.b.target == PIPE_BUFFER) {
505                         si_make_buffer_descriptor(screen, res,
506                                                   views[i].format,
507                                                   views[i].u.buf.first_element,
508                                                   views[i].u.buf.last_element,
509                                                   images->desc.list + slot * 8);
510                         images->compressed_colortex_mask &= ~(1 << slot);
511                 } else {
512                         static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
513                         struct r600_texture *tex = (struct r600_texture *)res;
514                         unsigned level;
515                         unsigned width, height, depth;
516
517                         assert(!tex->is_depth);
518                         assert(tex->fmask.size == 0);
519
520                         if (tex->dcc_offset &&
521                             views[i].access & PIPE_IMAGE_ACCESS_WRITE)
522                                 r600_texture_disable_dcc(&screen->b, tex);
523
524                         if (is_compressed_colortex(tex)) {
525                                 images->compressed_colortex_mask |= 1 << slot;
526                         } else {
527                                 images->compressed_colortex_mask &= ~(1 << slot);
528                         }
529
530                         /* Always force the base level to the selected level.
531                          *
532                          * This is required for 3D textures, where otherwise
533                          * selecting a single slice for non-layered bindings
534                          * fails. It doesn't hurt the other targets.
535                          */
536                         level = views[i].u.tex.level;
537                         width = u_minify(res->b.b.width0, level);
538                         height = u_minify(res->b.b.height0, level);
539                         depth = u_minify(res->b.b.depth0, level);
540
541                         si_make_texture_descriptor(screen, tex, false, res->b.b.target,
542                                                    views[i].format, swizzle,
543                                                    level, 0, 0,
544                                                    views[i].u.tex.first_layer, views[i].u.tex.last_layer,
545                                                    width, height, depth,
546                                                    images->desc.list + slot * 8,
547                                                    NULL);
548                 }
549
550                 images->desc.enabled_mask |= 1u << slot;
551                 images->desc.dirty_mask |= 1u << slot;
552         }
553 }
554
555 static void
556 si_images_update_compressed_colortex_mask(struct si_images_info *images)
557 {
558         unsigned mask = images->desc.enabled_mask;
559
560         while (mask) {
561                 int i = u_bit_scan(&mask);
562                 struct pipe_resource *res = images->views[i].resource;
563
564                 if (res && res->target != PIPE_BUFFER) {
565                         struct r600_texture *rtex = (struct r600_texture *)res;
566
567                         if (is_compressed_colortex(rtex)) {
568                                 images->compressed_colortex_mask |= 1 << i;
569                         } else {
570                                 images->compressed_colortex_mask &= ~(1 << i);
571                         }
572                 }
573         }
574 }
575
576 /* SAMPLER STATES */
577
578 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
579                                    unsigned start, unsigned count, void **states)
580 {
581         struct si_context *sctx = (struct si_context *)ctx;
582         struct si_textures_info *samplers = &sctx->samplers[shader];
583         struct si_descriptors *desc = &samplers->views.desc;
584         struct si_sampler_state **sstates = (struct si_sampler_state**)states;
585         int i;
586
587         if (!count || shader >= SI_NUM_SHADERS)
588                 return;
589
590         for (i = 0; i < count; i++) {
591                 unsigned slot = start + i;
592
593                 if (!sstates[i] ||
594                     sstates[i] == samplers->views.sampler_states[slot])
595                         continue;
596
597                 samplers->views.sampler_states[slot] = sstates[i];
598
599                 /* If FMASK is bound, don't overwrite it.
600                  * The sampler state will be set after FMASK is unbound.
601                  */
602                 if (samplers->views.views[slot] &&
603                     samplers->views.views[slot]->texture &&
604                     samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
605                     ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
606                         continue;
607
608                 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
609                 desc->dirty_mask |= 1u << slot;
610         }
611 }
612
613 /* BUFFER RESOURCES */
614
615 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
616                                      unsigned num_buffers,
617                                      unsigned shader_userdata_index,
618                                      enum radeon_bo_usage shader_usage,
619                                      enum radeon_bo_priority priority,
620                                      unsigned *ce_offset)
621 {
622         buffers->shader_usage = shader_usage;
623         buffers->priority = priority;
624         buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
625
626         si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
627                             num_buffers, NULL, ce_offset);
628 }
629
630 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
631 {
632         int i;
633
634         for (i = 0; i < buffers->desc.num_elements; i++) {
635                 pipe_resource_reference(&buffers->buffers[i], NULL);
636         }
637
638         FREE(buffers->buffers);
639         si_release_descriptors(&buffers->desc);
640 }
641
642 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
643                                              struct si_buffer_resources *buffers)
644 {
645         unsigned mask = buffers->desc.enabled_mask;
646
647         /* Add buffers to the CS. */
648         while (mask) {
649                 int i = u_bit_scan(&mask);
650
651                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
652                                       (struct r600_resource*)buffers->buffers[i],
653                                       buffers->shader_usage, buffers->priority);
654         }
655
656         buffers->desc.ce_ram_dirty = true;
657
658         if (!buffers->desc.buffer)
659                 return;
660         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
661                               buffers->desc.buffer, RADEON_USAGE_READWRITE,
662                               RADEON_PRIO_DESCRIPTORS);
663 }
664
665 /* VERTEX BUFFERS */
666
667 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
668 {
669         struct si_descriptors *desc = &sctx->vertex_buffers;
670         int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
671         int i;
672
673         for (i = 0; i < count; i++) {
674                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
675
676                 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
677                         continue;
678                 if (!sctx->vertex_buffer[vb].buffer)
679                         continue;
680
681                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
682                                       (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
683                                       RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
684         }
685
686         if (!desc->buffer)
687                 return;
688         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
689                               desc->buffer, RADEON_USAGE_READ,
690                               RADEON_PRIO_DESCRIPTORS);
691 }
692
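/* Build and upload one 4-dword buffer descriptor (V#) per vertex element.
 * Unlike the other descriptor lists, these always go through a fresh
 * u_upload_mgr allocation instead of the fine-grained CE path. */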
693 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
694 {
695         struct si_descriptors *desc = &sctx->vertex_buffers;
696         bool bound[SI_NUM_VERTEX_BUFFERS] = {};
697         unsigned i, count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
698         uint64_t va;
699         uint32_t *ptr;
700
701         if (!sctx->vertex_buffers_dirty)
702                 return true;
703         if (!count || !sctx->vertex_elements)
704                 return true;
705
706         /* Vertex buffer descriptors are the only ones which are uploaded
707          * directly through a staging buffer and don't go through
708          * the fine-grained upload path.
709          */
710         u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
711                        (struct pipe_resource**)&desc->buffer, (void**)&ptr);
712         if (!desc->buffer)
713                 return false;
714
715         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
716                               desc->buffer, RADEON_USAGE_READ,
717                               RADEON_PRIO_DESCRIPTORS);
718
719         assert(count <= SI_NUM_VERTEX_BUFFERS);
720
721         for (i = 0; i < count; i++) {
722                 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
723                 struct pipe_vertex_buffer *vb;
724                 struct r600_resource *rbuffer;
725                 unsigned offset;
726                 uint32_t *desc = &ptr[i*4];
727
728                 if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
729                         memset(desc, 0, 16);
730                         continue;
731                 }
732
733                 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
734                 rbuffer = (struct r600_resource*)vb->buffer;
735                 if (!rbuffer) {
736                         memset(desc, 0, 16);
737                         continue;
738                 }
739
740                 offset = vb->buffer_offset + ve->src_offset;
741                 va = rbuffer->gpu_address + offset;
742
743                 /* Fill in T# buffer resource description */
744                 desc[0] = va;
745                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
746                           S_008F04_STRIDE(vb->stride);
747
748                 if (sctx->b.chip_class <= CIK && vb->stride)
749                         /* Round up by rounding down and adding 1 */
750                         desc[2] = (vb->buffer->width0 - offset -
751                                    sctx->vertex_elements->format_size[i]) /
752                                   vb->stride + 1;
753                 else
754                         desc[2] = vb->buffer->width0 - offset;
755
756                 desc[3] = sctx->vertex_elements->rsrc_word3[i];
757
758                 if (!bound[ve->vertex_buffer_index]) {
759                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
760                                               (struct r600_resource*)vb->buffer,
761                                               RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
762                         bound[ve->vertex_buffer_index] = true;
763                 }
764         }
765
766         /* Don't flush the const cache. It would have a very negative effect
767          * on performance (confirmed by testing). New descriptors are always
768          * uploaded to a fresh new buffer, so I don't think flushing the const
769          * cache is needed. */
770         desc->pointer_dirty = true;
771         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
772         sctx->vertex_buffers_dirty = false;
773         return true;
774 }
775
776
777 /* CONSTANT BUFFERS */
778
779 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
780                             const uint8_t *ptr, unsigned size, uint32_t *const_offset)
781 {
782         void *tmp;
783
784         u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
785                        (struct pipe_resource**)rbuffer, &tmp);
786         if (*rbuffer)
787                 util_memcpy_cpu_to_le32(tmp, ptr, size);
788 }
789
790 void si_set_constant_buffer(struct si_context *sctx,
791                             struct si_buffer_resources *buffers,
792                             uint slot, struct pipe_constant_buffer *input)
793 {
794         assert(slot < buffers->desc.num_elements);
795         pipe_resource_reference(&buffers->buffers[slot], NULL);
796
797         /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
798          * with a NULL buffer). We need to use a dummy buffer instead. */
799         if (sctx->b.chip_class == CIK &&
800             (!input || (!input->buffer && !input->user_buffer)))
801                 input = &sctx->null_const_buf;
802
803         if (input && (input->buffer || input->user_buffer)) {
804                 struct pipe_resource *buffer = NULL;
805                 uint64_t va;
806
807                 /* Upload the user buffer if needed. */
808                 if (input->user_buffer) {
809                         unsigned buffer_offset;
810
811                         si_upload_const_buffer(sctx,
812                                                (struct r600_resource**)&buffer, input->user_buffer,
813                                                input->buffer_size, &buffer_offset);
814                         if (!buffer) {
815                                 /* Just unbind on failure. */
816                                 si_set_constant_buffer(sctx, buffers, slot, NULL);
817                                 return;
818                         }
819                         va = r600_resource(buffer)->gpu_address + buffer_offset;
820                 } else {
821                         pipe_resource_reference(&buffer, input->buffer);
822                         va = r600_resource(buffer)->gpu_address + input->buffer_offset;
823                 }
824
825                 /* Set the descriptor. */
826                 uint32_t *desc = buffers->desc.list + slot*4;
827                 desc[0] = va;
828                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
829                           S_008F04_STRIDE(0);
830                 desc[2] = input->buffer_size;
831                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
832                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
833                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
834                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
835                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
836                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
837
838                 buffers->buffers[slot] = buffer;
839                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
840                                       (struct r600_resource*)buffer,
841                                       buffers->shader_usage, buffers->priority);
842                 buffers->desc.enabled_mask |= 1u << slot;
843         } else {
844                 /* Clear the descriptor. */
845                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
846                 buffers->desc.enabled_mask &= ~(1u << slot);
847         }
848
849         buffers->desc.dirty_mask |= 1u << slot;
850 }
851
852 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
853                                         uint shader, uint slot,
854                                         struct pipe_constant_buffer *input)
855 {
856         struct si_context *sctx = (struct si_context *)ctx;
857
858         if (shader >= SI_NUM_SHADERS)
859                 return;
860
861         si_set_constant_buffer(sctx, &sctx->const_buffers[shader], slot, input);
862 }
863
864 /* SHADER BUFFERS */
865
866 static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
867                                   unsigned start_slot, unsigned count,
868                                   struct pipe_shader_buffer *sbuffers)
869 {
870         struct si_context *sctx = (struct si_context *)ctx;
871         struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
872         unsigned i;
873
874         assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
875
876         for (i = 0; i < count; ++i) {
877                 struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
878                 struct r600_resource *buf;
879                 unsigned slot = start_slot + i;
880                 uint32_t *desc = buffers->desc.list + slot * 4;
881                 uint64_t va;
882
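                /* Unbinding: clear the descriptor and still mark the slot
                 * dirty so that the zeroed descriptor gets re-uploaded. */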
883                 if (!sbuffer || !sbuffer->buffer) {
884                         pipe_resource_reference(&buffers->buffers[slot], NULL);
885                         memset(desc, 0, sizeof(uint32_t) * 4);
886                         buffers->desc.enabled_mask &= ~(1u << slot);
887                         buffers->desc.dirty_mask |= 1u << slot;
888                         continue;
889                 }
890
891                 buf = (struct r600_resource *)sbuffer->buffer;
892                 va = buf->gpu_address + sbuffer->buffer_offset;
893
894                 desc[0] = va;
895                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
896                           S_008F04_STRIDE(0);
897                 desc[2] = sbuffer->buffer_size;
898                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
899                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
900                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
901                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
902                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
903                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
904
905                 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
906                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
907                                       buffers->shader_usage, buffers->priority);
908                 buffers->desc.enabled_mask |= 1u << slot;
909                 buffers->desc.dirty_mask |= 1u << slot;
910         }
911 }
912
913 /* RING BUFFERS */
914
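/* Bind an internal ring buffer at the given RW-buffer slot. The element_size
 * and index_stride parameters are given in bytes and are converted to the
 * encoded hardware descriptor fields here. */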
915 void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
916                         struct pipe_resource *buffer,
917                         unsigned stride, unsigned num_records,
918                         bool add_tid, bool swizzle,
919                         unsigned element_size, unsigned index_stride, uint64_t offset)
920 {
921         struct si_context *sctx = (struct si_context *)ctx;
922         struct si_buffer_resources *buffers = &sctx->rw_buffers;
923
924         /* The stride field in the resource descriptor has 14 bits */
925         assert(stride < (1 << 14));
926
927         assert(slot < buffers->desc.num_elements);
928         pipe_resource_reference(&buffers->buffers[slot], NULL);
929
930         if (buffer) {
931                 uint64_t va;
932
933                 va = r600_resource(buffer)->gpu_address + offset;
934
935                 switch (element_size) {
936                 default:
937                         assert(!"Unsupported ring buffer element size");
938                 case 0:
939                 case 2:
940                         element_size = 0;
941                         break;
942                 case 4:
943                         element_size = 1;
944                         break;
945                 case 8:
946                         element_size = 2;
947                         break;
948                 case 16:
949                         element_size = 3;
950                         break;
951                 }
952
953                 switch (index_stride) {
954                 default:
955                         assert(!"Unsupported ring buffer index stride");
956                 case 0:
957                 case 8:
958                         index_stride = 0;
959                         break;
960                 case 16:
961                         index_stride = 1;
962                         break;
963                 case 32:
964                         index_stride = 2;
965                         break;
966                 case 64:
967                         index_stride = 3;
968                         break;
969                 }
970
971                 if (sctx->b.chip_class >= VI && stride)
972                         num_records *= stride;
973
974                 /* Set the descriptor. */
975                 uint32_t *desc = buffers->desc.list + slot*4;
976                 desc[0] = va;
977                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
978                           S_008F04_STRIDE(stride) |
979                           S_008F04_SWIZZLE_ENABLE(swizzle);
980                 desc[2] = num_records;
981                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
982                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
983                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
984                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
985                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
986                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
987                           S_008F0C_ELEMENT_SIZE(element_size) |
988                           S_008F0C_INDEX_STRIDE(index_stride) |
989                           S_008F0C_ADD_TID_ENABLE(add_tid);
990
991                 pipe_resource_reference(&buffers->buffers[slot], buffer);
992                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
993                                       (struct r600_resource*)buffer,
994                                       buffers->shader_usage, buffers->priority);
995                 buffers->desc.enabled_mask |= 1u << slot;
996         } else {
997                 /* Clear the descriptor. */
998                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
999                 buffers->desc.enabled_mask &= ~(1u << slot);
1000         }
1001
1002         buffers->desc.dirty_mask |= 1u << slot;
1003 }
1004
1005 /* STREAMOUT BUFFERS */
1006
1007 static void si_set_streamout_targets(struct pipe_context *ctx,
1008                                      unsigned num_targets,
1009                                      struct pipe_stream_output_target **targets,
1010                                      const unsigned *offsets)
1011 {
1012         struct si_context *sctx = (struct si_context *)ctx;
1013         struct si_buffer_resources *buffers = &sctx->rw_buffers;
1014         unsigned old_num_targets = sctx->b.streamout.num_targets;
1015         unsigned i, bufidx;
1016
1017         /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1018         if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
1019                 /* Since streamout uses vector writes which go through TC L2
1020                  * and most other clients can use TC L2 as well, we don't need
1021                  * to flush it.
1022                  *
1023                  * The only case which requires flushing it is VGT DMA index
1024                  * fetching, which is a rare case. Thus, flag the TC L2
1025                  * dirtiness in the resource and handle it when index fetching
1026                  * is used.
1027                  */
1028                 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1029                         if (sctx->b.streamout.targets[i])
1030                                 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1031
1032                 /* Invalidate the scalar cache in case a streamout buffer is
1033                  * going to be used as a constant buffer.
1034                  *
1035                  * Invalidate TC L1, because streamout bypasses it (done by
1036                  * setting GLC=1 in the store instruction), but it can contain
1037                  * outdated data of streamout buffers.
1038                  *
1039                  * VS_PARTIAL_FLUSH is required if the buffers are going to be
1040                  * used as an input immediately.
1041                  */
1042                 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1043                                  SI_CONTEXT_INV_VMEM_L1 |
1044                                  SI_CONTEXT_VS_PARTIAL_FLUSH;
1045         }
1046
1047         /* All readers of the streamout targets need to be finished before we can
1048          * start writing to the targets.
1049          */
1050         if (num_targets)
1051                 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1052                                  SI_CONTEXT_CS_PARTIAL_FLUSH;
1053
1054         /* Streamout buffers must be bound in 2 places:
1055          * 1) in VGT by setting the VGT_STRMOUT registers
1056          * 2) as shader resources
1057          */
1058
1059         /* Set the VGT regs. */
1060         r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1061
1062         /* Set the shader resources. */
1063         for (i = 0; i < num_targets; i++) {
1064                 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1065
1066                 if (targets[i]) {
1067                         struct pipe_resource *buffer = targets[i]->buffer;
1068                         uint64_t va = r600_resource(buffer)->gpu_address;
1069
1070                         /* Set the descriptor.
1071                          *
1072                          * On VI, the format must be non-INVALID, otherwise
1073                          * the buffer will be considered not bound and store
1074                          * instructions will be no-ops.
1075                          */
1076                         uint32_t *desc = buffers->desc.list + bufidx*4;
1077                         desc[0] = va;
1078                         desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1079                         desc[2] = 0xffffffff;
1080                         desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1081                                   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1082                                   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1083                                   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1084                                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1085
1086                         /* Set the resource. */
1087                         pipe_resource_reference(&buffers->buffers[bufidx],
1088                                                 buffer);
1089                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1090                                               (struct r600_resource*)buffer,
1091                                               buffers->shader_usage, buffers->priority);
1092                         buffers->desc.enabled_mask |= 1u << bufidx;
1093                 } else {
1094                         /* Clear the descriptor and unset the resource. */
1095                         memset(buffers->desc.list + bufidx*4, 0,
1096                                sizeof(uint32_t) * 4);
1097                         pipe_resource_reference(&buffers->buffers[bufidx],
1098                                                 NULL);
1099                         buffers->desc.enabled_mask &= ~(1u << bufidx);
1100                 }
1101                 buffers->desc.dirty_mask |= 1u << bufidx;
1102         }
1103         for (; i < old_num_targets; i++) {
1104                 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1105                 /* Clear the descriptor and unset the resource. */
1106                 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
1107                 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1108                 buffers->desc.enabled_mask &= ~(1u << bufidx);
1109                 buffers->desc.dirty_mask |= 1u << bufidx;
1110         }
1111 }
1112
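/* Patch a buffer descriptor after its backing buffer has been reallocated:
 * keep the old offset within the buffer, but rebase the address onto the new
 * buffer's GPU address. */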
1113 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1114                                         uint32_t *desc, uint64_t old_buf_va,
1115                                         struct pipe_resource *new_buf)
1116 {
1117         /* Retrieve the buffer offset from the descriptor. */
1118         uint64_t old_desc_va =
1119                 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1120
1121         assert(old_buf_va <= old_desc_va);
1122         uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1123
1124         /* Update the descriptor. */
1125         uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
1126
1127         desc[0] = va;
1128         desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
1129                   S_008F04_BASE_ADDRESS_HI(va >> 32);
1130 }
1131
1132 /* INTERNAL CONST BUFFERS */
1133
1134 static void si_set_polygon_stipple(struct pipe_context *ctx,
1135                                    const struct pipe_poly_stipple *state)
1136 {
1137         struct si_context *sctx = (struct si_context *)ctx;
1138         struct pipe_constant_buffer cb = {};
1139         unsigned stipple[32];
1140         int i;
1141
1142         for (i = 0; i < 32; i++)
1143                 stipple[i] = util_bitreverse(state->stipple[i]);
1144
1145         cb.user_buffer = stipple;
1146         cb.buffer_size = sizeof(stipple);
1147
1148         si_set_constant_buffer(sctx, &sctx->rw_buffers,
1149                                SI_PS_CONST_POLY_STIPPLE, &cb);
1150 }
1151
1152 /* TEXTURE METADATA ENABLE/DISABLE */
1153
1154 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1155  * while the texture is bound, possibly by a different context. In that case,
1156  * call this function to update compressed_colortex_masks.
1157  */
1158 void si_update_compressed_colortex_masks(struct si_context *sctx)
1159 {
1160         for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1161                 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1162                 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1163         }
1164 }
1165
1166 /* BUFFER DISCARD/INVALIDATION */
1167
1168 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1169 static void si_reset_buffer_resources(struct si_context *sctx,
1170                                       struct si_buffer_resources *buffers,
1171                                       struct pipe_resource *buf,
1172                                       uint64_t old_va)
1173 {
1174         unsigned mask = buffers->desc.enabled_mask;
1175
1176         while (mask) {
1177                 unsigned i = u_bit_scan(&mask);
1178                 if (buffers->buffers[i] == buf) {
1179                         si_desc_reset_buffer_offset(&sctx->b.b,
1180                                                     buffers->desc.list + i*4,
1181                                                     old_va, buf);
1182                         buffers->desc.dirty_mask |= 1u << i;
1183
1184                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1185                                                 (struct r600_resource *)buf,
1186                                                 buffers->shader_usage,
1187                                                 buffers->priority);
1188                 }
1189         }
1190 }
1191
1192 /* Reallocate a buffer and update all resource bindings where the buffer is
1193  * bound.
1194  *
1195  * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1196  * idle by discarding its contents. Apps usually tell us when to do this using
1197  * map_buffer flags, for example.
1198  */
1199 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1200 {
1201         struct si_context *sctx = (struct si_context*)ctx;
1202         struct r600_resource *rbuffer = r600_resource(buf);
1203         unsigned i, shader, alignment = rbuffer->buf->alignment;
1204         uint64_t old_va = rbuffer->gpu_address;
1205         unsigned num_elems = sctx->vertex_elements ?
1206                                        sctx->vertex_elements->count : 0;
1207         struct si_sampler_view *view;
1208
1209         /* Reallocate the buffer in the same pipe_resource. */
1210         r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
1211                            alignment);
1212
1213         /* We changed the buffer, now we need to bind it where the old one
1214          * was bound. This consists of 2 things:
1215          *   1) Updating the resource descriptor and dirtying it.
1216          *   2) Adding a relocation to the CS, so that it's usable.
1217          */
1218
1219         /* Vertex buffers. */
1220         for (i = 0; i < num_elems; i++) {
1221                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1222
1223                 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1224                         continue;
1225                 if (!sctx->vertex_buffer[vb].buffer)
1226                         continue;
1227
1228                 if (sctx->vertex_buffer[vb].buffer == buf) {
1229                         sctx->vertex_buffers_dirty = true;
1230                         break;
1231                 }
1232         }
1233
1234         /* Streamout buffers. (other internal buffers can't be invalidated) */
1235         for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1236                 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1237
1238                 if (buffers->buffers[i] != buf)
1239                         continue;
1240
1241                 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1242                                             old_va, buf);
1243                 buffers->desc.dirty_mask |= 1u << i;
1244
1245                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1246                                           rbuffer, buffers->shader_usage,
1247                                           buffers->priority);
1248
1249                 /* Update the streamout state. */
1250                 if (sctx->b.streamout.begin_emitted)
1251                         r600_emit_streamout_end(&sctx->b);
1252                 sctx->b.streamout.append_bitmask =
1253                                 sctx->b.streamout.enabled_mask;
1254                 r600_streamout_buffers_dirty(&sctx->b);
1255         }
1256
1257         /* Constant and shader buffers. */
1258         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1259                 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1260                                           buf, old_va);
1261                 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1262                                           buf, old_va);
1263         }
1264
1265         /* Texture buffers - update virtual addresses in sampler view descriptors. */
1266         LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1267                 if (view->base.texture == buf) {
1268                         si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1269                 }
1270         }
        /* Texture buffers - update bindings. */
        for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                struct si_sampler_views *views = &sctx->samplers[shader].views;
                unsigned mask = views->desc.enabled_mask;

                while (mask) {
                        unsigned i = u_bit_scan(&mask);
                        if (views->views[i]->texture == buf) {
                                si_desc_reset_buffer_offset(ctx,
                                                            views->desc.list +
                                                            i * 16 + 4,
                                                            old_va, buf);
                                views->desc.dirty_mask |= 1u << i;

                                radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
                                                      rbuffer, RADEON_USAGE_READ,
                                                      RADEON_PRIO_SAMPLER_BUFFER);
                        }
                }
        }

        /* Shader images */
        for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
                struct si_images_info *images = &sctx->images[shader];
                unsigned mask = images->desc.enabled_mask;

                while (mask) {
                        unsigned i = u_bit_scan(&mask);

                        if (images->views[i].resource == buf) {
                                si_desc_reset_buffer_offset(
                                        ctx, images->desc.list + i * 8 + 4,
                                        old_va, buf);
                                images->desc.dirty_mask |= 1u << i;

                                radeon_add_to_buffer_list(
                                        &sctx->b, &sctx->b.gfx, rbuffer,
                                        RADEON_USAGE_READWRITE,
                                        RADEON_PRIO_SAMPLER_BUFFER);
                        }
                }
        }
}

/* SHADER USER DATA */

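/* Mark all descriptor pointers of one shader stage as dirty, so that they
 * are re-emitted into the user data SGPRs on the next draw/dispatch.
 */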
static void si_mark_shader_pointers_dirty(struct si_context *sctx,
                                          unsigned shader)
{
        sctx->const_buffers[shader].desc.pointer_dirty = true;
        sctx->shader_buffers[shader].desc.pointer_dirty = true;
        sctx->samplers[shader].views.desc.pointer_dirty = true;
        sctx->images[shader].desc.pointer_dirty = true;

        if (shader == PIPE_SHADER_VERTEX)
                sctx->vertex_buffers.pointer_dirty = true;

        si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
}

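/* A new command stream starts with no packets emitted, so all shader
 * pointers (including the rw_buffers pointer) must be re-emitted.
 */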
static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_mark_shader_pointers_dirty(sctx, i);
        }
        sctx->rw_buffers.desc.pointer_dirty = true;
}

/* Set a base register address for user data constants in the given shader.
 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
 */
static void si_set_user_data_base(struct si_context *sctx,
                                  unsigned shader, uint32_t new_base)
{
        uint32_t *base = &sctx->shader_userdata.sh_base[shader];

        if (*base != new_base) {
                *base = new_base;

                if (new_base)
                        si_mark_shader_pointers_dirty(sctx, shader);
        }
}

/* This must be called when these shaders are changed from non-NULL to NULL
 * and vice versa:
 * - geometry shader
 * - tessellation control shader
 * - tessellation evaluation shader
 */
void si_shader_change_notify(struct si_context *sctx)
{
        /* VS can be bound as VS, ES, or LS. */
        if (sctx->tes_shader.cso)
                si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
                                      R_00B530_SPI_SHADER_USER_DATA_LS_0);
        else if (sctx->gs_shader.cso)
                si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
                                      R_00B330_SPI_SHADER_USER_DATA_ES_0);
        else
                si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
                                      R_00B130_SPI_SHADER_USER_DATA_VS_0);

        /* TES can be bound as ES, VS, or not bound. */
        if (sctx->tes_shader.cso) {
                if (sctx->gs_shader.cso)
                        si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
                                              R_00B330_SPI_SHADER_USER_DATA_ES_0);
                else
                        si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
                                              R_00B130_SPI_SHADER_USER_DATA_VS_0);
        } else {
                si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
        }
}

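/* Emit one SET_SH_REG packet that writes the 64-bit GPU address of a
 * descriptor list into two consecutive user data SGPRs. If keep_dirty is
 * set, the pointer stays dirty so the same list can be emitted again at
 * another register base (used for the shared rw_buffers list).
 */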
static void si_emit_shader_pointer(struct si_context *sctx,
                                   struct si_descriptors *desc,
                                   unsigned sh_base, bool keep_dirty)
{
        struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
        uint64_t va;

        if (!desc->pointer_dirty || !desc->buffer)
                return;

        va = desc->buffer->gpu_address +
             desc->buffer_offset;

        radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
        radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);

        desc->pointer_dirty = keep_dirty;
}

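/* Emit all dirty descriptor pointers for the graphics shader stages.
 * The rw_buffers list is shared by all stages, so its pointer is broadcast
 * to every SPI_SHADER_USER_DATA_*_0 base before the per-stage lists.
 */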
void si_emit_graphics_shader_userdata(struct si_context *sctx,
                                      struct r600_atom *atom)
{
        unsigned i;
        uint32_t *sh_base = sctx->shader_userdata.sh_base;

        if (sctx->rw_buffers.desc.pointer_dirty) {
                si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
                                       R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
                si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
                                       R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
                si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
                                       R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
                si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
                                       R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
                si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
                                       R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
                sctx->rw_buffers.desc.pointer_dirty = false;
        }

        for (i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
                unsigned base = sh_base[i];

                if (!base)
                        continue;

                si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
                si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
                si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
                si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
        }
        si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
}

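/* Emit the dirty descriptor pointers of the compute shader relative to
 * the COMPUTE_USER_DATA_0 register base.
 */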
void si_emit_compute_shader_userdata(struct si_context *sctx)
{
        unsigned base = R_00B900_COMPUTE_USER_DATA_0;

        si_emit_shader_pointer(sctx, &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc,
                               base, false);
        si_emit_shader_pointer(sctx, &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc,
                               base, false);
        si_emit_shader_pointer(sctx, &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc,
                               base, false);
        si_emit_shader_pointer(sctx, &sctx->images[PIPE_SHADER_COMPUTE].desc,
                               base, false);
}

/* INIT/DEINIT/UPLOAD */

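/* Create all descriptor lists, install the pipe_context entry points that
 * modify them, and set the default user data register bases.
 */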
void si_init_all_descriptors(struct si_context *sctx)
{
        int i;
        unsigned ce_offset = 0;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_init_buffer_resources(&sctx->const_buffers[i],
                                         SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
                                         RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
                                         &ce_offset);
                si_init_buffer_resources(&sctx->shader_buffers[i],
                                         SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
                                         RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
                                         &ce_offset);

                si_init_descriptors(&sctx->samplers[i].views.desc,
                                    SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
                                    null_texture_descriptor, &ce_offset);

                si_init_descriptors(&sctx->images[i].desc,
                                    SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
                                    null_image_descriptor, &ce_offset);
        }

        si_init_buffer_resources(&sctx->rw_buffers,
                                 SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
                                 RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
                                 &ce_offset);
        si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
                            4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);

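        /* All CE-preloaded descriptor lists must fit in CE RAM, which is
         * assumed to be 32 KB here (hence the assertion).
         */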
        assert(ce_offset <= 32768);

        /* Set pipe_context functions. */
        sctx->b.b.bind_sampler_states = si_bind_sampler_states;
        sctx->b.b.set_shader_images = si_set_shader_images;
        sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
        sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
        sctx->b.b.set_shader_buffers = si_set_shader_buffers;
        sctx->b.b.set_sampler_views = si_set_sampler_views;
        sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
        sctx->b.invalidate_buffer = si_invalidate_buffer;

        /* Shader user data. */
        si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
                     si_emit_graphics_shader_userdata);

        /* Set default and immutable mappings. */
        si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
        si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
        si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
        si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}

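/* Upload all dirty descriptor lists used by graphics shaders.
 * Returns false if an upload fails.
 */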
bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc,
                                           &sctx->shader_userdata.atom) ||
                    !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc,
                                           &sctx->shader_userdata.atom) ||
                    !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc,
                                           &sctx->shader_userdata.atom) ||
                    !si_upload_descriptors(sctx, &sctx->images[i].desc,
                                           &sctx->shader_userdata.atom))
                        return false;
        }
        return si_upload_descriptors(sctx, &sctx->rw_buffers.desc,
                                     &sctx->shader_userdata.atom) &&
               si_upload_vertex_buffer_descriptors(sctx);
}

bool si_upload_compute_shader_descriptors(struct si_context *sctx)
{
        /* rw_buffers is not updated here, because compute shaders don't need
         * it and the compute input buffer uses the same SGPRs anyway.
         */
        return si_upload_descriptors(sctx,
                        &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
               si_upload_descriptors(sctx,
                       &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
               si_upload_descriptors(sctx,
                       &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc, NULL) &&
               si_upload_descriptors(sctx,
                       &sctx->images[PIPE_SHADER_COMPUTE].desc, NULL);
}

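/* Release all resource references held by the descriptor lists and free
 * the lists themselves.
 */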
void si_release_all_descriptors(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_release_buffer_resources(&sctx->const_buffers[i]);
                si_release_buffer_resources(&sctx->shader_buffers[i]);
                si_release_sampler_views(&sctx->samplers[i].views);
                si_release_image_views(&sctx->images[i]);
        }
        si_release_buffer_resources(&sctx->rw_buffers);
        si_release_descriptors(&sctx->vertex_buffers);
}

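/* Re-add all bound resources to the new command stream's buffer list and
 * mark all shader pointers dirty.
 */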
void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
                si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
                si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
                si_image_views_begin_new_cs(sctx, &sctx->images[i]);
        }
        si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
        si_vertex_buffers_begin_new_cs(sctx);
        si_shader_userdata_begin_new_cs(sctx);
}