android-x86/external-mesa.git: src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Marek Olšák <marek.olsak@amd.com>
25  */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28  * 4 dwords) are stored in lists in memory which is accessed by shaders
29  * using scalar load instructions.
30  *
31  * This file is responsible for managing such lists. It keeps a copy of all
32  * descriptors in CPU memory and re-uploads a whole list if some slots have
33  * been changed.
34  *
35  * This code is also responsible for updating shader pointers to those lists.
36  *
37  * Note that CP DMA can't be used for updating the lists, because a GPU hang
38  * could leave the list in a mid-IB state and the next IB would get wrong
39  * descriptors and the whole context would be unusable at that point.
40  * (Note: register shadowing can't be used for the same reason.)
41  *
42  * Also, uploading descriptors to newly allocated memory doesn't require
43  * a KCACHE flush.
44  *
45  *
46  * Possible scenarios for one 16 dword image+sampler slot:
47  *
48  *       | Image        | w/ FMASK   | Buffer       | NULL
49  * [ 0: 3] Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
50  * [ 4: 7] Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
51  * [ 8:11] Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
52  * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53  *
54  * FMASK implies MSAA, therefore no sampler state.
55  * Sampler states are never unbound except when FMASK is bound.
56  */
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "si_shader.h"
61 #include "sid.h"
62
63 #include "util/u_format.h"
64 #include "util/u_math.h"
65 #include "util/u_memory.h"
66 #include "util/u_suballoc.h"
67 #include "util/u_upload_mgr.h"
68
69
70 /* NULL image and buffer descriptor for textures (alpha = 1) and images
71  * (alpha = 0).
72  *
73  * For images, all fields must be zero except for the swizzle, which
74  * supports arbitrary combinations of 0s and 1s. The texture type must be
75  * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
76  *
77  * For buffers, all fields must be zero. If they are not, the hw hangs.
78  *
79  * This is the only reason why the buffer descriptor must be in words [4:7].
80  */
81 static uint32_t null_texture_descriptor[8] = {
82         0,
83         0,
84         0,
85         S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
86         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
87         /* the rest must contain zeros, which is also used by the buffer
88          * descriptor */
89 };
90
91 static uint32_t null_image_descriptor[8] = {
92         0,
93         0,
94         0,
95         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
96         /* the rest must contain zeros, which is also used by the buffer
97          * descriptor */
98 };
99
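/* Allocate the CPU copy of a descriptor list, mark all slots dirty,
 * record the list's user-data register offset, and optionally reserve a
 * 32-byte-aligned region of CE RAM for it.
 */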
100 static void si_init_descriptors(struct si_descriptors *desc,
101                                 unsigned shader_userdata_index,
102                                 unsigned element_dw_size,
103                                 unsigned num_elements,
104                                 const uint32_t *null_descriptor,
105                                 unsigned *ce_offset)
106 {
107         int i;
108
109         assert(num_elements <= sizeof(desc->enabled_mask)*8);
110
111         desc->list = CALLOC(num_elements, element_dw_size * 4);
112         desc->element_dw_size = element_dw_size;
113         desc->num_elements = num_elements;
114         desc->dirty_mask = num_elements == 32 ? ~0u : (1u << num_elements) - 1;
115         desc->shader_userdata_offset = shader_userdata_index * 4;
116
117         if (ce_offset) {
118                 desc->ce_offset = *ce_offset;
119
120                 /* make sure that ce_offset stays 32 byte aligned */
121                 *ce_offset += align(element_dw_size * num_elements * 4, 32);
122         }
123
124         /* Initialize the array to NULL descriptors; the element size must be a multiple of 8 dwords. */
125         if (null_descriptor) {
126                 assert(element_dw_size % 8 == 0);
127                 for (i = 0; i < num_elements * element_dw_size / 8; i++)
128                         memcpy(desc->list + i * 8, null_descriptor,
129                                8 * 4);
130         }
131 }
132
133 static void si_release_descriptors(struct si_descriptors *desc)
134 {
135         pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
136         FREE(desc->list);
137 }
138
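/* Suballocate GPU memory for a descriptor list and emit DUMP_CONST_RAM to
 * copy "size" bytes from CE RAM at "ce_offset" into it. The resulting
 * buffer and offset are returned through out_buf and out_offset.
 */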
139 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
140                          unsigned *out_offset, struct r600_resource **out_buf) {
141         uint64_t va;
142
143         u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
144                              (struct pipe_resource**)out_buf);
145         if (!*out_buf)
146                 return false;
147
148         va = (*out_buf)->gpu_address + *out_offset;
149
150         radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
151         radeon_emit(sctx->ce_ib, ce_offset);
152         radeon_emit(sctx->ce_ib, size / 4);
153         radeon_emit(sctx->ce_ib, va);
154         radeon_emit(sctx->ce_ib, va >> 32);
155
156         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
157                                RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
158
159         sctx->ce_need_synchronization = true;
160         return true;
161 }
162
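/* Reload a descriptor list from its last uploaded buffer back into CE RAM
 * with LOAD_CONST_RAM. This is used when the CE RAM contents may be stale,
 * e.g. at the beginning of a new command stream.
 */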
163 static void si_ce_reinitialize_descriptors(struct si_context *sctx,
164                             struct si_descriptors *desc)
165 {
166         if (desc->buffer) {
167                 struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
168                 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
169                 uint64_t va = buffer->gpu_address + desc->buffer_offset;
170                 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
171
172                 if (!ib)
173                         ib = sctx->ce_ib;
174
175                 list_size = align(list_size, 32);
176
177                 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
178                 radeon_emit(ib, va);
179                 radeon_emit(ib, va >> 32);
180                 radeon_emit(ib, list_size / 4);
181                 radeon_emit(ib, desc->ce_offset);
182
183                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
184                                     RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
185         }
186         desc->ce_ram_dirty = false;
187 }
188
189 void si_ce_reinitialize_all_descriptors(struct si_context *sctx)
190 {
191         for (int i = 0; i < SI_NUM_SHADERS; i++) {
192                  si_ce_reinitialize_descriptors(sctx, &sctx->const_buffers[i].desc);
193                  si_ce_reinitialize_descriptors(sctx, &sctx->shader_buffers[i].desc);
194                  si_ce_reinitialize_descriptors(sctx, &sctx->samplers[i].views.desc);
195                  si_ce_reinitialize_descriptors(sctx, &sctx->images[i].desc);
196         }
197          si_ce_reinitialize_descriptors(sctx, &sctx->rw_buffers.desc);
198 }
199
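/* Emit CONTEXT_CONTROL to enable loading CE RAM and register shadowing at
 * the start of an IB.
 */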
200 void si_ce_enable_loads(struct radeon_winsys_cs *ib)
201 {
202         radeon_emit(ib, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
203         radeon_emit(ib, CONTEXT_CONTROL_LOAD_ENABLE(1) |
204                         CONTEXT_CONTROL_LOAD_CE_RAM(1));
205         radeon_emit(ib, CONTEXT_CONTROL_SHADOW_ENABLE(1));
206 }
207
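/* Upload the dirty slots of a descriptor list. With CE, only the dirty
 * ranges are written to CE RAM and the whole list is then dumped into a
 * fresh buffer; without CE, the entire CPU copy is uploaded via the
 * upload manager.
 */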
208 static bool si_upload_descriptors(struct si_context *sctx,
209                                   struct si_descriptors *desc,
210                                   struct r600_atom * atom)
211 {
212         unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
213
214         if (!desc->dirty_mask)
215                 return true;
216
217         if (sctx->ce_ib) {
218                 uint32_t const* list = (uint32_t const*)desc->list;
219
220                 if (desc->ce_ram_dirty)
221                         si_ce_reinitialize_descriptors(sctx, desc);
222
223                 while(desc->dirty_mask) {
224                         int begin, count;
225                         u_bit_scan_consecutive_range(&desc->dirty_mask, &begin,
226                                                      &count);
227
228                         begin *= desc->element_dw_size;
229                         count *= desc->element_dw_size;
230
231                         radeon_emit(sctx->ce_ib,
232                                     PKT3(PKT3_WRITE_CONST_RAM, count, 0));
233                         radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
234                         radeon_emit_array(sctx->ce_ib, list + begin, count);
235                 }
236
237                 if (!si_ce_upload(sctx, desc->ce_offset, list_size,
238                                            &desc->buffer_offset, &desc->buffer))
239                         return false;
240         } else {
241                 void *ptr;
242
243                 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
244                         &desc->buffer_offset,
245                         (struct pipe_resource**)&desc->buffer, &ptr);
246                 if (!desc->buffer)
247                         return false; /* skip the draw call */
248
249                 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
250
251                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
252                                     RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
253         }
254         desc->pointer_dirty = true;
255         desc->dirty_mask = 0;
256
257         if (atom)
258                 si_mark_atom_dirty(sctx, atom);
259
260         return true;
261 }
262
263 /* SAMPLER VIEWS */
264
265 static void si_release_sampler_views(struct si_sampler_views *views)
266 {
267         int i;
268
269         for (i = 0; i < ARRAY_SIZE(views->views); i++) {
270                 pipe_sampler_view_reference(&views->views[i], NULL);
271         }
272         si_release_descriptors(&views->desc);
273 }
274
275 static void si_sampler_view_add_buffer(struct si_context *sctx,
276                                        struct pipe_resource *resource,
277                                        enum radeon_bo_usage usage)
278 {
279         struct r600_resource *rres = (struct r600_resource*)resource;
280
281         if (!resource)
282                 return;
283
284         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres, usage,
285                                   r600_get_sampler_view_priority(rres));
286 }
287
288 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
289                                           struct si_sampler_views *views)
290 {
291         unsigned mask = views->desc.enabled_mask;
292
293         /* Add buffers to the CS. */
294         while (mask) {
295                 int i = u_bit_scan(&mask);
296
297                 si_sampler_view_add_buffer(sctx, views->views[i]->texture,
298                                            RADEON_USAGE_READ);
299         }
300
301         views->desc.ce_ram_dirty = true;
302
303         if (!views->desc.buffer)
304                 return;
305         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
306                               RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
307 }
308
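/* Bind one sampler view. The 16-dword slot is filled according to the
 * layout documented at the top of this file: image descriptor in [0:7],
 * FMASK or NULL in [8:11], and the sampler state in [12:15] when no FMASK
 * is bound.
 */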
309 static void si_set_sampler_view(struct si_context *sctx,
310                                 struct si_sampler_views *views,
311                                 unsigned slot, struct pipe_sampler_view *view)
312 {
313         struct si_sampler_view *rview = (struct si_sampler_view*)view;
314
315         if (view && view->texture && view->texture->target != PIPE_BUFFER &&
316             G_008F28_COMPRESSION_EN(rview->state[6]) &&
317             ((struct r600_texture*)view->texture)->dcc_offset == 0) {
318                 rview->state[6] &= C_008F28_COMPRESSION_EN &
319                                    C_008F28_ALPHA_IS_ON_MSB;
320         } else if (views->views[slot] == view)
321                 return;
322
323         if (view) {
324                 struct r600_texture *rtex = (struct r600_texture *)view->texture;
325
326                 si_sampler_view_add_buffer(sctx, view->texture,
327                                            RADEON_USAGE_READ);
328
329                 pipe_sampler_view_reference(&views->views[slot], view);
330                 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
331
332                 if (view->texture && view->texture->target != PIPE_BUFFER &&
333                     rtex->fmask.size) {
334                         memcpy(views->desc.list + slot*16 + 8,
335                                rview->fmask_state, 8*4);
336                 } else {
337                         /* Disable FMASK and bind sampler state in [12:15]. */
338                         memcpy(views->desc.list + slot*16 + 8,
339                                null_texture_descriptor, 4*4);
340
341                         if (views->sampler_states[slot])
342                                 memcpy(views->desc.list + slot*16 + 12,
343                                        views->sampler_states[slot], 4*4);
344                 }
345
346                 views->desc.enabled_mask |= 1u << slot;
347         } else {
348                 pipe_sampler_view_reference(&views->views[slot], NULL);
349                 memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
350                 /* Only clear the lower dwords of FMASK. */
351                 memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
352                 views->desc.enabled_mask &= ~(1u << slot);
353         }
354
355         views->desc.dirty_mask |= 1u << slot;
356 }
357
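/* Return whether a color texture may need decompression before sampling:
 * it has CMASK or FMASK metadata, or DCC with dirty levels.
 */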
358 static bool is_compressed_colortex(struct r600_texture *rtex)
359 {
360         return rtex->cmask.size || rtex->fmask.size ||
361                (rtex->dcc_offset && rtex->dirty_level_mask);
362 }
363
364 static void si_set_sampler_views(struct pipe_context *ctx,
365                                  unsigned shader, unsigned start,
366                                  unsigned count,
367                                  struct pipe_sampler_view **views)
368 {
369         struct si_context *sctx = (struct si_context *)ctx;
370         struct si_textures_info *samplers = &sctx->samplers[shader];
371         int i;
372
373         if (!count || shader >= SI_NUM_SHADERS)
374                 return;
375
376         for (i = 0; i < count; i++) {
377                 unsigned slot = start + i;
378
379                 if (!views || !views[i]) {
380                         samplers->depth_texture_mask &= ~(1u << slot);
381                         samplers->compressed_colortex_mask &= ~(1u << slot);
382                         si_set_sampler_view(sctx, &samplers->views, slot, NULL);
383                         continue;
384                 }
385
386                 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
387
388                 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
389                         struct r600_texture *rtex =
390                                 (struct r600_texture*)views[i]->texture;
391
392                         if (rtex->is_depth && !rtex->is_flushing_texture) {
393                                 samplers->depth_texture_mask |= 1u << slot;
394                         } else {
395                                 samplers->depth_texture_mask &= ~(1u << slot);
396                         }
397                         if (is_compressed_colortex(rtex)) {
398                                 samplers->compressed_colortex_mask |= 1u << slot;
399                         } else {
400                                 samplers->compressed_colortex_mask &= ~(1u << slot);
401                         }
402                 } else {
403                         samplers->depth_texture_mask &= ~(1u << slot);
404                         samplers->compressed_colortex_mask &= ~(1u << slot);
405                 }
406         }
407 }
408
409 static void
410 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
411 {
412         unsigned mask = samplers->views.desc.enabled_mask;
413
414         while (mask) {
415                 int i = u_bit_scan(&mask);
416                 struct pipe_resource *res = samplers->views.views[i]->texture;
417
418                 if (res && res->target != PIPE_BUFFER) {
419                         struct r600_texture *rtex = (struct r600_texture *)res;
420
421                         if (is_compressed_colortex(rtex)) {
422                                 samplers->compressed_colortex_mask |= 1u << i;
423                         } else {
424                                 samplers->compressed_colortex_mask &= ~(1u << i);
425                         }
426                 }
427         }
428 }
429
430 /* IMAGE VIEWS */
431
432 static void
433 si_release_image_views(struct si_images_info *images)
434 {
435         unsigned i;
436
437         for (i = 0; i < SI_NUM_IMAGES; ++i) {
438                 struct pipe_image_view *view = &images->views[i];
439
440                 pipe_resource_reference(&view->resource, NULL);
441         }
442
443         si_release_descriptors(&images->desc);
444 }
445
446 static void
447 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
448 {
449         uint mask = images->desc.enabled_mask;
450
451         /* Add buffers to the CS. */
452         while (mask) {
453                 int i = u_bit_scan(&mask);
454                 struct pipe_image_view *view = &images->views[i];
455
456                 assert(view->resource);
457
458                 si_sampler_view_add_buffer(sctx, view->resource,
459                                            RADEON_USAGE_READWRITE);
460         }
461
462         images->desc.ce_ram_dirty = true;
463
464         if (images->desc.buffer) {
465                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
466                                           images->desc.buffer,
467                                           RADEON_USAGE_READ,
468                                           RADEON_PRIO_DESCRIPTORS);
469         }
470 }
471
472 static void
473 si_disable_shader_image(struct si_images_info *images, unsigned slot)
474 {
475         if (images->desc.enabled_mask & (1u << slot)) {
476                 pipe_resource_reference(&images->views[slot].resource, NULL);
477                 images->compressed_colortex_mask &= ~(1 << slot);
478
479                 memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
480                 images->desc.enabled_mask &= ~(1u << slot);
481                 images->desc.dirty_mask |= 1u << slot;
482         }
483 }
484
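/* Extend the resource's valid buffer range to cover the range addressed
 * by a writable buffer image view.
 */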
485 static void
486 si_mark_image_range_valid(struct pipe_image_view *view)
487 {
488         struct r600_resource *res = (struct r600_resource *)view->resource;
489         const struct util_format_description *desc;
490         unsigned stride;
491
492         assert(res && res->b.b.target == PIPE_BUFFER);
493
494         desc = util_format_description(view->format);
495         stride = desc->block.bits / 8;
496
497         util_range_add(&res->valid_buffer_range,
498                        stride * (view->u.buf.first_element),
499                        stride * (view->u.buf.last_element + 1));
500 }
501
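/* Bind "count" shader images starting at start_slot. Buffer images get a
 * buffer descriptor; texture images get a texture descriptor with the
 * base level forced to the selected level (see the comment below).
 */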
502 static void
503 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
504                      unsigned start_slot, unsigned count,
505                      struct pipe_image_view *views)
506 {
507         struct si_context *ctx = (struct si_context *)pipe;
508         struct si_screen *screen = ctx->screen;
509         struct si_images_info *images = &ctx->images[shader];
510         unsigned i, slot;
511
512         assert(shader < SI_NUM_SHADERS);
513
514         if (!count)
515                 return;
516
517         assert(start_slot + count <= SI_NUM_IMAGES);
518
519         for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
520                 struct r600_resource *res;
521
522                 if (!views || !views[i].resource) {
523                         si_disable_shader_image(images, slot);
524                         continue;
525                 }
526
527                 res = (struct r600_resource *)views[i].resource;
528                 util_copy_image_view(&images->views[slot], &views[i]);
529
530                 si_sampler_view_add_buffer(ctx, &res->b.b,
531                                            RADEON_USAGE_READWRITE);
532
533                 if (res->b.b.target == PIPE_BUFFER) {
534                         if (views[i].access & PIPE_IMAGE_ACCESS_WRITE)
535                                 si_mark_image_range_valid(&views[i]);
536
537                         si_make_buffer_descriptor(screen, res,
538                                                   views[i].format,
539                                                   views[i].u.buf.first_element,
540                                                   views[i].u.buf.last_element,
541                                                   images->desc.list + slot * 8);
542                         images->compressed_colortex_mask &= ~(1 << slot);
543                 } else {
544                         static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
545                         struct r600_texture *tex = (struct r600_texture *)res;
546                         unsigned level;
547                         unsigned width, height, depth;
548
549                         assert(!tex->is_depth);
550                         assert(tex->fmask.size == 0);
551
552                         if (tex->dcc_offset &&
553                             views[i].access & PIPE_IMAGE_ACCESS_WRITE)
554                                 r600_texture_disable_dcc(&screen->b, tex);
555
556                         if (is_compressed_colortex(tex)) {
557                                 images->compressed_colortex_mask |= 1 << slot;
558                         } else {
559                                 images->compressed_colortex_mask &= ~(1 << slot);
560                         }
561
562                         /* Always force the base level to the selected level.
563                          *
564                          * This is required for 3D textures, where otherwise
565                          * selecting a single slice for non-layered bindings
566                          * fails. It doesn't hurt the other targets.
567                          */
568                         level = views[i].u.tex.level;
569                         width = u_minify(res->b.b.width0, level);
570                         height = u_minify(res->b.b.height0, level);
571                         depth = u_minify(res->b.b.depth0, level);
572
573                         si_make_texture_descriptor(screen, tex, false, res->b.b.target,
574                                                    views[i].format, swizzle,
575                                                    level, 0, 0,
576                                                    views[i].u.tex.first_layer, views[i].u.tex.last_layer,
577                                                    width, height, depth,
578                                                    images->desc.list + slot * 8,
579                                                    NULL);
580                 }
581
582                 images->desc.enabled_mask |= 1u << slot;
583                 images->desc.dirty_mask |= 1u << slot;
584         }
585 }
586
587 static void
588 si_images_update_compressed_colortex_mask(struct si_images_info *images)
589 {
590         unsigned mask = images->desc.enabled_mask;
591
592         while (mask) {
593                 int i = u_bit_scan(&mask);
594                 struct pipe_resource *res = images->views[i].resource;
595
596                 if (res && res->target != PIPE_BUFFER) {
597                         struct r600_texture *rtex = (struct r600_texture *)res;
598
599                         if (is_compressed_colortex(rtex)) {
600                                 images->compressed_colortex_mask |= 1 << i;
601                         } else {
602                                 images->compressed_colortex_mask &= ~(1 << i);
603                         }
604                 }
605         }
606 }
607
608 /* SAMPLER STATES */
609
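/* Bind "count" sampler states starting at "start". States for slots whose
 * views have FMASK bound are not written here; they are applied when the
 * FMASK view is replaced (see si_set_sampler_view).
 */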
610 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
611                                    unsigned start, unsigned count, void **states)
612 {
613         struct si_context *sctx = (struct si_context *)ctx;
614         struct si_textures_info *samplers = &sctx->samplers[shader];
615         struct si_descriptors *desc = &samplers->views.desc;
616         struct si_sampler_state **sstates = (struct si_sampler_state**)states;
617         int i;
618
619         if (!count || shader >= SI_NUM_SHADERS)
620                 return;
621
622         for (i = 0; i < count; i++) {
623                 unsigned slot = start + i;
624
625                 if (!sstates[i] ||
626                     sstates[i] == samplers->views.sampler_states[slot])
627                         continue;
628
629                 samplers->views.sampler_states[slot] = sstates[i];
630
631                 /* If FMASK is bound, don't overwrite it.
632                  * The sampler state will be set after FMASK is unbound.
633                  */
634                 if (samplers->views.views[slot] &&
635                     samplers->views.views[slot]->texture &&
636                     samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
637                     ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
638                         continue;
639
640                 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
641                 desc->dirty_mask |= 1u << slot;
642         }
643 }
644
645 /* BUFFER RESOURCES */
646
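/* Initialize a list of buffer bindings; each descriptor is a plain
 * 4-dword buffer resource.
 */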
647 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
648                                      unsigned num_buffers,
649                                      unsigned shader_userdata_index,
650                                      enum radeon_bo_usage shader_usage,
651                                      enum radeon_bo_priority priority,
652                                      unsigned *ce_offset)
653 {
654         buffers->shader_usage = shader_usage;
655         buffers->priority = priority;
656         buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
657
658         si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
659                             num_buffers, NULL, ce_offset);
660 }
661
662 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
663 {
664         int i;
665
666         for (i = 0; i < buffers->desc.num_elements; i++) {
667                 pipe_resource_reference(&buffers->buffers[i], NULL);
668         }
669
670         FREE(buffers->buffers);
671         si_release_descriptors(&buffers->desc);
672 }
673
674 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
675                                              struct si_buffer_resources *buffers)
676 {
677         unsigned mask = buffers->desc.enabled_mask;
678
679         /* Add buffers to the CS. */
680         while (mask) {
681                 int i = u_bit_scan(&mask);
682
683                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
684                                       (struct r600_resource*)buffers->buffers[i],
685                                       buffers->shader_usage, buffers->priority);
686         }
687
688         buffers->desc.ce_ram_dirty = true;
689
690         if (!buffers->desc.buffer)
691                 return;
692         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
693                               buffers->desc.buffer, RADEON_USAGE_READWRITE,
694                               RADEON_PRIO_DESCRIPTORS);
695 }
696
697 /* VERTEX BUFFERS */
698
699 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
700 {
701         struct si_descriptors *desc = &sctx->vertex_buffers;
702         int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
703         int i;
704
705         for (i = 0; i < count; i++) {
706                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
707
708                 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
709                         continue;
710                 if (!sctx->vertex_buffer[vb].buffer)
711                         continue;
712
713                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
714                                       (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
715                                       RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
716         }
717
718         if (!desc->buffer)
719                 return;
720         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
721                               desc->buffer, RADEON_USAGE_READ,
722                               RADEON_PRIO_DESCRIPTORS);
723 }
724
725 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
726 {
727         struct si_descriptors *desc = &sctx->vertex_buffers;
728         bool bound[SI_NUM_VERTEX_BUFFERS] = {};
729         unsigned i, count;
730         uint64_t va;
731         uint32_t *ptr;
732
733         if (!sctx->vertex_buffers_dirty || !sctx->vertex_elements ||
734             !sctx->vertex_elements->count)
735                 return true;
736         count = sctx->vertex_elements->count;
737
738         /* Vertex buffer descriptors are the only ones which are uploaded
739          * directly through a staging buffer and don't go through
740          * the fine-grained upload path.
741          */
742         u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
743                        (struct pipe_resource**)&desc->buffer, (void**)&ptr);
744         if (!desc->buffer)
745                 return false;
746
747         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
748                               desc->buffer, RADEON_USAGE_READ,
749                               RADEON_PRIO_DESCRIPTORS);
750
751         assert(count <= SI_NUM_VERTEX_BUFFERS);
752
753         for (i = 0; i < count; i++) {
754                 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
755                 struct pipe_vertex_buffer *vb;
756                 struct r600_resource *rbuffer;
757                 unsigned offset;
758                 uint32_t *desc = &ptr[i*4];
759
760                 if (ve->vertex_buffer_index >= ARRAY_SIZE(sctx->vertex_buffer)) {
761                         memset(desc, 0, 16);
762                         continue;
763                 }
764
765                 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
766                 rbuffer = (struct r600_resource*)vb->buffer;
767                 if (!rbuffer) {
768                         memset(desc, 0, 16);
769                         continue;
770                 }
771
772                 offset = vb->buffer_offset + ve->src_offset;
773                 va = rbuffer->gpu_address + offset;
774
775                 /* Fill in T# buffer resource description */
776                 desc[0] = va;
777                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
778                           S_008F04_STRIDE(vb->stride);
779
780                 if (sctx->b.chip_class <= CIK && vb->stride)
781                         /* Round up by rounding down and adding 1 */
782                         desc[2] = (vb->buffer->width0 - offset -
783                                    sctx->vertex_elements->format_size[i]) /
784                                   vb->stride + 1;
785                 else
786                         desc[2] = vb->buffer->width0 - offset;
787
788                 desc[3] = sctx->vertex_elements->rsrc_word3[i];
789
790                 if (!bound[ve->vertex_buffer_index]) {
791                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
792                                               (struct r600_resource*)vb->buffer,
793                                               RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
794                         bound[ve->vertex_buffer_index] = true;
795                 }
796         }
797
798         /* Don't flush the const cache. It would have a very negative effect
799          * on performance (confirmed by testing). New descriptors are always
800          * uploaded to a fresh new buffer, so I don't think flushing the const
801          * cache is needed. */
802         desc->pointer_dirty = true;
803         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
804         sctx->vertex_buffers_dirty = false;
805         return true;
806 }
807
808
809 /* CONSTANT BUFFERS */
810
811 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
812                             const uint8_t *ptr, unsigned size, uint32_t *const_offset)
813 {
814         void *tmp;
815
816         u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
817                        (struct pipe_resource**)rbuffer, &tmp);
818         if (*rbuffer)
819                 util_memcpy_cpu_to_le32(tmp, ptr, size);
820 }
821
822 static void si_set_constant_buffer(struct si_context *sctx,
823                                    struct si_buffer_resources *buffers,
824                                    uint slot, struct pipe_constant_buffer *input)
825 {
826         assert(slot < buffers->desc.num_elements);
827         pipe_resource_reference(&buffers->buffers[slot], NULL);
828
829         /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
830          * with a NULL buffer). We need to use a dummy buffer instead. */
831         if (sctx->b.chip_class == CIK &&
832             (!input || (!input->buffer && !input->user_buffer)))
833                 input = &sctx->null_const_buf;
834
835         if (input && (input->buffer || input->user_buffer)) {
836                 struct pipe_resource *buffer = NULL;
837                 uint64_t va;
838
839                 /* Upload the user buffer if needed. */
840                 if (input->user_buffer) {
841                         unsigned buffer_offset;
842
843                         si_upload_const_buffer(sctx,
844                                                (struct r600_resource**)&buffer, input->user_buffer,
845                                                input->buffer_size, &buffer_offset);
846                         if (!buffer) {
847                                 /* Just unbind on failure. */
848                                 si_set_constant_buffer(sctx, buffers, slot, NULL);
849                                 return;
850                         }
851                         va = r600_resource(buffer)->gpu_address + buffer_offset;
852                 } else {
853                         pipe_resource_reference(&buffer, input->buffer);
854                         va = r600_resource(buffer)->gpu_address + input->buffer_offset;
855                 }
856
857                 /* Set the descriptor. */
858                 uint32_t *desc = buffers->desc.list + slot*4;
859                 desc[0] = va;
860                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
861                           S_008F04_STRIDE(0);
862                 desc[2] = input->buffer_size;
863                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
864                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
865                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
866                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
867                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
868                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
869
870                 buffers->buffers[slot] = buffer;
871                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
872                                       (struct r600_resource*)buffer,
873                                       buffers->shader_usage, buffers->priority);
874                 buffers->desc.enabled_mask |= 1u << slot;
875         } else {
876                 /* Clear the descriptor. */
877                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
878                 buffers->desc.enabled_mask &= ~(1u << slot);
879         }
880
881         buffers->desc.dirty_mask |= 1u << slot;
882 }
883
884 void si_set_rw_buffer(struct si_context *sctx,
885                       uint slot, struct pipe_constant_buffer *input)
886 {
887         si_set_constant_buffer(sctx, &sctx->rw_buffers, slot, input);
888 }
889
890 static void si_pipe_set_constant_buffer(struct pipe_context *ctx,
891                                         uint shader, uint slot,
892                                         struct pipe_constant_buffer *input)
893 {
894         struct si_context *sctx = (struct si_context *)ctx;
895
896         if (shader >= SI_NUM_SHADERS)
897                 return;
898
899         si_set_constant_buffer(sctx, &sctx->const_buffers[shader], slot, input);
900 }
901
902 /* SHADER BUFFERS */
903
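/* Bind "count" shader storage buffers starting at start_slot; a NULL
 * entry clears the slot.
 */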
904 static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
905                                   unsigned start_slot, unsigned count,
906                                   struct pipe_shader_buffer *sbuffers)
907 {
908         struct si_context *sctx = (struct si_context *)ctx;
909         struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
910         unsigned i;
911
912         assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
913
914         for (i = 0; i < count; ++i) {
915                 struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
916                 struct r600_resource *buf;
917                 unsigned slot = start_slot + i;
918                 uint32_t *desc = buffers->desc.list + slot * 4;
919                 uint64_t va;
920
921                 if (!sbuffer || !sbuffer->buffer) {
922                         pipe_resource_reference(&buffers->buffers[slot], NULL);
923                         memset(desc, 0, sizeof(uint32_t) * 4);
924                         buffers->desc.enabled_mask &= ~(1u << slot);
925                         buffers->desc.dirty_mask |= 1u << slot;
926                         continue;
927                 }
928
929                 buf = (struct r600_resource *)sbuffer->buffer;
930                 va = buf->gpu_address + sbuffer->buffer_offset;
931
932                 desc[0] = va;
933                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
934                           S_008F04_STRIDE(0);
935                 desc[2] = sbuffer->buffer_size;
936                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
937                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
938                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
939                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
940                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
941                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
942
943                 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
944                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
945                                       buffers->shader_usage, buffers->priority);
946                 buffers->desc.enabled_mask |= 1u << slot;
947                 buffers->desc.dirty_mask |= 1u << slot;
948         }
949 }
950
951 /* RING BUFFERS */
952
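/* Bind an internal ring buffer. The element_size and index_stride
 * arguments are converted below into the encoded values expected by the
 * buffer descriptor fields.
 */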
953 void si_set_ring_buffer(struct pipe_context *ctx, uint slot,
954                         struct pipe_resource *buffer,
955                         unsigned stride, unsigned num_records,
956                         bool add_tid, bool swizzle,
957                         unsigned element_size, unsigned index_stride, uint64_t offset)
958 {
959         struct si_context *sctx = (struct si_context *)ctx;
960         struct si_buffer_resources *buffers = &sctx->rw_buffers;
961
962         /* The stride field in the resource descriptor has 14 bits */
963         assert(stride < (1 << 14));
964
965         assert(slot < buffers->desc.num_elements);
966         pipe_resource_reference(&buffers->buffers[slot], NULL);
967
968         if (buffer) {
969                 uint64_t va;
970
971                 va = r600_resource(buffer)->gpu_address + offset;
972
973                 switch (element_size) {
974                 default:
975                         assert(!"Unsupported ring buffer element size");
976                 case 0:
977                 case 2:
978                         element_size = 0;
979                         break;
980                 case 4:
981                         element_size = 1;
982                         break;
983                 case 8:
984                         element_size = 2;
985                         break;
986                 case 16:
987                         element_size = 3;
988                         break;
989                 }
990
991                 switch (index_stride) {
992                 default:
993                         assert(!"Unsupported ring buffer index stride");
994                 case 0:
995                 case 8:
996                         index_stride = 0;
997                         break;
998                 case 16:
999                         index_stride = 1;
1000                         break;
1001                 case 32:
1002                         index_stride = 2;
1003                         break;
1004                 case 64:
1005                         index_stride = 3;
1006                         break;
1007                 }
1008
1009                 if (sctx->b.chip_class >= VI && stride)
1010                         num_records *= stride;
1011
1012                 /* Set the descriptor. */
1013                 uint32_t *desc = buffers->desc.list + slot*4;
1014                 desc[0] = va;
1015                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
1016                           S_008F04_STRIDE(stride) |
1017                           S_008F04_SWIZZLE_ENABLE(swizzle);
1018                 desc[2] = num_records;
1019                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1020                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1021                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1022                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1023                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1024                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1025                           S_008F0C_ELEMENT_SIZE(element_size) |
1026                           S_008F0C_INDEX_STRIDE(index_stride) |
1027                           S_008F0C_ADD_TID_ENABLE(add_tid);
1028
1029                 pipe_resource_reference(&buffers->buffers[slot], buffer);
1030                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1031                                       (struct r600_resource*)buffer,
1032                                       buffers->shader_usage, buffers->priority);
1033                 buffers->desc.enabled_mask |= 1u << slot;
1034         } else {
1035                 /* Clear the descriptor. */
1036                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
1037                 buffers->desc.enabled_mask &= ~(1u << slot);
1038         }
1039
1040         buffers->desc.dirty_mask |= 1u << slot;
1041 }
1042
1043 /* STREAMOUT BUFFERS */
1044
1045 static void si_set_streamout_targets(struct pipe_context *ctx,
1046                                      unsigned num_targets,
1047                                      struct pipe_stream_output_target **targets,
1048                                      const unsigned *offsets)
1049 {
1050         struct si_context *sctx = (struct si_context *)ctx;
1051         struct si_buffer_resources *buffers = &sctx->rw_buffers;
1052         unsigned old_num_targets = sctx->b.streamout.num_targets;
1053         unsigned i, bufidx;
1054
1055         /* We are going to unbind the buffers. Mark which caches need to be flushed. */
1056         if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
1057                 /* Since streamout uses vector writes which go through TC L2
1058                  * and most other clients can use TC L2 as well, we don't need
1059                  * to flush it.
1060                  *
1061  * The only cases which require flushing it are VGT DMA index
1062                  * fetching (on <= CIK) and indirect draw data, which are rare
1063                  * cases. Thus, flag the TC L2 dirtiness in the resource and
1064                  * handle it at draw call time.
1065                  */
1066                 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1067                         if (sctx->b.streamout.targets[i])
1068                                 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1069
1070                 /* Invalidate the scalar cache in case a streamout buffer is
1071                  * going to be used as a constant buffer.
1072                  *
1073                  * Invalidate TC L1, because streamout bypasses it (done by
1074                  * setting GLC=1 in the store instruction), but it can contain
1075                  * outdated data of streamout buffers.
1076                  *
1077                  * VS_PARTIAL_FLUSH is required if the buffers are going to be
1078                  * used as an input immediately.
1079                  */
1080                 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1081                                  SI_CONTEXT_INV_VMEM_L1 |
1082                                  SI_CONTEXT_VS_PARTIAL_FLUSH;
1083         }
1084
1085         /* All readers of the streamout targets need to be finished before we can
1086          * start writing to the targets.
1087          */
1088         if (num_targets)
1089                 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH |
1090                                  SI_CONTEXT_CS_PARTIAL_FLUSH;
1091
1092         /* Streamout buffers must be bound in 2 places:
1093          * 1) in VGT by setting the VGT_STRMOUT registers
1094          * 2) as shader resources
1095          */
1096
1097         /* Set the VGT regs. */
1098         r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1099
1100         /* Set the shader resources. */
1101         for (i = 0; i < num_targets; i++) {
1102                 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1103
1104                 if (targets[i]) {
1105                         struct pipe_resource *buffer = targets[i]->buffer;
1106                         uint64_t va = r600_resource(buffer)->gpu_address;
1107
1108                         /* Set the descriptor.
1109                          *
1110                          * On VI, the format must be non-INVALID, otherwise
1111                          * the buffer will be considered not bound and store
1112                          * instructions will be no-ops.
1113                          */
1114                         uint32_t *desc = buffers->desc.list + bufidx*4;
1115                         desc[0] = va;
1116                         desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1117                         desc[2] = 0xffffffff;
1118                         desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1119                                   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1120                                   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1121                                   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1122                                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1123
1124                         /* Set the resource. */
1125                         pipe_resource_reference(&buffers->buffers[bufidx],
1126                                                 buffer);
1127                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1128                                               (struct r600_resource*)buffer,
1129                                               buffers->shader_usage, buffers->priority);
1130                         buffers->desc.enabled_mask |= 1u << bufidx;
1131                 } else {
1132                         /* Clear the descriptor and unset the resource. */
1133                         memset(buffers->desc.list + bufidx*4, 0,
1134                                sizeof(uint32_t) * 4);
1135                         pipe_resource_reference(&buffers->buffers[bufidx],
1136                                                 NULL);
1137                         buffers->desc.enabled_mask &= ~(1u << bufidx);
1138                 }
1139                 buffers->desc.dirty_mask |= 1u << bufidx;
1140         }
1141         for (; i < old_num_targets; i++) {
1142                 bufidx = SI_VS_STREAMOUT_BUF0 + i;
1143                 /* Clear the descriptor and unset the resource. */
1144                 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
1145                 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1146                 buffers->desc.enabled_mask &= ~(1u << bufidx);
1147                 buffers->desc.dirty_mask |= 1u << bufidx;
1148         }
1149 }
1150
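/* Patch the base address in a buffer descriptor after the underlying
 * buffer has been reallocated, preserving the offset encoded in the old
 * descriptor.
 */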
1151 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1152                                         uint32_t *desc, uint64_t old_buf_va,
1153                                         struct pipe_resource *new_buf)
1154 {
1155         /* Retrieve the buffer offset from the descriptor. */
1156         uint64_t old_desc_va =
1157                 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1158
1159         assert(old_buf_va <= old_desc_va);
1160         uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1161
1162         /* Update the descriptor. */
1163         uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
1164
1165         desc[0] = va;
1166         desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
1167                   S_008F04_BASE_ADDRESS_HI(va >> 32);
1168 }
1169
1170 /* INTERNAL CONST BUFFERS */
1171
1172 static void si_set_polygon_stipple(struct pipe_context *ctx,
1173                                    const struct pipe_poly_stipple *state)
1174 {
1175         struct si_context *sctx = (struct si_context *)ctx;
1176         struct pipe_constant_buffer cb = {};
1177         unsigned stipple[32];
1178         int i;
1179
1180         for (i = 0; i < 32; i++)
1181                 stipple[i] = util_bitreverse(state->stipple[i]);
1182
1183         cb.user_buffer = stipple;
1184         cb.buffer_size = sizeof(stipple);
1185
1186         si_set_rw_buffer(sctx, SI_PS_CONST_POLY_STIPPLE, &cb);
1187 }
1188
1189 /* TEXTURE METADATA ENABLE/DISABLE */
1190
1191 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1192  * while the texture is bound, possibly by a different context. In that case,
1193  * call this function to update compressed_colortex_masks.
1194  */
1195 void si_update_compressed_colortex_masks(struct si_context *sctx)
1196 {
1197         for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1198                 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1199                 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1200         }
1201 }
1202
1203 /* BUFFER DISCARD/INVALIDATION */
1204
1205 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1206 static void si_reset_buffer_resources(struct si_context *sctx,
1207                                       struct si_buffer_resources *buffers,
1208                                       struct pipe_resource *buf,
1209                                       uint64_t old_va)
1210 {
1211         unsigned mask = buffers->desc.enabled_mask;
1212
1213         while (mask) {
1214                 unsigned i = u_bit_scan(&mask);
1215                 if (buffers->buffers[i] == buf) {
1216                         si_desc_reset_buffer_offset(&sctx->b.b,
1217                                                     buffers->desc.list + i*4,
1218                                                     old_va, buf);
1219                         buffers->desc.dirty_mask |= 1u << i;
1220
1221                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1222                                                 (struct r600_resource *)buf,
1223                                                 buffers->shader_usage,
1224                                                 buffers->priority);
1225                 }
1226         }
1227 }
1228
1229 /* Reallocate a buffer and update all resource bindings where the buffer is
1230  * bound.
1231  *
1232  * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1233  * idle by discarding its contents. Apps usually tell us when to do this using
1234  * map_buffer flags, for example.
1235  */
1236 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1237 {
1238         struct si_context *sctx = (struct si_context*)ctx;
1239         struct r600_resource *rbuffer = r600_resource(buf);
1240         unsigned i, shader, alignment = rbuffer->buf->alignment;
1241         uint64_t old_va = rbuffer->gpu_address;
1242         unsigned num_elems = sctx->vertex_elements ?
1243                                        sctx->vertex_elements->count : 0;
1244         struct si_sampler_view *view;
1245
1246         /* Reallocate the buffer in the same pipe_resource. */
1247         r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
1248                            alignment);
1249
1250         /* We changed the buffer, now we need to bind it where the old one
1251          * was bound. This consists of 2 things:
1252          *   1) Updating the resource descriptor and dirtying it.
1253          *   2) Adding a relocation to the CS, so that it's usable.
1254          */
1255
1256         /* Vertex buffers. */
1257         for (i = 0; i < num_elems; i++) {
1258                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1259
1260                 if (vb >= ARRAY_SIZE(sctx->vertex_buffer))
1261                         continue;
1262                 if (!sctx->vertex_buffer[vb].buffer)
1263                         continue;
1264
1265                 if (sctx->vertex_buffer[vb].buffer == buf) {
1266                         sctx->vertex_buffers_dirty = true;
1267                         break;
1268                 }
1269         }
1270
1271         /* Streamout buffers. (other internal buffers can't be invalidated) */
1272         for (i = SI_VS_STREAMOUT_BUF0; i <= SI_VS_STREAMOUT_BUF3; i++) {
1273                 struct si_buffer_resources *buffers = &sctx->rw_buffers;
1274
1275                 if (buffers->buffers[i] != buf)
1276                         continue;
1277
1278                 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1279                                             old_va, buf);
1280                 buffers->desc.dirty_mask |= 1u << i;
1281
1282                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1283                                           rbuffer, buffers->shader_usage,
1284                                           buffers->priority);
1285
1286                 /* Update the streamout state. */
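                /* Streamout has to be stopped and restarted so that the new
                 * buffer address is programmed; append_bitmask is set so the
                 * restarted streamout should continue at the saved offsets
                 * instead of starting over at 0. */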
1287                 if (sctx->b.streamout.begin_emitted)
1288                         r600_emit_streamout_end(&sctx->b);
1289                 sctx->b.streamout.append_bitmask =
1290                                 sctx->b.streamout.enabled_mask;
1291                 r600_streamout_buffers_dirty(&sctx->b);
1292         }
1293
1294         /* Constant and shader buffers. */
1295         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1296                 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1297                                           buf, old_va);
1298                 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1299                                           buf, old_va);
1300         }
1301
1302         /* Texture buffers - update virtual addresses in sampler view descriptors. */
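        /* In the 16-dword image+sampler slot, the buffer resource descriptor
         * occupies dwords [4:7], hence &view->state[4] here and
         * desc.list + i*16 + 4 in the binding update below. */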
1303         LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1304                 if (view->base.texture == buf) {
1305                         si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1306                 }
1307         }
1308         /* Texture buffers - update bindings. */
1309         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1310                 struct si_sampler_views *views = &sctx->samplers[shader].views;
1311                 unsigned mask = views->desc.enabled_mask;
1312
1313                 while (mask) {
1314                         unsigned i = u_bit_scan(&mask);
1315                         if (views->views[i]->texture == buf) {
1316                                 si_desc_reset_buffer_offset(ctx,
1317                                                             views->desc.list +
1318                                                             i * 16 + 4,
1319                                                             old_va, buf);
1320                                 views->desc.dirty_mask |= 1u << i;
1321
1322                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1323                                                       rbuffer, RADEON_USAGE_READ,
1324                                                       RADEON_PRIO_SAMPLER_BUFFER);
1325                         }
1326                 }
1327         }
1328
1329         /* Shader images */
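        /* Image slots are 8 dwords; for buffer images the buffer descriptor
         * sits in dwords [4:7] of the slot, hence i*8 + 4. Writable buffer
         * images also get their range marked valid, since shaders may write
         * to the freshly allocated storage. */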
1330         for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1331                 struct si_images_info *images = &sctx->images[shader];
1332                 unsigned mask = images->desc.enabled_mask;
1333
1334                 while (mask) {
1335                         unsigned i = u_bit_scan(&mask);
1336
1337                         if (images->views[i].resource == buf) {
1338                                 if (images->views[i].access & PIPE_IMAGE_ACCESS_WRITE)
1339                                         si_mark_image_range_valid(&images->views[i]);
1340
1341                                 si_desc_reset_buffer_offset(
1342                                         ctx, images->desc.list + i * 8 + 4,
1343                                         old_va, buf);
1344                                 images->desc.dirty_mask |= 1u << i;
1345
1346                                 radeon_add_to_buffer_list(
1347                                         &sctx->b, &sctx->b.gfx, rbuffer,
1348                                         RADEON_USAGE_READWRITE,
1349                                         RADEON_PRIO_SAMPLER_BUFFER);
1350                         }
1351                 }
1352         }
1353 }
1354
1355 /* SHADER USER DATA */
1356
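/* Shaders receive the GPU address of each descriptor list through user data
 * SGPRs, written with SET_SH_REG packets (see si_emit_shader_pointer). The
 * pointer_dirty flags track which addresses still have to be (re-)emitted,
 * e.g. after a list was re-uploaded or a stage was remapped to a different
 * hardware stage. */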
1357 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1358                                           unsigned shader)
1359 {
1360         sctx->const_buffers[shader].desc.pointer_dirty = true;
1361         sctx->shader_buffers[shader].desc.pointer_dirty = true;
1362         sctx->samplers[shader].views.desc.pointer_dirty = true;
1363         sctx->images[shader].desc.pointer_dirty = true;
1364
1365         if (shader == PIPE_SHADER_VERTEX)
1366                 sctx->vertex_buffers.pointer_dirty = true;
1367
1368         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1369 }
1370
1371 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1372 {
1373         int i;
1374
1375         for (i = 0; i < SI_NUM_SHADERS; i++) {
1376                 si_mark_shader_pointers_dirty(sctx, i);
1377         }
1378         sctx->rw_buffers.desc.pointer_dirty = true;
1379 }
1380
1381 /* Set a base register address for user data constants in the given shader.
1382  * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1383  */
1384 static void si_set_user_data_base(struct si_context *sctx,
1385                                   unsigned shader, uint32_t new_base)
1386 {
1387         uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1388
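        /* A new_base of 0 means the stage is currently not bound to any
         * hardware stage (see si_shader_change_notify); nothing has to be
         * re-emitted for it. */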
1389         if (*base != new_base) {
1390                 *base = new_base;
1391
1392                 if (new_base)
1393                         si_mark_shader_pointers_dirty(sctx, shader);
1394         }
1395 }
1396
1397 /* This must be called when these shaders are changed from non-NULL to NULL
1398  * and vice versa:
1399  * - geometry shader
1400  * - tessellation control shader
1401  * - tessellation evaluation shader
1402  */
1403 void si_shader_change_notify(struct si_context *sctx)
1404 {
1405         /* VS can be bound as VS, ES, or LS. */
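        /* With tessellation, the API VS runs as the hardware LS stage; with
         * only a GS, it runs as ES; otherwise it is the hardware VS. */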
1406         if (sctx->tes_shader.cso)
1407                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1408                                       R_00B530_SPI_SHADER_USER_DATA_LS_0);
1409         else if (sctx->gs_shader.cso)
1410                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1411                                       R_00B330_SPI_SHADER_USER_DATA_ES_0);
1412         else
1413                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1414                                       R_00B130_SPI_SHADER_USER_DATA_VS_0);
1415
1416         /* TES can be bound as ES, VS, or not bound. */
1417         if (sctx->tes_shader.cso) {
1418                 if (sctx->gs_shader.cso)
1419                         si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1420                                               R_00B330_SPI_SHADER_USER_DATA_ES_0);
1421                 else
1422                         si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1423                                               R_00B130_SPI_SHADER_USER_DATA_VS_0);
1424         } else {
1425                 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
1426         }
1427 }
1428
1429 static void si_emit_shader_pointer(struct si_context *sctx,
1430                                    struct si_descriptors *desc,
1431                                    unsigned sh_base, bool keep_dirty)
1432 {
1433         struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1434         uint64_t va;
1435
1436         if (!desc->pointer_dirty || !desc->buffer)
1437                 return;
1438
1439         va = desc->buffer->gpu_address +
1440              desc->buffer_offset;
1441
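        /* SET_SH_REG with 2 dwords writes the low and high halves of the
         * list address into a pair of user data SGPRs; the register index is
         * given in dwords relative to SI_SH_REG_OFFSET. */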
1442         radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1443         radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1444         radeon_emit(cs, va);
1445         radeon_emit(cs, va >> 32);
1446
1447         desc->pointer_dirty = keep_dirty;
1448 }
1449
1450 void si_emit_graphics_shader_userdata(struct si_context *sctx,
1451                                       struct r600_atom *atom)
1452 {
1453         unsigned i;
1454         uint32_t *sh_base = sctx->shader_userdata.sh_base;
1455
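        /* rw_buffers (streamout and other internal buffers) is shared by all
         * graphics stages, so its pointer is broadcast to every graphics user
         * data base; keep_dirty=true lets each call below emit the same list
         * before the flag is finally cleared. */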
1456         if (sctx->rw_buffers.desc.pointer_dirty) {
1457                 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1458                                        R_00B030_SPI_SHADER_USER_DATA_PS_0, true);
1459                 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1460                                        R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1461                 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1462                                        R_00B230_SPI_SHADER_USER_DATA_GS_0, true);
1463                 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1464                                        R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1465                 si_emit_shader_pointer(sctx, &sctx->rw_buffers.desc,
1466                                        R_00B430_SPI_SHADER_USER_DATA_HS_0, true);
1467                 sctx->rw_buffers.desc.pointer_dirty = false;
1468         }
1469
1470         for (i = 0; i < SI_NUM_GRAPHICS_SHADERS; i++) {
1471                 unsigned base = sh_base[i];
1472
1473                 if (!base)
1474                         continue;
1475
1476                 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
1477                 si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
1478                 si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
1479                 si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
1480         }
1481         si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1482 }
1483
1484 void si_emit_compute_shader_userdata(struct si_context *sctx)
1485 {
1486         unsigned base = R_00B900_COMPUTE_USER_DATA_0;
1487
1488         si_emit_shader_pointer(sctx, &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc,
1489                                base, false);
1490         si_emit_shader_pointer(sctx, &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc,
1491                                base, false);
1492         si_emit_shader_pointer(sctx, &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc,
1493                                base, false);
1494         si_emit_shader_pointer(sctx, &sctx->images[PIPE_SHADER_COMPUTE].desc,
1495                                base, false);
1496 }
1497
1498 /* INIT/DEINIT/UPLOAD */
1499
1500 void si_init_all_descriptors(struct si_context *sctx)
1501 {
1502         int i;
1503         unsigned ce_offset = 0;
1504
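        /* Each descriptor list is assigned a slice of constant engine (CE)
         * RAM for updates; ce_offset tracks the running offset, which has to
         * stay within the 32 KB asserted below. */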
1505         for (i = 0; i < SI_NUM_SHADERS; i++) {
1506                 si_init_buffer_resources(&sctx->const_buffers[i],
1507                                          SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1508                                          RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
1509                                          &ce_offset);
1510                 si_init_buffer_resources(&sctx->shader_buffers[i],
1511                                          SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
1512                                          RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
1513                                          &ce_offset);
1514
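                /* 16 dwords per combined image+sampler slot, 8 dwords per
                 * image slot - see the layout described at the top of this
                 * file. */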
1515                 si_init_descriptors(&sctx->samplers[i].views.desc,
1516                                     SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1517                                     null_texture_descriptor, &ce_offset);
1518
1519                 si_init_descriptors(&sctx->images[i].desc,
1520                                     SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1521                                     null_image_descriptor, &ce_offset);
1522         }
1523
1524         si_init_buffer_resources(&sctx->rw_buffers,
1525                                  SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1526                                  RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
1527                                  &ce_offset);
1528         si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1529                             4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
1530
1531         assert(ce_offset <= 32768);
1532
1533         /* Set pipe_context functions. */
1534         sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1535         sctx->b.b.set_shader_images = si_set_shader_images;
1536         sctx->b.b.set_constant_buffer = si_pipe_set_constant_buffer;
1537         sctx->b.b.set_polygon_stipple = si_set_polygon_stipple;
1538         sctx->b.b.set_shader_buffers = si_set_shader_buffers;
1539         sctx->b.b.set_sampler_views = si_set_sampler_views;
1540         sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1541         sctx->b.invalidate_buffer = si_invalidate_buffer;
1542
1543         /* Shader user data. */
1544         si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1545                      si_emit_graphics_shader_userdata);
1546
1547         /* Set default and immutable mappings. */
1548         si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1549         si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1550         si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1551         si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1552 }
1553
1554 bool si_upload_graphics_shader_descriptors(struct si_context *sctx)
1555 {
1556         int i;
1557
1558         for (i = 0; i < SI_NUM_SHADERS; i++) {
1559                 if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc,
1560                                            &sctx->shader_userdata.atom) ||
1561                     !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc,
1562                                            &sctx->shader_userdata.atom) ||
1563                     !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc,
1564                                            &sctx->shader_userdata.atom) ||
1565                     !si_upload_descriptors(sctx, &sctx->images[i].desc,
1566                                            &sctx->shader_userdata.atom))
1567                         return false;
1568         }
1569         return si_upload_descriptors(sctx, &sctx->rw_buffers.desc,
1570                                      &sctx->shader_userdata.atom) &&
1571                si_upload_vertex_buffer_descriptors(sctx);
1572 }
1573
1574 bool si_upload_compute_shader_descriptors(struct si_context *sctx)
1575 {
1576         /* rw_buffers are not updated because they are not needed for compute
1577          * shaders and the input buffer uses the same SGPRs anyway.
1578          */
1579         return si_upload_descriptors(sctx,
1580                         &sctx->const_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
1581                si_upload_descriptors(sctx,
1582                        &sctx->shader_buffers[PIPE_SHADER_COMPUTE].desc, NULL) &&
1583                si_upload_descriptors(sctx,
1584                        &sctx->samplers[PIPE_SHADER_COMPUTE].views.desc, NULL) &&
1585                si_upload_descriptors(sctx,
1586                        &sctx->images[PIPE_SHADER_COMPUTE].desc, NULL);
1587 }
1588
1589 void si_release_all_descriptors(struct si_context *sctx)
1590 {
1591         int i;
1592
1593         for (i = 0; i < SI_NUM_SHADERS; i++) {
1594                 si_release_buffer_resources(&sctx->const_buffers[i]);
1595                 si_release_buffer_resources(&sctx->shader_buffers[i]);
1596                 si_release_sampler_views(&sctx->samplers[i].views);
1597                 si_release_image_views(&sctx->images[i]);
1598         }
1599         si_release_buffer_resources(&sctx->rw_buffers);
1600         si_release_descriptors(&sctx->vertex_buffers);
1601 }
1602
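/* After a command stream flush, re-add all descriptor buffers to the new CS
 * and mark every shader pointer dirty so that everything is re-emitted. */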
1603 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1604 {
1605         int i;
1606
1607         for (i = 0; i < SI_NUM_SHADERS; i++) {
1608                 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1609                 si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
1610                 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1611                 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1612         }
1613         si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers);
1614         si_vertex_buffers_begin_new_cs(sctx);
1615         si_shader_userdata_begin_new_cs(sctx);
1616 }