radeonsi: Use CE for all descriptors.
[android-x86/external-mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Marek Olšák <marek.olsak@amd.com>
25  */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28  * 4 dwords) are stored in lists in memory which is accessed by shaders
29  * using scalar load instructions.
30  *
31  * This file is responsible for managing such lists. It keeps a copy of all
32  * descriptors in CPU memory and re-uploads a whole list if some slots have
33  * been changed.
34  *
35  * This code is also responsible for updating shader pointers to those lists.
36  *
37  * Note that CP DMA can't be used for updating the lists, because a GPU hang
38  * could leave the list in a mid-IB state and the next IB would get wrong
39  * descriptors and the whole context would be unusable at that point.
40  * (Note: register shadowing can't be used for the same reason.)
41  *
42  * Also, uploading descriptors to newly allocated memory doesn't require
43  * a KCACHE flush.
44  *
45  *
46  * Possible scenarios for one 16 dword image+sampler slot:
47  *
48  *       | Image        | w/ FMASK   | Buffer       | NULL
49  * [ 0: 3] Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
50  * [ 4: 7] Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
51  * [ 8:11] Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
52  * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53  *
54  * FMASK implies MSAA, therefore no sampler state.
55  * Sampler states are never unbound except when FMASK is bound.
56  */
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "si_shader.h"
61 #include "sid.h"
62
63 #include "util/u_math.h"
64 #include "util/u_memory.h"
65 #include "util/u_suballoc.h"
66 #include "util/u_upload_mgr.h"
67
68
69 /* NULL image and buffer descriptor for textures (alpha = 1) and images
70  * (alpha = 0).
71  *
72  * For images, all fields must be zero except for the swizzle, which
73  * supports arbitrary combinations of 0s and 1s. The texture type must be
74  * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
75  *
76  * For buffers, all fields must be zero. If they are not, the hw hangs.
77  *
78  * This is the only reason why the buffer descriptor must be in words [4:7].
79  */
80 static uint32_t null_texture_descriptor[8] = {
81         0,
82         0,
83         0,
84         S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
85         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
86         /* the rest must contain zeros, which is also used by the buffer
87          * descriptor */
88 };
89
90 static uint32_t null_image_descriptor[8] = {
91         0,
92         0,
93         0,
94         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
95         /* the rest must contain zeros, which is also used by the buffer
96          * descriptor */
97 };
98
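/* Initialize a descriptor list: allocate the CPU copy, mark all slots dirty,
 * optionally fill it with NULL descriptors, and reserve a 32-byte aligned
 * range of CE RAM for it when a ce_offset is given.
 */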
99 static void si_init_descriptors(struct si_descriptors *desc,
100                                 unsigned shader_userdata_index,
101                                 unsigned element_dw_size,
102                                 unsigned num_elements,
103                                 const uint32_t *null_descriptor,
104                                 unsigned *ce_offset)
105 {
106         int i;
107
108         assert(num_elements <= sizeof(desc->enabled_mask)*8);
109
110         desc->list = CALLOC(num_elements, element_dw_size * 4);
111         desc->element_dw_size = element_dw_size;
112         desc->num_elements = num_elements;
113         desc->dirty_mask = num_elements == 64 ? ~0llu : (1llu << num_elements) - 1;
114         desc->shader_userdata_offset = shader_userdata_index * 4;
115
116         if (ce_offset) {
117                 desc->ce_offset = *ce_offset;
118
119                 /* make sure that ce_offset stays 32 byte aligned */
120                 *ce_offset += align(element_dw_size * num_elements * 4, 32);
121         }
122
123         /* Initialize the array to NULL descriptors if one is provided. */
124         if (null_descriptor) {
125                 assert(element_dw_size % 8 == 0);
126                 for (i = 0; i < num_elements * element_dw_size / 8; i++)
127                         memcpy(desc->list + i * 8, null_descriptor,
128                                8 * 4);
129         }
130 }
131
132 static void si_release_descriptors(struct si_descriptors *desc)
133 {
134         pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
135         FREE(desc->list);
136 }
137
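/* Suballocate GPU memory for a descriptor list and emit DUMP_CONST_RAM to
 * copy "size" bytes starting at "ce_offset" from CE RAM into it.
 * Returns false if the suballocation failed.
 */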
138 static bool si_ce_upload(struct si_context *sctx, unsigned ce_offset, unsigned size,
139                          unsigned *out_offset, struct r600_resource **out_buf) {
140         uint64_t va;
141
142         u_suballocator_alloc(sctx->ce_suballocator, size, out_offset,
143                              (struct pipe_resource**)out_buf);
144         if (!*out_buf)
145                 return false;
146
147         va = (*out_buf)->gpu_address + *out_offset;
148
149         radeon_emit(sctx->ce_ib, PKT3(PKT3_DUMP_CONST_RAM, 3, 0));
150         radeon_emit(sctx->ce_ib, ce_offset);
151         radeon_emit(sctx->ce_ib, size / 4);
152         radeon_emit(sctx->ce_ib, va);
153         radeon_emit(sctx->ce_ib, va >> 32);
154
155         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, *out_buf,
156                                RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
157
158         sctx->ce_need_synchronization = true;
159         return true;
160 }
161
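/* Reload a descriptor list from its last uploaded buffer back into CE RAM
 * with LOAD_CONST_RAM. This is needed when the CE RAM contents are stale,
 * e.g. at the beginning of a new command stream.
 */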
162 static void si_reinitialize_ce_ram(struct si_context *sctx,
163                             struct si_descriptors *desc)
164 {
165         if (desc->buffer) {
166                 struct r600_resource *buffer = (struct r600_resource*)desc->buffer;
167                 unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
168                 uint64_t va = buffer->gpu_address + desc->buffer_offset;
169                 struct radeon_winsys_cs *ib = sctx->ce_preamble_ib;
170
171                 if (!ib)
172                         ib = sctx->ce_ib;
173
174                 list_size = align(list_size, 32);
175
176                 radeon_emit(ib, PKT3(PKT3_LOAD_CONST_RAM, 3, 0));
177                 radeon_emit(ib, va);
178                 radeon_emit(ib, va >> 32);
179                 radeon_emit(ib, list_size / 4);
180                 radeon_emit(ib, desc->ce_offset);
181
182                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
183                                     RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
184         }
185         desc->ce_ram_dirty = false;
186 }
187
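/* Upload the dirty slots of a descriptor list. With a CE IB, only the dirty
 * ranges are written to CE RAM and the whole list is then dumped to a fresh
 * suballocation; without CE, the complete list is re-uploaded through the
 * upload manager.
 */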
188 static bool si_upload_descriptors(struct si_context *sctx,
189                                   struct si_descriptors *desc)
190 {
191         unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
192
193         if (!desc->dirty_mask)
194                 return true;
195
196         if (sctx->ce_ib) {
197                 uint32_t const* list = (uint32_t const*)desc->list;
198
199                 if (desc->ce_ram_dirty)
200                         si_reinitialize_ce_ram(sctx, desc);
201
202                 while(desc->dirty_mask) {
203                         int begin, count;
204                         u_bit_scan_consecutive_range64(&desc->dirty_mask, &begin,
205                                                        &count);
206
207                         begin *= desc->element_dw_size;
208                         count *= desc->element_dw_size;
209
210                         radeon_emit(sctx->ce_ib,
211                                     PKT3(PKT3_WRITE_CONST_RAM, count, 0));
212                         radeon_emit(sctx->ce_ib, desc->ce_offset + begin * 4);
213                         radeon_emit_array(sctx->ce_ib, list + begin, count);
214                 }
215
216                 if (!si_ce_upload(sctx, desc->ce_offset, list_size,
217                                            &desc->buffer_offset, &desc->buffer))
218                         return false;
219         } else {
220                 void *ptr;
221
222                 u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
223                         &desc->buffer_offset,
224                         (struct pipe_resource**)&desc->buffer, &ptr);
225                 if (!desc->buffer)
226                         return false; /* skip the draw call */
227
228                 util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
229
230                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
231                                     RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
232         }
233         desc->pointer_dirty = true;
234         desc->dirty_mask = 0;
235         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
236         return true;
237 }
238
239 /* SAMPLER VIEWS */
240
241 static void si_release_sampler_views(struct si_sampler_views *views)
242 {
243         int i;
244
245         for (i = 0; i < Elements(views->views); i++) {
246                 pipe_sampler_view_reference(&views->views[i], NULL);
247         }
248         si_release_descriptors(&views->desc);
249 }
250
251 static void si_sampler_view_add_buffer(struct si_context *sctx,
252                                        struct pipe_resource *resource)
253 {
254         struct r600_resource *rres = (struct r600_resource*)resource;
255
256         if (!resource)
257                 return;
258
259         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, rres,
260                                   RADEON_USAGE_READ,
261                                   r600_get_sampler_view_priority(rres));
262 }
263
264 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
265                                           struct si_sampler_views *views)
266 {
267         uint64_t mask = views->desc.enabled_mask;
268
269         /* Add buffers to the CS. */
270         while (mask) {
271                 int i = u_bit_scan64(&mask);
272
273                 si_sampler_view_add_buffer(sctx, views->views[i]->texture);
274         }
275
276         views->desc.ce_ram_dirty = true;
277
278         if (!views->desc.buffer)
279                 return;
280         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
281                               RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
282 }
283
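/* Bind one sampler view: write the 8-dword image descriptor into the
 * 16-dword slot, followed by either the FMASK descriptor or the sampler
 * state (see the slot layout at the top of this file), and update the
 * enabled and dirty masks.
 */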
284 static void si_set_sampler_view(struct si_context *sctx,
285                                 struct si_sampler_views *views,
286                                 unsigned slot, struct pipe_sampler_view *view)
287 {
288         struct si_sampler_view *rview = (struct si_sampler_view*)view;
289
290         if (view && view->texture && view->texture->target != PIPE_BUFFER &&
291             G_008F28_COMPRESSION_EN(rview->state[6]) &&
292             ((struct r600_texture*)view->texture)->dcc_offset == 0) {
293                 rview->state[6] &= C_008F28_COMPRESSION_EN &
294                                    C_008F28_ALPHA_IS_ON_MSB;
295         } else if (views->views[slot] == view)
296                 return;
297
298         if (view) {
299                 struct r600_texture *rtex = (struct r600_texture *)view->texture;
300
301                 si_sampler_view_add_buffer(sctx, view->texture);
302
303                 pipe_sampler_view_reference(&views->views[slot], view);
304                 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
305
306                 if (view->texture && view->texture->target != PIPE_BUFFER &&
307                     rtex->fmask.size) {
308                         memcpy(views->desc.list + slot*16 + 8,
309                                rview->fmask_state, 8*4);
310                 } else {
311                         /* Disable FMASK and bind sampler state in [12:15]. */
312                         memcpy(views->desc.list + slot*16 + 8,
313                                null_texture_descriptor, 4*4);
314
315                         if (views->sampler_states[slot])
316                                 memcpy(views->desc.list + slot*16 + 12,
317                                        views->sampler_states[slot], 4*4);
318                 }
319
320                 views->desc.enabled_mask |= 1llu << slot;
321         } else {
322                 pipe_sampler_view_reference(&views->views[slot], NULL);
323                 memcpy(views->desc.list + slot*16, null_texture_descriptor, 8*4);
324                 /* Only clear the lower dwords of FMASK. */
325                 memcpy(views->desc.list + slot*16 + 8, null_texture_descriptor, 4*4);
326                 views->desc.enabled_mask &= ~(1llu << slot);
327         }
328
329         views->desc.dirty_mask |= 1llu << slot;
330 }
331
332 static bool is_compressed_colortex(struct r600_texture *rtex)
333 {
334         return rtex->cmask.size || rtex->fmask.size ||
335                (rtex->dcc_offset && rtex->dirty_level_mask);
336 }
337
338 static void si_set_sampler_views(struct pipe_context *ctx,
339                                  unsigned shader, unsigned start,
340                                  unsigned count,
341                                  struct pipe_sampler_view **views)
342 {
343         struct si_context *sctx = (struct si_context *)ctx;
344         struct si_textures_info *samplers = &sctx->samplers[shader];
345         int i;
346
347         if (!count || shader >= SI_NUM_SHADERS)
348                 return;
349
350         for (i = 0; i < count; i++) {
351                 unsigned slot = start + i;
352
353                 if (!views || !views[i]) {
354                         samplers->depth_texture_mask &= ~(1llu << slot);
355                         samplers->compressed_colortex_mask &= ~(1llu << slot);
356                         si_set_sampler_view(sctx, &samplers->views, slot, NULL);
357                         continue;
358                 }
359
360                 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
361
362                 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
363                         struct r600_texture *rtex =
364                                 (struct r600_texture*)views[i]->texture;
365
366                         if (rtex->is_depth && !rtex->is_flushing_texture) {
367                                 samplers->depth_texture_mask |= 1llu << slot;
368                         } else {
369                                 samplers->depth_texture_mask &= ~(1llu << slot);
370                         }
371                         if (is_compressed_colortex(rtex)) {
372                                 samplers->compressed_colortex_mask |= 1llu << slot;
373                         } else {
374                                 samplers->compressed_colortex_mask &= ~(1llu << slot);
375                         }
376                 } else {
377                         samplers->depth_texture_mask &= ~(1llu << slot);
378                         samplers->compressed_colortex_mask &= ~(1llu << slot);
379                 }
380         }
381 }
382
383 static void
384 si_samplers_update_compressed_colortex_mask(struct si_textures_info *samplers)
385 {
386         uint64_t mask = samplers->views.desc.enabled_mask;
387
388         while (mask) {
389                 int i = u_bit_scan64(&mask);
390                 struct pipe_resource *res = samplers->views.views[i]->texture;
391
392                 if (res && res->target != PIPE_BUFFER) {
393                         struct r600_texture *rtex = (struct r600_texture *)res;
394
395                         if (is_compressed_colortex(rtex)) {
396                                 samplers->compressed_colortex_mask |= 1llu << i;
397                         } else {
398                                 samplers->compressed_colortex_mask &= ~(1llu << i);
399                         }
400                 }
401         }
402 }
403
404 /* IMAGE VIEWS */
405
406 static void
407 si_release_image_views(struct si_images_info *images)
408 {
409         unsigned i;
410
411         for (i = 0; i < SI_NUM_IMAGES; ++i) {
412                 struct pipe_image_view *view = &images->views[i];
413
414                 pipe_resource_reference(&view->resource, NULL);
415         }
416
417         si_release_descriptors(&images->desc);
418 }
419
420 static void
421 si_image_views_begin_new_cs(struct si_context *sctx, struct si_images_info *images)
422 {
423         uint mask = images->desc.enabled_mask;
424
425         /* Add buffers to the CS. */
426         while (mask) {
427                 int i = u_bit_scan(&mask);
428                 struct pipe_image_view *view = &images->views[i];
429
430                 assert(view->resource);
431
432                 si_sampler_view_add_buffer(sctx, view->resource);
433         }
434
435         images->desc.ce_ram_dirty = true;
436
437         if (images->desc.buffer) {
438                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
439                                           images->desc.buffer,
440                                           RADEON_USAGE_READ,
441                                           RADEON_PRIO_DESCRIPTORS);
442         }
443 }
444
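/* Unbind a shader image slot: drop the resource reference, write a NULL
 * image descriptor and clear the slot in the enabled mask.
 */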
445 static void
446 si_disable_shader_image(struct si_images_info *images, unsigned slot)
447 {
448         if (images->desc.enabled_mask & (1llu << slot)) {
449                 pipe_resource_reference(&images->views[slot].resource, NULL);
450                 images->compressed_colortex_mask &= ~(1 << slot);
451
452                 memcpy(images->desc.list + slot*8, null_image_descriptor, 8*4);
453                 images->desc.enabled_mask &= ~(1llu << slot);
454                 images->desc.dirty_mask |= 1llu << slot;
455         }
456 }
457
458 static void
459 si_set_shader_images(struct pipe_context *pipe, unsigned shader,
460                      unsigned start_slot, unsigned count,
461                      struct pipe_image_view *views)
462 {
463         struct si_context *ctx = (struct si_context *)pipe;
464         struct si_screen *screen = ctx->screen;
465         struct si_images_info *images = &ctx->images[shader];
466         unsigned i, slot;
467
468         assert(shader < SI_NUM_SHADERS);
469
470         if (!count)
471                 return;
472
473         assert(start_slot + count <= SI_NUM_IMAGES);
474
475         for (i = 0, slot = start_slot; i < count; ++i, ++slot) {
476                 struct r600_resource *res;
477
478                 if (!views || !views[i].resource) {
479                         si_disable_shader_image(images, slot);
480                         continue;
481                 }
482
483                 res = (struct r600_resource *)views[i].resource;
484                 util_copy_image_view(&images->views[slot], &views[i]);
485
486                 si_sampler_view_add_buffer(ctx, &res->b.b);
487
488                 if (res->b.b.target == PIPE_BUFFER) {
489                         si_make_buffer_descriptor(screen, res,
490                                                   views[i].format,
491                                                   views[i].u.buf.first_element,
492                                                   views[i].u.buf.last_element,
493                                                   images->desc.list + slot * 8);
494                         images->compressed_colortex_mask &= ~(1 << slot);
495                 } else {
496                         static const unsigned char swizzle[4] = { 0, 1, 2, 3 };
497                         struct r600_texture *tex = (struct r600_texture *)res;
498                         unsigned level;
499                         unsigned width, height, depth;
500
501                         assert(!tex->is_depth);
502                         assert(tex->fmask.size == 0);
503
504                         if (tex->dcc_offset &&
505                             views[i].access & PIPE_IMAGE_ACCESS_WRITE)
506                                 r600_texture_disable_dcc(&screen->b, tex);
507
508                         if (is_compressed_colortex(tex)) {
509                                 images->compressed_colortex_mask |= 1 << slot;
510                         } else {
511                                 images->compressed_colortex_mask &= ~(1 << slot);
512                         }
513
514                         /* Always force the base level to the selected level.
515                          *
516                          * This is required for 3D textures, where otherwise
517                          * selecting a single slice for non-layered bindings
518                          * fails. It doesn't hurt the other targets.
519                          */
520                         level = views[i].u.tex.level;
521                         width = u_minify(res->b.b.width0, level);
522                         height = u_minify(res->b.b.height0, level);
523                         depth = u_minify(res->b.b.depth0, level);
524
525                         si_make_texture_descriptor(screen, tex, false, res->b.b.target,
526                                                    views[i].format, swizzle,
527                                                    level, 0, 0,
528                                                    views[i].u.tex.first_layer, views[i].u.tex.last_layer,
529                                                    width, height, depth,
530                                                    images->desc.list + slot * 8,
531                                                    NULL);
532                 }
533
534                 images->desc.enabled_mask |= 1llu << slot;
535                 images->desc.dirty_mask |= 1llu << slot;
536         }
537 }
538
539 static void
540 si_images_update_compressed_colortex_mask(struct si_images_info *images)
541 {
542         uint64_t mask = images->desc.enabled_mask;
543
544         while (mask) {
545                 int i = u_bit_scan64(&mask);
546                 struct pipe_resource *res = images->views[i].resource;
547
548                 if (res && res->target != PIPE_BUFFER) {
549                         struct r600_texture *rtex = (struct r600_texture *)res;
550
551                         if (is_compressed_colortex(rtex)) {
552                                 images->compressed_colortex_mask |= 1 << i;
553                         } else {
554                                 images->compressed_colortex_mask &= ~(1 << i);
555                         }
556                 }
557         }
558 }
559
560 /* SAMPLER STATES */
561
562 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
563                                    unsigned start, unsigned count, void **states)
564 {
565         struct si_context *sctx = (struct si_context *)ctx;
566         struct si_textures_info *samplers = &sctx->samplers[shader];
567         struct si_descriptors *desc = &samplers->views.desc;
568         struct si_sampler_state **sstates = (struct si_sampler_state**)states;
569         int i;
570
571         if (!count || shader >= SI_NUM_SHADERS)
572                 return;
573
574         for (i = 0; i < count; i++) {
575                 unsigned slot = start + i;
576
577                 if (!sstates[i] ||
578                     sstates[i] == samplers->views.sampler_states[slot])
579                         continue;
580
581                 samplers->views.sampler_states[slot] = sstates[i];
582
583                 /* If FMASK is bound, don't overwrite it.
584                  * The sampler state will be set after FMASK is unbound.
585                  */
586                 if (samplers->views.views[slot] &&
587                     samplers->views.views[slot]->texture &&
588                     samplers->views.views[slot]->texture->target != PIPE_BUFFER &&
589                     ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
590                         continue;
591
592                 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
593                 desc->dirty_mask |= 1llu << slot;
594         }
595 }
596
597 /* BUFFER RESOURCES */
598
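/* Initialize a list of 4-dword buffer descriptors (used for constant,
 * shader and ring/streamout buffers) on top of si_init_descriptors.
 */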
599 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
600                                      unsigned num_buffers,
601                                      unsigned shader_userdata_index,
602                                      enum radeon_bo_usage shader_usage,
603                                      enum radeon_bo_priority priority,
604                                      unsigned *ce_offset)
605 {
606         buffers->shader_usage = shader_usage;
607         buffers->priority = priority;
608         buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
609
610         si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
611                             num_buffers, NULL, ce_offset);
612 }
613
614 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
615 {
616         int i;
617
618         for (i = 0; i < buffers->desc.num_elements; i++) {
619                 pipe_resource_reference(&buffers->buffers[i], NULL);
620         }
621
622         FREE(buffers->buffers);
623         si_release_descriptors(&buffers->desc);
624 }
625
626 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
627                                              struct si_buffer_resources *buffers)
628 {
629         uint64_t mask = buffers->desc.enabled_mask;
630
631         /* Add buffers to the CS. */
632         while (mask) {
633                 int i = u_bit_scan64(&mask);
634
635                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
636                                       (struct r600_resource*)buffers->buffers[i],
637                                       buffers->shader_usage, buffers->priority);
638         }
639
640         buffers->desc.ce_ram_dirty = true;
641
642         if (!buffers->desc.buffer)
643                 return;
644         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
645                               buffers->desc.buffer, RADEON_USAGE_READWRITE,
646                               RADEON_PRIO_DESCRIPTORS);
647 }
648
649 /* VERTEX BUFFERS */
650
651 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
652 {
653         struct si_descriptors *desc = &sctx->vertex_buffers;
654         int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
655         int i;
656
657         for (i = 0; i < count; i++) {
658                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
659
660                 if (vb >= Elements(sctx->vertex_buffer))
661                         continue;
662                 if (!sctx->vertex_buffer[vb].buffer)
663                         continue;
664
665                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
666                                       (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
667                                       RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
668         }
669
670         if (!desc->buffer)
671                 return;
672         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
673                               desc->buffer, RADEON_USAGE_READ,
674                               RADEON_PRIO_DESCRIPTORS);
675 }
676
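/* Build and upload the vertex buffer descriptors for the bound vertex
 * elements. This path always uses a freshly uploaded buffer and bypasses
 * the CE/fine-grained update path used by the other descriptor lists.
 */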
677 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
678 {
679         struct si_descriptors *desc = &sctx->vertex_buffers;
680         bool bound[SI_NUM_VERTEX_BUFFERS] = {};
681         unsigned i, count;
682         uint64_t va;
683         uint32_t *ptr;
684
685         if (!sctx->vertex_buffers_dirty || !sctx->vertex_elements)
686                 return true;
687         count = sctx->vertex_elements->count;
688         if (!count)
689                 return true;
690         /* Vertex buffer descriptors are the only ones which are uploaded
691          * directly through a staging buffer and don't go through
692          * the fine-grained upload path.
693          */
694         u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
695                        (struct pipe_resource**)&desc->buffer, (void**)&ptr);
696         if (!desc->buffer)
697                 return false;
698
699         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
700                               desc->buffer, RADEON_USAGE_READ,
701                               RADEON_PRIO_DESCRIPTORS);
702
703         assert(count <= SI_NUM_VERTEX_BUFFERS);
704
705         for (i = 0; i < count; i++) {
706                 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
707                 struct pipe_vertex_buffer *vb;
708                 struct r600_resource *rbuffer;
709                 unsigned offset;
710                 uint32_t *desc = &ptr[i*4];
711
712                 if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
713                         memset(desc, 0, 16);
714                         continue;
715                 }
716
717                 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
718                 rbuffer = (struct r600_resource*)vb->buffer;
719                 if (!rbuffer) {
720                         memset(desc, 0, 16);
721                         continue;
722                 }
723
724                 offset = vb->buffer_offset + ve->src_offset;
725                 va = rbuffer->gpu_address + offset;
726
727                 /* Fill in T# buffer resource description */
728                 desc[0] = va;
729                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
730                           S_008F04_STRIDE(vb->stride);
731
732                 if (sctx->b.chip_class <= CIK && vb->stride)
733                         /* Round up by rounding down and adding 1 */
734                         desc[2] = (vb->buffer->width0 - offset -
735                                    sctx->vertex_elements->format_size[i]) /
736                                   vb->stride + 1;
737                 else
738                         desc[2] = vb->buffer->width0 - offset;
739
740                 desc[3] = sctx->vertex_elements->rsrc_word3[i];
741
742                 if (!bound[ve->vertex_buffer_index]) {
743                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
744                                               (struct r600_resource*)vb->buffer,
745                                               RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
746                         bound[ve->vertex_buffer_index] = true;
747                 }
748         }
749
750         /* Don't flush the const cache. It would have a very negative effect
751          * on performance (confirmed by testing). New descriptors are always
752          * uploaded to a fresh new buffer, so I don't think flushing the const
753          * cache is needed. */
754         desc->pointer_dirty = true;
755         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
756         sctx->vertex_buffers_dirty = false;
757         return true;
758 }
759
760
761 /* CONSTANT BUFFERS */
762
763 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
764                             const uint8_t *ptr, unsigned size, uint32_t *const_offset)
765 {
766         void *tmp;
767
768         u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
769                        (struct pipe_resource**)rbuffer, &tmp);
770         if (*rbuffer)
771                 util_memcpy_cpu_to_le32(tmp, ptr, size);
772 }
773
774 static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
775                                    struct pipe_constant_buffer *input)
776 {
777         struct si_context *sctx = (struct si_context *)ctx;
778         struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
779
780         if (shader >= SI_NUM_SHADERS)
781                 return;
782
783         assert(slot < buffers->desc.num_elements);
784         pipe_resource_reference(&buffers->buffers[slot], NULL);
785
786         /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
787          * with a NULL buffer). We need to use a dummy buffer instead. */
788         if (sctx->b.chip_class == CIK &&
789             (!input || (!input->buffer && !input->user_buffer)))
790                 input = &sctx->null_const_buf;
791
792         if (input && (input->buffer || input->user_buffer)) {
793                 struct pipe_resource *buffer = NULL;
794                 uint64_t va;
795
796                 /* Upload the user buffer if needed. */
797                 if (input->user_buffer) {
798                         unsigned buffer_offset;
799
800                         si_upload_const_buffer(sctx,
801                                                (struct r600_resource**)&buffer, input->user_buffer,
802                                                input->buffer_size, &buffer_offset);
803                         if (!buffer) {
804                                 /* Just unbind on failure. */
805                                 si_set_constant_buffer(ctx, shader, slot, NULL);
806                                 return;
807                         }
808                         va = r600_resource(buffer)->gpu_address + buffer_offset;
809                 } else {
810                         pipe_resource_reference(&buffer, input->buffer);
811                         va = r600_resource(buffer)->gpu_address + input->buffer_offset;
812                 }
813
814                 /* Set the descriptor. */
815                 uint32_t *desc = buffers->desc.list + slot*4;
816                 desc[0] = va;
817                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
818                           S_008F04_STRIDE(0);
819                 desc[2] = input->buffer_size;
820                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
821                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
822                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
823                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
824                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
825                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
826
827                 buffers->buffers[slot] = buffer;
828                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
829                                       (struct r600_resource*)buffer,
830                                       buffers->shader_usage, buffers->priority);
831                 buffers->desc.enabled_mask |= 1llu << slot;
832         } else {
833                 /* Clear the descriptor. */
834                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
835                 buffers->desc.enabled_mask &= ~(1llu << slot);
836         }
837
838         buffers->desc.dirty_mask |= 1llu << slot;
839 }
840
841 /* SHADER BUFFERS */
842
843 static void si_set_shader_buffers(struct pipe_context *ctx, unsigned shader,
844                                   unsigned start_slot, unsigned count,
845                                   struct pipe_shader_buffer *sbuffers)
846 {
847         struct si_context *sctx = (struct si_context *)ctx;
848         struct si_buffer_resources *buffers = &sctx->shader_buffers[shader];
849         unsigned i;
850
851         assert(start_slot + count <= SI_NUM_SHADER_BUFFERS);
852
853         for (i = 0; i < count; ++i) {
854                 struct pipe_shader_buffer *sbuffer = sbuffers ? &sbuffers[i] : NULL;
855                 struct r600_resource *buf;
856                 unsigned slot = start_slot + i;
857                 uint32_t *desc = buffers->desc.list + slot * 4;
858                 uint64_t va;
859
860                 if (!sbuffer || !sbuffer->buffer) {
861                         pipe_resource_reference(&buffers->buffers[slot], NULL);
862                         memset(desc, 0, sizeof(uint32_t) * 4);
863                         buffers->desc.enabled_mask &= ~(1llu << slot);
864                         continue;
865                 }
866
867                 buf = (struct r600_resource *)sbuffer->buffer;
868                 va = buf->gpu_address + sbuffer->buffer_offset;
869
870                 desc[0] = va;
871                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
872                           S_008F04_STRIDE(0);
873                 desc[2] = sbuffer->buffer_size;
874                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
875                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
876                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
877                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
878                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
879                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
880
881                 pipe_resource_reference(&buffers->buffers[slot], &buf->b.b);
882                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, buf,
883                                       buffers->shader_usage, buffers->priority);
884                 buffers->desc.enabled_mask |= 1llu << slot;
885                 buffers->desc.dirty_mask |= 1llu << slot;
886         }
887
888 }
889
890 /* RING BUFFERS */
891
892 void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
893                         struct pipe_resource *buffer,
894                         unsigned stride, unsigned num_records,
895                         bool add_tid, bool swizzle,
896                         unsigned element_size, unsigned index_stride, uint64_t offset)
897 {
898         struct si_context *sctx = (struct si_context *)ctx;
899         struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
900
901         if (shader >= SI_NUM_SHADERS)
902                 return;
903
904         /* The stride field in the resource descriptor has 14 bits */
905         assert(stride < (1 << 14));
906
907         assert(slot < buffers->desc.num_elements);
908         pipe_resource_reference(&buffers->buffers[slot], NULL);
909
910         if (buffer) {
911                 uint64_t va;
912
913                 va = r600_resource(buffer)->gpu_address + offset;
914
915                 switch (element_size) {
916                 default:
917                         assert(!"Unsupported ring buffer element size");
918                 case 0:
919                 case 2:
920                         element_size = 0;
921                         break;
922                 case 4:
923                         element_size = 1;
924                         break;
925                 case 8:
926                         element_size = 2;
927                         break;
928                 case 16:
929                         element_size = 3;
930                         break;
931                 }
932
933                 switch (index_stride) {
934                 default:
935                         assert(!"Unsupported ring buffer index stride");
936                 case 0:
937                 case 8:
938                         index_stride = 0;
939                         break;
940                 case 16:
941                         index_stride = 1;
942                         break;
943                 case 32:
944                         index_stride = 2;
945                         break;
946                 case 64:
947                         index_stride = 3;
948                         break;
949                 }
950
951                 if (sctx->b.chip_class >= VI && stride)
952                         num_records *= stride;
953
954                 /* Set the descriptor. */
955                 uint32_t *desc = buffers->desc.list + slot*4;
956                 desc[0] = va;
957                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
958                           S_008F04_STRIDE(stride) |
959                           S_008F04_SWIZZLE_ENABLE(swizzle);
960                 desc[2] = num_records;
961                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
962                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
963                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
964                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
965                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
966                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
967                           S_008F0C_ELEMENT_SIZE(element_size) |
968                           S_008F0C_INDEX_STRIDE(index_stride) |
969                           S_008F0C_ADD_TID_ENABLE(add_tid);
970
971                 pipe_resource_reference(&buffers->buffers[slot], buffer);
972                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
973                                       (struct r600_resource*)buffer,
974                                       buffers->shader_usage, buffers->priority);
975                 buffers->desc.enabled_mask |= 1llu << slot;
976         } else {
977                 /* Clear the descriptor. */
978                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
979                 buffers->desc.enabled_mask &= ~(1llu << slot);
980         }
981
982         buffers->desc.dirty_mask |= 1llu << slot;
983 }
984
985 /* STREAMOUT BUFFERS */
986
987 static void si_set_streamout_targets(struct pipe_context *ctx,
988                                      unsigned num_targets,
989                                      struct pipe_stream_output_target **targets,
990                                      const unsigned *offsets)
991 {
992         struct si_context *sctx = (struct si_context *)ctx;
993         struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
994         unsigned old_num_targets = sctx->b.streamout.num_targets;
995         unsigned i, bufidx;
996
997         /* We are going to unbind the buffers. Mark which caches need to be flushed. */
998         if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
999                 /* Since streamout uses vector writes which go through TC L2
1000                  * and most other clients can use TC L2 as well, we don't need
1001                  * to flush it.
1002                  *
1003                  * The only case which requires flushing it is VGT DMA index
1004                  * fetching, which is a rare case. Thus, flag the TC L2
1005                  * dirtiness in the resource and handle it when index fetching
1006                  * is used.
1007                  */
1008                 for (i = 0; i < sctx->b.streamout.num_targets; i++)
1009                         if (sctx->b.streamout.targets[i])
1010                                 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
1011
1012                 /* Invalidate the scalar cache in case a streamout buffer is
1013                  * going to be used as a constant buffer.
1014                  *
1015                  * Invalidate TC L1, because streamout bypasses it (done by
1016                  * setting GLC=1 in the store instruction), but it can contain
1017                  * outdated data of streamout buffers.
1018                  *
1019                  * VS_PARTIAL_FLUSH is required if the buffers are going to be
1020                  * used as an input immediately.
1021                  */
1022                 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
1023                                  SI_CONTEXT_INV_VMEM_L1 |
1024                                  SI_CONTEXT_VS_PARTIAL_FLUSH;
1025         }
1026
1027         /* All readers of the streamout targets need to be finished before we can
1028          * start writing to the targets.
1029          */
1030         if (num_targets)
1031                 sctx->b.flags |= SI_CONTEXT_PS_PARTIAL_FLUSH;
1032
1033         /* Streamout buffers must be bound in 2 places:
1034          * 1) in VGT by setting the VGT_STRMOUT registers
1035          * 2) as shader resources
1036          */
1037
1038         /* Set the VGT regs. */
1039         r600_set_streamout_targets(ctx, num_targets, targets, offsets);
1040
1041         /* Set the shader resources. */
1042         for (i = 0; i < num_targets; i++) {
1043                 bufidx = SI_SO_BUF_OFFSET + i;
1044
1045                 if (targets[i]) {
1046                         struct pipe_resource *buffer = targets[i]->buffer;
1047                         uint64_t va = r600_resource(buffer)->gpu_address;
1048
1049                         /* Set the descriptor.
1050                          *
1051                          * On VI, the format must be non-INVALID, otherwise
1052                          * the buffer will be considered not bound and store
1053                          * instructions will be no-ops.
1054                          */
1055                         uint32_t *desc = buffers->desc.list + bufidx*4;
1056                         desc[0] = va;
1057                         desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
1058                         desc[2] = 0xffffffff;
1059                         desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1060                                   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1061                                   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1062                                   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1063                                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
1064
1065                         /* Set the resource. */
1066                         pipe_resource_reference(&buffers->buffers[bufidx],
1067                                                 buffer);
1068                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1069                                               (struct r600_resource*)buffer,
1070                                               buffers->shader_usage, buffers->priority);
1071                         buffers->desc.enabled_mask |= 1llu << bufidx;
1072                 } else {
1073                         /* Clear the descriptor and unset the resource. */
1074                         memset(buffers->desc.list + bufidx*4, 0,
1075                                sizeof(uint32_t) * 4);
1076                         pipe_resource_reference(&buffers->buffers[bufidx],
1077                                                 NULL);
1078                         buffers->desc.enabled_mask &= ~(1llu << bufidx);
1079                 }
1080                 buffers->desc.dirty_mask |= 1llu << bufidx;
1081         }
1082         for (; i < old_num_targets; i++) {
1083                 bufidx = SI_SO_BUF_OFFSET + i;
1084                 /* Clear the descriptor and unset the resource. */
1085                 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
1086                 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
1087                 buffers->desc.enabled_mask &= ~(1llu << bufidx);
1088                 buffers->desc.dirty_mask |= 1llu << bufidx;
1089         }
1090
1091 }
1092
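/* Patch the 64-bit base address in a buffer descriptor after the underlying
 * buffer has been reallocated, preserving the offset within the buffer.
 */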
1093 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
1094                                         uint32_t *desc, uint64_t old_buf_va,
1095                                         struct pipe_resource *new_buf)
1096 {
1097         /* Retrieve the buffer offset from the descriptor. */
1098         uint64_t old_desc_va =
1099                 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
1100
1101         assert(old_buf_va <= old_desc_va);
1102         uint64_t offset_within_buffer = old_desc_va - old_buf_va;
1103
1104         /* Update the descriptor. */
1105         uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
1106
1107         desc[0] = va;
1108         desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
1109                   S_008F04_BASE_ADDRESS_HI(va >> 32);
1110 }
1111
1112 /* TEXTURE METADATA ENABLE/DISABLE */
1113
1114 /* CMASK can be enabled (for fast clear) and disabled (for texture export)
1115  * while the texture is bound, possibly by a different context. In that case,
1116  * call this function to update compressed_colortex_masks.
1117  */
1118 void si_update_compressed_colortex_masks(struct si_context *sctx)
1119 {
1120         for (int i = 0; i < SI_NUM_SHADERS; ++i) {
1121                 si_samplers_update_compressed_colortex_mask(&sctx->samplers[i]);
1122                 si_images_update_compressed_colortex_mask(&sctx->images[i]);
1123         }
1124 }
1125
1126 /* BUFFER DISCARD/INVALIDATION */
1127
1128 /** Reset descriptors of buffer resources after \p buf has been invalidated. */
1129 static void si_reset_buffer_resources(struct si_context *sctx,
1130                                       struct si_buffer_resources *buffers,
1131                                       struct pipe_resource *buf,
1132                                       uint64_t old_va)
1133 {
1134         uint64_t mask = buffers->desc.enabled_mask;
1135
1136         while (mask) {
1137                 unsigned i = u_bit_scan64(&mask);
1138                 if (buffers->buffers[i] == buf) {
1139                         si_desc_reset_buffer_offset(&sctx->b.b,
1140                                                     buffers->desc.list + i*4,
1141                                                     old_va, buf);
1142                         buffers->desc.dirty_mask |= 1llu << i;
1143
1144                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1145                                                 (struct r600_resource *)buf,
1146                                                 buffers->shader_usage,
1147                                                 buffers->priority);
1148                 }
1149         }
1150 }
1151
1152 /* Reallocate a buffer and update all resource bindings where the buffer is
1153  * bound.
1154  *
1155  * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
1156  * idle by discarding its contents. Apps usually tell us when to do this using
1157  * map_buffer flags, for example.
1158  */
1159 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
1160 {
1161         struct si_context *sctx = (struct si_context*)ctx;
1162         struct r600_resource *rbuffer = r600_resource(buf);
1163         unsigned i, shader, alignment = rbuffer->buf->alignment;
1164         uint64_t old_va = rbuffer->gpu_address;
1165         unsigned num_elems = sctx->vertex_elements ?
1166                                        sctx->vertex_elements->count : 0;
1167         struct si_sampler_view *view;
1168
1169         /* Reallocate the buffer in the same pipe_resource. */
1170         r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
1171                            alignment, TRUE);
1172
1173         /* We changed the buffer, now we need to bind it where the old one
1174          * was bound. This consists of 2 things:
1175          *   1) Updating the resource descriptor and dirtying it.
1176          *   2) Adding a relocation to the CS, so that it's usable.
1177          */
1178
1179         /* Vertex buffers. */
1180         for (i = 0; i < num_elems; i++) {
1181                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
1182
1183                 if (vb >= Elements(sctx->vertex_buffer))
1184                         continue;
1185                 if (!sctx->vertex_buffer[vb].buffer)
1186                         continue;
1187
1188                 if (sctx->vertex_buffer[vb].buffer == buf) {
1189                         sctx->vertex_buffers_dirty = true;
1190                         break;
1191                 }
1192         }
1193
1194         /* Read/Write buffers. */
1195         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1196                 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
1197                 uint64_t mask = buffers->desc.enabled_mask;
1198
1199                 while (mask) {
1200                         i = u_bit_scan64(&mask);
1201                         if (buffers->buffers[i] == buf) {
1202                                 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
1203                                                             old_va, buf);
1204                                 buffers->desc.dirty_mask |= 1llu << i;
1205
1206                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1207                                                       rbuffer, buffers->shader_usage,
1208                                                       buffers->priority);
1209
1210                                 if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
1211                                         /* Update the streamout state. */
1212                                         if (sctx->b.streamout.begin_emitted) {
1213                                                 r600_emit_streamout_end(&sctx->b);
1214                                         }
1215                                         sctx->b.streamout.append_bitmask =
1216                                                 sctx->b.streamout.enabled_mask;
1217                                         r600_streamout_buffers_dirty(&sctx->b);
1218                                 }
1219                         }
1220                 }
1221         }
1222
1223         /* Constant and shader buffers. */
1224         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1225                 si_reset_buffer_resources(sctx, &sctx->const_buffers[shader],
1226                                           buf, old_va);
1227                 si_reset_buffer_resources(sctx, &sctx->shader_buffers[shader],
1228                                           buf, old_va);
1229         }
1230
1231         /* Texture buffers - update virtual addresses in sampler view descriptors. */
1232         LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1233                 if (view->base.texture == buf) {
1234                         si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
1235                 }
1236         }
1237         /* Texture buffers - update bindings. */
1238         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1239                 struct si_sampler_views *views = &sctx->samplers[shader].views;
1240                 uint64_t mask = views->desc.enabled_mask;
1241
1242                 while (mask) {
1243                         unsigned i = u_bit_scan64(&mask);
1244                         if (views->views[i]->texture == buf) {
1245                                 si_desc_reset_buffer_offset(ctx,
1246                                                             views->desc.list +
1247                                                             i * 16 + 4,
1248                                                             old_va, buf);
1249                                 views->desc.dirty_mask |= 1llu << i;
1250
1251                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
1252                                                       rbuffer, RADEON_USAGE_READ,
1253                                                       RADEON_PRIO_SAMPLER_BUFFER);
1254                         }
1255                 }
1256         }
1257
1258         /* Shader images */
1259         for (shader = 0; shader < SI_NUM_SHADERS; ++shader) {
1260                 struct si_images_info *images = &sctx->images[shader];
1261                 unsigned mask = images->desc.enabled_mask;
1262
1263                 while (mask) {
1264                         unsigned i = u_bit_scan(&mask);
1265
1266                         if (images->views[i].resource == buf) {
1267                                 si_desc_reset_buffer_offset(
1268                                         ctx, images->desc.list + i * 8 + 4,
1269                                         old_va, buf);
1270                                 images->desc.dirty_mask |= 1llu << i;
1271
1272                                 radeon_add_to_buffer_list(
1273                                         &sctx->b, &sctx->b.gfx, rbuffer,
1274                                         RADEON_USAGE_READWRITE,
1275                                         RADEON_PRIO_SAMPLER_BUFFER);
1276                         }
1277                 }
1278         }
1279 }
1280
1281 /* SHADER USER DATA */
1282
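/* Each shader stage reads its descriptor lists through 64-bit pointers
 * stored in user data SGPRs; the code below keeps those pointers up to
 * date by re-emitting SET_SH_REG packets whenever a list is re-uploaded
 * or a new command stream begins.
 */

/* Mark every descriptor pointer of one shader stage dirty and flag the
 * shader_userdata atom so that the pointers are re-emitted before the
 * next draw.
 */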
1283 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
1284                                           unsigned shader)
1285 {
1286         sctx->const_buffers[shader].desc.pointer_dirty = true;
1287         sctx->rw_buffers[shader].desc.pointer_dirty = true;
1288         sctx->shader_buffers[shader].desc.pointer_dirty = true;
1289         sctx->samplers[shader].views.desc.pointer_dirty = true;
1290         sctx->images[shader].desc.pointer_dirty = true;
1291
1292         if (shader == PIPE_SHADER_VERTEX)
1293                 sctx->vertex_buffers.pointer_dirty = true;
1294
1295         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
1296 }
1297
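/* Register state is not preserved across command streams, so all shader
 * pointers must be marked dirty and re-emitted in the new one.
 */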
1298 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
1299 {
1300         int i;
1301
1302         for (i = 0; i < SI_NUM_SHADERS; i++) {
1303                 si_mark_shader_pointers_dirty(sctx, i);
1304         }
1305 }
1306
1307 /* Set a base register address for user data constants in the given shader.
1308  * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
1309  */
1310 static void si_set_user_data_base(struct si_context *sctx,
1311                                   unsigned shader, uint32_t new_base)
1312 {
1313         uint32_t *base = &sctx->shader_userdata.sh_base[shader];
1314
1315         if (*base != new_base) {
1316                 *base = new_base;
1317
1318                 if (new_base)
1319                         si_mark_shader_pointers_dirty(sctx, shader);
1320         }
1321 }
1322
1323 /* This must be called when these shaders are changed from non-NULL to NULL
1324  * and vice versa:
1325  * - geometry shader
1326  * - tessellation control shader
1327  * - tessellation evaluation shader
1328  */
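/* For example, with both TES and GS bound, the API vertex shader runs on
 * the LS hardware stage and TES runs on the ES stage, so their user data
 * pointers are redirected to the LS/ES register ranges below.
 */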
1329 void si_shader_change_notify(struct si_context *sctx)
1330 {
1331         /* VS can be bound as VS, ES, or LS. */
1332         if (sctx->tes_shader.cso)
1333                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1334                                       R_00B530_SPI_SHADER_USER_DATA_LS_0);
1335         else if (sctx->gs_shader.cso)
1336                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1337                                       R_00B330_SPI_SHADER_USER_DATA_ES_0);
1338         else
1339                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
1340                                       R_00B130_SPI_SHADER_USER_DATA_VS_0);
1341
1342         /* TES can be bound as ES, VS, or not bound. */
1343         if (sctx->tes_shader.cso) {
1344                 if (sctx->gs_shader.cso)
1345                         si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1346                                               R_00B330_SPI_SHADER_USER_DATA_ES_0);
1347                 else
1348                         si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
1349                                               R_00B130_SPI_SHADER_USER_DATA_VS_0);
1350         } else {
1351                 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
1352         }
1353 }
1354
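/* Emit one descriptor-list pointer: a SET_SH_REG packet that writes the
 * 64-bit GPU address of the list into two consecutive user data SGPRs at
 * sh_base + shader_userdata_offset. "keep_dirty" leaves pointer_dirty set
 * so the same list can be emitted again at another register base (see the
 * VS copy shader handling in si_emit_shader_userdata).
 *
 * For example, for a vertex shader running on the hardware VS stage, the
 * constant buffer pointer goes to the register pair starting at
 * R_00B130_SPI_SHADER_USER_DATA_VS_0 plus the offset chosen for
 * SI_SGPR_CONST_BUFFERS, with the low dword of the address emitted first.
 */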
1355 static void si_emit_shader_pointer(struct si_context *sctx,
1356                                    struct si_descriptors *desc,
1357                                    unsigned sh_base, bool keep_dirty)
1358 {
1359         struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
1360         uint64_t va;
1361
1362         if (!desc->pointer_dirty || !desc->buffer)
1363                 return;
1364
1365         va = desc->buffer->gpu_address +
1366              desc->buffer_offset;
1367
1368         radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
1369         radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
1370         radeon_emit(cs, va);
1371         radeon_emit(cs, va >> 32);
1372
1373         desc->pointer_dirty = keep_dirty;
1374 }
1375
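/* Atom callback: emit all dirty descriptor pointers. The hardware VS stage
 * (the GS copy shader, or TES when no GS is bound) consumes the API vertex
 * shader's buffer lists for clipping, streamout and rings, so those
 * pointers are first emitted at the additional register bases with
 * keep_dirty = true; the per-stage loop below then emits them at their
 * regular bases and clears the dirty flags.
 */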
1376 void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
1377 {
1378         unsigned i;
1379         uint32_t *sh_base = sctx->shader_userdata.sh_base;
1380
1381         if (sctx->gs_shader.cso) {
1382                 /* The VS copy shader needs these for clipping, streamout, and rings. */
1383                 unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
1384                 unsigned vs = PIPE_SHADER_VERTEX;
1385
1386                 si_emit_shader_pointer(sctx, &sctx->const_buffers[vs].desc, vs_base, true);
1387                 si_emit_shader_pointer(sctx, &sctx->rw_buffers[vs].desc, vs_base, true);
1388
1389                 if (sctx->tes_shader.cso) {
1390                         /* The TESSEVAL shader needs this for the ESGS ring buffer. */
1391                         si_emit_shader_pointer(sctx, &sctx->rw_buffers[vs].desc,
1392                                                R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
1393                 }
1394         } else if (sctx->tes_shader.cso) {
1395                 /* The TESSEVAL shader needs this for streamout. */
1396                 si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
1397                                        R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1398         }
1399
1400         for (i = 0; i < SI_NUM_SHADERS; i++) {
1401                 unsigned base = sh_base[i];
1402
1403                 if (!base)
1404                         continue;
1405
1406                 if (i != PIPE_SHADER_TESS_EVAL)
1407                         si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);
1408
1409                 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
1410                 si_emit_shader_pointer(sctx, &sctx->shader_buffers[i].desc, base, false);
1411                 si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
1412                 si_emit_shader_pointer(sctx, &sctx->images[i].desc, base, false);
1413         }
1414         si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1415 }
1416
1417 /* INIT/DEINIT/UPLOAD */
1418
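/* Create the CPU-side descriptor lists for all shader stages and hook up
 * the pipe_context functions that modify them. Every list except the
 * vertex buffer list is also assigned a slice of constant engine (CE) RAM
 * via ce_offset; the assert below checks that the total stays within the
 * 32768 bytes the driver budgets for it.
 */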
1419 void si_init_all_descriptors(struct si_context *sctx)
1420 {
1421         int i;
1422         unsigned ce_offset = 0;
1423
1424         for (i = 0; i < SI_NUM_SHADERS; i++) {
1425                 si_init_buffer_resources(&sctx->const_buffers[i],
1426                                          SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1427                                          RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER,
1428                                          &ce_offset);
1429                 si_init_buffer_resources(&sctx->rw_buffers[i],
1430                                          SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1431                                          RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT,
1432                                          &ce_offset);
1433                 si_init_buffer_resources(&sctx->shader_buffers[i],
1434                                          SI_NUM_SHADER_BUFFERS, SI_SGPR_SHADER_BUFFERS,
1435                                          RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RW_BUFFER,
1436                                          &ce_offset);
1437
1438                 si_init_descriptors(&sctx->samplers[i].views.desc,
1439                                     SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS,
1440                                     null_texture_descriptor, &ce_offset);
1441
1442                 si_init_descriptors(&sctx->images[i].desc,
1443                                     SI_SGPR_IMAGES, 8, SI_NUM_IMAGES,
1444                                     null_image_descriptor, &ce_offset);
1445         }
1446
1447         si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1448                             4, SI_NUM_VERTEX_BUFFERS, NULL, NULL);
1449
1450         assert(ce_offset <= 32768);
1451
1452         /* Set pipe_context functions. */
1453         sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1454         sctx->b.b.set_shader_images = si_set_shader_images;
1455         sctx->b.b.set_constant_buffer = si_set_constant_buffer;
1456         sctx->b.b.set_shader_buffers = si_set_shader_buffers;
1457         sctx->b.b.set_sampler_views = si_set_sampler_views;
1458         sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1459         sctx->b.invalidate_buffer = si_invalidate_buffer;
1460
1461         /* Shader user data. */
1462         si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1463                      si_emit_shader_userdata);
1464
1465         /* Set default and immutable mappings. */
1466         si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1467         si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1468         si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1469         si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1470 }
1471
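/* Upload any dirty descriptor lists of every shader stage, including the
 * vertex buffer descriptors. Returns false if an upload fails (e.g. no
 * memory for the upload buffer), so the caller can bail out.
 */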
1472 bool si_upload_shader_descriptors(struct si_context *sctx)
1473 {
1474         int i;
1475
1476         for (i = 0; i < SI_NUM_SHADERS; i++) {
1477                 if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
1478                     !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
1479                     !si_upload_descriptors(sctx, &sctx->shader_buffers[i].desc) ||
1480                     !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc) ||
1481                     !si_upload_descriptors(sctx, &sctx->images[i].desc))
1482                         return false;
1483         }
1484         return si_upload_vertex_buffer_descriptors(sctx);
1485 }
1486
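/* Drop all buffer references held by the descriptor lists; used when the
 * context is destroyed.
 */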
1487 void si_release_all_descriptors(struct si_context *sctx)
1488 {
1489         int i;
1490
1491         for (i = 0; i < SI_NUM_SHADERS; i++) {
1492                 si_release_buffer_resources(&sctx->const_buffers[i]);
1493                 si_release_buffer_resources(&sctx->rw_buffers[i]);
1494                 si_release_buffer_resources(&sctx->shader_buffers[i]);
1495                 si_release_sampler_views(&sctx->samplers[i].views);
1496                 si_release_image_views(&sctx->images[i]);
1497         }
1498         si_release_descriptors(&sctx->vertex_buffers);
1499 }
1500
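/* Called at the start of a new command stream: re-add all bound descriptor
 * buffers to the new CS's buffer list and mark every shader pointer dirty
 * so the full state is re-emitted.
 */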
1501 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1502 {
1503         int i;
1504
1505         for (i = 0; i < SI_NUM_SHADERS; i++) {
1506                 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1507                 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
1508                 si_buffer_resources_begin_new_cs(sctx, &sctx->shader_buffers[i]);
1509                 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1510                 si_image_views_begin_new_cs(sctx, &sctx->images[i]);
1511         }
1512         si_vertex_buffers_begin_new_cs(sctx);
1513         si_shader_userdata_begin_new_cs(sctx);
1514 }