radeonsi: don't set number of IB dwords for states
src/gallium/drivers/radeonsi/si_descriptors.c (android-x86/external-mesa.git)
/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Marek Olšák <marek.olsak@amd.com>
 */

/* Resource binding slots and sampler states (each described with 8 or
 * 4 dwords) are stored in lists in memory which is accessed by shaders
 * using scalar load instructions.
 *
 * This file is responsible for managing such lists. It keeps a copy of all
 * descriptors in CPU memory and re-uploads a whole list if some slots have
 * been changed.
 *
 * This code is also responsible for updating shader pointers to those lists.
 *
 * Note that CP DMA can't be used for updating the lists, because a GPU hang
 * could leave the list in a mid-IB state and the next IB would get wrong
 * descriptors and the whole context would be unusable at that point.
 * (Note: register shadowing can't be used for the same reason.)
 *
 * Also, uploading descriptors to newly allocated memory doesn't require
 * a KCACHE flush.
 */

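/* A rough sketch of one such list in memory (8-dword image descriptors
 * shown; sampler states use 4 dwords per slot):
 *
 *    dword offset:  0        8        16       24
 *    list:          [slot 0] [slot 1] [slot 2] ...
 *
 * A shader receives the GPU address of its list in a pair of user data
 * SGPRs and indexes into it with scalar loads.
 */
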
#include "radeon/r600_cs.h"
#include "si_pipe.h"
#include "si_shader.h"
#include "sid.h"

#include "util/u_memory.h"
#include "util/u_upload_mgr.h"


/* NULL image and buffer descriptor.
 *
 * For images, all fields must be zero except for the swizzle, which
 * supports arbitrary combinations of 0s and 1s. The texture type must be
 * any valid type (e.g. 1D). If the texture type isn't set, the hw hangs.
 *
 * For buffers, all fields must be zero. If they are not, the hw hangs.
 *
 * This is the only reason why the buffer descriptor must be in words [4:7].
 */
static uint32_t null_descriptor[8] = {
        0,
        0,
        0,
        S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
        S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
        /* the rest must contain zeros, which is also used by the buffer
         * descriptor */
};

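/* Allocate the CPU copy of a descriptor list and record its layout.
 * Lists with 8-dword elements hold image descriptors and are pre-filled
 * with the NULL descriptor above.
 */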
static void si_init_descriptors(struct si_descriptors *desc,
                                unsigned shader_userdata_index,
                                unsigned element_dw_size,
                                unsigned num_elements)
{
        int i;

        assert(num_elements <= sizeof(desc->enabled_mask)*8);

        desc->list = CALLOC(num_elements, element_dw_size * 4);
        desc->element_dw_size = element_dw_size;
        desc->num_elements = num_elements;
        desc->list_dirty = true; /* upload the list before the next draw */
        desc->shader_userdata_offset = shader_userdata_index * 4;

        /* Initialize the array to NULL descriptors if the element size is 8. */
        if (element_dw_size == 8)
                for (i = 0; i < num_elements; i++)
                        memcpy(desc->list + i*element_dw_size, null_descriptor,
                               sizeof(null_descriptor));
}

static void si_release_descriptors(struct si_descriptors *desc)
{
        pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
        FREE(desc->list);
}

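/* Upload the dirty CPU copy of a descriptor list into a fresh slice of
 * GPU memory from the uploader, add the new buffer to the CS buffer list,
 * and flag the shader pointer for re-emission. Returns false on
 * allocation failure, in which case the draw call should be skipped.
 */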
static bool si_upload_descriptors(struct si_context *sctx,
                                  struct si_descriptors *desc)
{
        unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
        void *ptr;

        if (!desc->list_dirty)
                return true;

        u_upload_alloc(sctx->b.uploader, 0, list_size,
                       &desc->buffer_offset,
                       (struct pipe_resource**)&desc->buffer, &ptr);
        if (!desc->buffer)
                return false; /* skip the draw call */

        util_memcpy_cpu_to_le32(ptr, desc->list, list_size);

        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, desc->buffer,
                              RADEON_USAGE_READ, RADEON_PRIO_SHADER_DATA);

        desc->list_dirty = false;
        desc->pointer_dirty = true;
        si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
        return true;
}

/* SAMPLER VIEWS */

static void si_release_sampler_views(struct si_sampler_views *views)
{
        int i;

        for (i = 0; i < Elements(views->views); i++) {
                pipe_sampler_view_reference(&views->views[i], NULL);
        }
        si_release_descriptors(&views->desc);
}

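/* Pick the CS buffer-list priority for a read-only shader resource
 * based on whether it's a buffer, an MSAA texture, or a plain texture.
 */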
static enum radeon_bo_priority si_get_resource_ro_priority(struct r600_resource *res)
{
        if (res->b.b.target == PIPE_BUFFER)
                return RADEON_PRIO_SHADER_BUFFER_RO;

        if (res->b.b.nr_samples > 1)
                return RADEON_PRIO_SHADER_TEXTURE_MSAA;

        return RADEON_PRIO_SHADER_TEXTURE_RO;
}

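/* Re-add all enabled sampler view resources and the descriptor buffer
 * itself to the buffer list of a new CS.
 */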
static void si_sampler_views_begin_new_cs(struct si_context *sctx,
                                          struct si_sampler_views *views)
{
        uint64_t mask = views->desc.enabled_mask;

        /* Add relocations to the CS. */
        while (mask) {
                int i = u_bit_scan64(&mask);
                struct si_sampler_view *rview =
                        (struct si_sampler_view*)views->views[i];

                if (!rview->resource)
                        continue;

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                      rview->resource, RADEON_USAGE_READ,
                                      si_get_resource_ro_priority(rview->resource));
        }

        if (!views->desc.buffer)
                return;
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer,
                              RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
}

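/* Bind one sampler view: reference the pipe_sampler_view, copy its
 * 8-dword descriptor into the list (or the NULL descriptor when
 * unbinding), and update the enabled mask.
 */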
static void si_set_sampler_view(struct si_context *sctx, unsigned shader,
                                unsigned slot, struct pipe_sampler_view *view,
                                unsigned *view_desc)
{
        struct si_sampler_views *views = &sctx->samplers[shader].views;

        if (views->views[slot] == view)
                return;

        if (view) {
                struct si_sampler_view *rview =
                        (struct si_sampler_view*)view;

                if (rview->resource)
                        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                rview->resource, RADEON_USAGE_READ,
                                si_get_resource_ro_priority(rview->resource));

                pipe_sampler_view_reference(&views->views[slot], view);
                memcpy(views->desc.list + slot*8, view_desc, 8*4);
                views->desc.enabled_mask |= 1llu << slot;
        } else {
                pipe_sampler_view_reference(&views->views[slot], NULL);
                memcpy(views->desc.list + slot*8, null_descriptor, 8*4);
                views->desc.enabled_mask &= ~(1llu << slot);
        }

        views->desc.list_dirty = true;
}

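/* The set_sampler_views pipe_context hook. Besides binding the views,
 * this tracks which slots contain depth or compressed-color textures
 * (so they can be decompressed before draws) and binds FMASK
 * descriptors for MSAA textures at SI_FMASK_TEX_OFFSET + slot.
 */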
static void si_set_sampler_views(struct pipe_context *ctx,
                                 unsigned shader, unsigned start,
                                 unsigned count,
                                 struct pipe_sampler_view **views)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_textures_info *samplers = &sctx->samplers[shader];
        struct si_sampler_view **rviews = (struct si_sampler_view **)views;
        int i;

        if (!count || shader >= SI_NUM_SHADERS)
                return;

        for (i = 0; i < count; i++) {
                unsigned slot = start + i;

                if (!views || !views[i]) {
                        samplers->depth_texture_mask &= ~(1 << slot);
                        samplers->compressed_colortex_mask &= ~(1 << slot);
                        si_set_sampler_view(sctx, shader, slot, NULL, NULL);
                        si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
                                            NULL, NULL);
                        continue;
                }

                si_set_sampler_view(sctx, shader, slot, views[i], rviews[i]->state);

                if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
                        struct r600_texture *rtex =
                                (struct r600_texture*)views[i]->texture;

                        if (rtex->is_depth && !rtex->is_flushing_texture) {
                                samplers->depth_texture_mask |= 1 << slot;
                        } else {
                                samplers->depth_texture_mask &= ~(1 << slot);
                        }
                        if (rtex->cmask.size || rtex->fmask.size) {
                                samplers->compressed_colortex_mask |= 1 << slot;
                        } else {
                                samplers->compressed_colortex_mask &= ~(1 << slot);
                        }

                        if (rtex->fmask.size) {
                                si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
                                                    views[i], rviews[i]->fmask_state);
                        } else {
                                si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
                                                    NULL, NULL);
                        }
                } else {
                        samplers->depth_texture_mask &= ~(1 << slot);
                        samplers->compressed_colortex_mask &= ~(1 << slot);
                        si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
                                            NULL, NULL);
                }
        }
}

/* SAMPLER STATES */

static void si_sampler_states_begin_new_cs(struct si_context *sctx,
                                           struct si_sampler_states *states)
{
        if (!states->desc.buffer)
                return;
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx, states->desc.buffer,
                              RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
}

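/* Copy sampler states (4 dwords each) into the descriptor list.
 * The first two states are also mirrored in saved_states, apparently so
 * that save/restore code (e.g. the blitter) can re-bind them later.
 */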
void si_set_sampler_descriptors(struct si_context *sctx, unsigned shader,
                                unsigned start, unsigned count, void **states)
{
        struct si_sampler_states *samplers = &sctx->samplers[shader].states;
        struct si_sampler_state **sstates = (struct si_sampler_state**)states;
        int i;

        if (start == 0)
                samplers->saved_states[0] = states[0];
        if (start == 1)
                samplers->saved_states[1] = states[0];
        else if (start == 0 && count >= 2)
                samplers->saved_states[1] = states[1];

        for (i = 0; i < count; i++) {
                unsigned slot = start + i;

                if (!sstates[i])
                        continue;

                memcpy(samplers->desc.list + slot*4, sstates[i]->val, 4*4);
                samplers->desc.list_dirty = true;
        }
}

/* BUFFER RESOURCES */

static void si_init_buffer_resources(struct si_buffer_resources *buffers,
                                     unsigned num_buffers,
                                     unsigned shader_userdata_index,
                                     enum radeon_bo_usage shader_usage,
                                     enum radeon_bo_priority priority)
{
        buffers->shader_usage = shader_usage;
        buffers->priority = priority;
        buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));

        si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
                            num_buffers);
}

static void si_release_buffer_resources(struct si_buffer_resources *buffers)
{
        int i;

        for (i = 0; i < buffers->desc.num_elements; i++) {
                pipe_resource_reference(&buffers->buffers[i], NULL);
        }

        FREE(buffers->buffers);
        si_release_descriptors(&buffers->desc);
}

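/* Re-add all enabled buffer resources and the descriptor buffer itself
 * to the buffer list of a new CS.
 */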
static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
                                             struct si_buffer_resources *buffers)
{
        uint64_t mask = buffers->desc.enabled_mask;

        /* Add relocations to the CS. */
        while (mask) {
                int i = u_bit_scan64(&mask);

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                      (struct r600_resource*)buffers->buffers[i],
                                      buffers->shader_usage, buffers->priority);
        }

        if (!buffers->desc.buffer)
                return;
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                              buffers->desc.buffer, RADEON_USAGE_READWRITE,
                              RADEON_PRIO_SHADER_DATA);
}

/* VERTEX BUFFERS */

static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
{
        struct si_descriptors *desc = &sctx->vertex_buffers;
        int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
        int i;

        for (i = 0; i < count; i++) {
                int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;

                if (vb >= Elements(sctx->vertex_buffer))
                        continue;
                if (!sctx->vertex_buffer[vb].buffer)
                        continue;

                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                      (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
                                      RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
        }

        if (!desc->buffer)
                return;
        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                              desc->buffer, RADEON_USAGE_READ,
                              RADEON_PRIO_SHADER_DATA);
}

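/* Build and upload the vertex buffer descriptors (one 4-dword T# per
 * vertex element) whenever the vertex buffers or elements changed.
 * Unlike the other lists, this one is written directly into freshly
 * allocated upload memory instead of going through the CPU copy.
 */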
static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
{
        struct si_descriptors *desc = &sctx->vertex_buffers;
        bool bound[SI_NUM_VERTEX_BUFFERS] = {};
        unsigned i, count;
        uint64_t va;
        uint32_t *ptr;

        if (!sctx->vertex_buffers_dirty || !sctx->vertex_elements)
                return true;

        count = sctx->vertex_elements->count;
        if (!count)
                return true;

        /* Vertex buffer descriptors are the only ones which are uploaded
         * directly through a staging buffer and don't go through
         * the fine-grained upload path.
         */
        u_upload_alloc(sctx->b.uploader, 0, count * 16, &desc->buffer_offset,
                       (struct pipe_resource**)&desc->buffer, (void**)&ptr);
        if (!desc->buffer)
                return false;

        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                              desc->buffer, RADEON_USAGE_READ,
                              RADEON_PRIO_SHADER_DATA);

        assert(count <= SI_NUM_VERTEX_BUFFERS);

        for (i = 0; i < count; i++) {
                struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
                struct pipe_vertex_buffer *vb;
                struct r600_resource *rbuffer;
                unsigned offset;
                uint32_t *desc = &ptr[i*4];

                if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
                        memset(desc, 0, 16);
                        continue;
                }

                vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
                rbuffer = (struct r600_resource*)vb->buffer;
                if (rbuffer == NULL) {
                        memset(desc, 0, 16);
                        continue;
                }

                offset = vb->buffer_offset + ve->src_offset;
                va = rbuffer->gpu_address + offset;

                /* Fill in T# buffer resource description */
                desc[0] = va;
                desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
                          S_008F04_STRIDE(vb->stride);

                if (sctx->b.chip_class <= CIK && vb->stride)
                        /* Round up by rounding down and adding 1 */
                        desc[2] = (vb->buffer->width0 - offset -
                                   sctx->vertex_elements->format_size[i]) /
                                  vb->stride + 1;
                else
                        desc[2] = vb->buffer->width0 - offset;

                desc[3] = sctx->vertex_elements->rsrc_word3[i];

                if (!bound[ve->vertex_buffer_index]) {
                        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                              (struct r600_resource*)vb->buffer,
                                              RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
                        bound[ve->vertex_buffer_index] = true;
                }
        }

        /* Don't flush the const cache. It would have a very negative effect
         * on performance (confirmed by testing). New descriptors are always
         * uploaded to a fresh new buffer, so I don't think flushing the const
         * cache is needed. */
        desc->pointer_dirty = true;
        si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
        sctx->vertex_buffers_dirty = false;
        return true;
}


/* CONSTANT BUFFERS */

void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
                            const uint8_t *ptr, unsigned size, uint32_t *const_offset)
{
        void *tmp;

        u_upload_alloc(sctx->b.uploader, 0, size, const_offset,
                       (struct pipe_resource**)rbuffer, &tmp);
        util_memcpy_cpu_to_le32(tmp, ptr, size);
}

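/* The set_constant_buffer pipe_context hook. User buffers are first
 * uploaded to GPU memory; the slot then gets a 4-dword buffer
 * descriptor, or is cleared to zeros when unbinding.
 */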
static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
                                   struct pipe_constant_buffer *input)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_buffer_resources *buffers = &sctx->const_buffers[shader];

        if (shader >= SI_NUM_SHADERS)
                return;

        assert(slot < buffers->desc.num_elements);
        pipe_resource_reference(&buffers->buffers[slot], NULL);

        /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
         * with a NULL buffer). We need to use a dummy buffer instead. */
        if (sctx->b.chip_class == CIK &&
            (!input || (!input->buffer && !input->user_buffer)))
                input = &sctx->null_const_buf;

        if (input && (input->buffer || input->user_buffer)) {
                struct pipe_resource *buffer = NULL;
                uint64_t va;

                /* Upload the user buffer if needed. */
                if (input->user_buffer) {
                        unsigned buffer_offset;

                        si_upload_const_buffer(sctx,
                                               (struct r600_resource**)&buffer, input->user_buffer,
                                               input->buffer_size, &buffer_offset);
                        va = r600_resource(buffer)->gpu_address + buffer_offset;
                } else {
                        pipe_resource_reference(&buffer, input->buffer);
                        va = r600_resource(buffer)->gpu_address + input->buffer_offset;
                }

                /* Set the descriptor. */
                uint32_t *desc = buffers->desc.list + slot*4;
                desc[0] = va;
                desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
                          S_008F04_STRIDE(0);
                desc[2] = input->buffer_size;
                desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                          S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                          S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                          S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                          S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                          S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

                buffers->buffers[slot] = buffer;
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                      (struct r600_resource*)buffer,
                                      buffers->shader_usage, buffers->priority);
                buffers->desc.enabled_mask |= 1llu << slot;
        } else {
                /* Clear the descriptor. */
                memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
                buffers->desc.enabled_mask &= ~(1llu << slot);
        }

        buffers->desc.list_dirty = true;
}

/* RING BUFFERS */

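/* Bind an internal ring buffer (such as the ESGS/GSVS rings) to a RW
 * buffer slot. element_size and index_stride are given in bytes and
 * encoded into the 2-bit descriptor fields below.
 */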
void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
                        struct pipe_resource *buffer,
                        unsigned stride, unsigned num_records,
                        bool add_tid, bool swizzle,
                        unsigned element_size, unsigned index_stride, uint64_t offset)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];

        if (shader >= SI_NUM_SHADERS)
                return;

        /* The stride field in the resource descriptor has 14 bits */
        assert(stride < (1 << 14));

        assert(slot < buffers->desc.num_elements);
        pipe_resource_reference(&buffers->buffers[slot], NULL);

        if (buffer) {
                uint64_t va;

                va = r600_resource(buffer)->gpu_address + offset;

                switch (element_size) {
                default:
                        assert(!"Unsupported ring buffer element size");
                case 0:
                case 2:
                        element_size = 0;
                        break;
                case 4:
                        element_size = 1;
                        break;
                case 8:
                        element_size = 2;
                        break;
                case 16:
                        element_size = 3;
                        break;
                }

                switch (index_stride) {
                default:
                        assert(!"Unsupported ring buffer index stride");
                case 0:
                case 8:
                        index_stride = 0;
                        break;
                case 16:
                        index_stride = 1;
                        break;
                case 32:
                        index_stride = 2;
                        break;
                case 64:
                        index_stride = 3;
                        break;
                }

                if (sctx->b.chip_class >= VI && stride)
                        num_records *= stride;

                /* Set the descriptor. */
                uint32_t *desc = buffers->desc.list + slot*4;
                desc[0] = va;
                desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
                          S_008F04_STRIDE(stride) |
                          S_008F04_SWIZZLE_ENABLE(swizzle);
                desc[2] = num_records;
                desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                          S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                          S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                          S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                          S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
                          S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
                          S_008F0C_ELEMENT_SIZE(element_size) |
                          S_008F0C_INDEX_STRIDE(index_stride) |
                          S_008F0C_ADD_TID_ENABLE(add_tid);

                pipe_resource_reference(&buffers->buffers[slot], buffer);
                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                      (struct r600_resource*)buffer,
                                      buffers->shader_usage, buffers->priority);
                buffers->desc.enabled_mask |= 1llu << slot;
        } else {
                /* Clear the descriptor. */
                memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
                buffers->desc.enabled_mask &= ~(1llu << slot);
        }

        buffers->desc.list_dirty = true;
}

/* STREAMOUT BUFFERS */

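/* The set_stream_output_targets pipe_context hook. Streamout buffers
 * are bound in two places: in VGT via r600_set_streamout_targets, and
 * as RW buffer descriptors at SI_SO_BUF_OFFSET for shader access.
 * Unbinding also flags the caches that may hold stale data.
 */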
static void si_set_streamout_targets(struct pipe_context *ctx,
                                     unsigned num_targets,
                                     struct pipe_stream_output_target **targets,
                                     const unsigned *offsets)
{
        struct si_context *sctx = (struct si_context *)ctx;
        struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
        unsigned old_num_targets = sctx->b.streamout.num_targets;
        unsigned i, bufidx;

        /* We are going to unbind the buffers. Mark which caches need to be flushed. */
        if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
                /* Since streamout uses vector writes which go through TC L2
                 * and most other clients can use TC L2 as well, we don't need
                 * to flush it.
                 *
                 * The only case which requires flushing it is VGT DMA index
                 * fetching, which is a rare case. Thus, flag the TC L2
                 * dirtiness in the resource and handle it when index fetching
                 * is used.
                 */
                for (i = 0; i < sctx->b.streamout.num_targets; i++)
                        if (sctx->b.streamout.targets[i])
                                r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;

                /* Invalidate the scalar cache in case a streamout buffer is
                 * going to be used as a constant buffer.
                 *
                 * Invalidate TC L1, because streamout bypasses it (done by
                 * setting GLC=1 in the store instruction), but it can contain
                 * outdated data of streamout buffers.
                 *
                 * VS_PARTIAL_FLUSH is required if the buffers are going to be
                 * used as an input immediately.
                 */
                sctx->b.flags |= SI_CONTEXT_INV_KCACHE |
                                 SI_CONTEXT_INV_TC_L1 |
                                 SI_CONTEXT_VS_PARTIAL_FLUSH;
        }

        /* Streamout buffers must be bound in 2 places:
         * 1) in VGT by setting the VGT_STRMOUT registers
         * 2) as shader resources
         */

        /* Set the VGT regs. */
        r600_set_streamout_targets(ctx, num_targets, targets, offsets);

        /* Set the shader resources. */
        for (i = 0; i < num_targets; i++) {
                bufidx = SI_SO_BUF_OFFSET + i;

                if (targets[i]) {
                        struct pipe_resource *buffer = targets[i]->buffer;
                        uint64_t va = r600_resource(buffer)->gpu_address;

                        /* Set the descriptor.
                         *
                         * On VI, the format must be non-INVALID, otherwise
                         * the buffer will be considered not bound and store
                         * instructions will be no-ops.
                         */
                        uint32_t *desc = buffers->desc.list + bufidx*4;
                        desc[0] = va;
                        desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
                        desc[2] = 0xffffffff;
                        desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
                                  S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
                                  S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
                                  S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
                                  S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);

                        /* Set the resource. */
                        pipe_resource_reference(&buffers->buffers[bufidx],
                                                buffer);
                        radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                              (struct r600_resource*)buffer,
                                              buffers->shader_usage, buffers->priority);
                        buffers->desc.enabled_mask |= 1llu << bufidx;
                } else {
                        /* Clear the descriptor and unset the resource. */
                        memset(buffers->desc.list + bufidx*4, 0,
                               sizeof(uint32_t) * 4);
                        pipe_resource_reference(&buffers->buffers[bufidx],
                                                NULL);
                        buffers->desc.enabled_mask &= ~(1llu << bufidx);
                }
        }
        for (; i < old_num_targets; i++) {
                bufidx = SI_SO_BUF_OFFSET + i;
                /* Clear the descriptor and unset the resource. */
                memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
                pipe_resource_reference(&buffers->buffers[bufidx], NULL);
                buffers->desc.enabled_mask &= ~(1llu << bufidx);
        }

        buffers->desc.list_dirty = true;
}

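/* Rewrite the 64-bit base address in a buffer descriptor after the
 * underlying buffer has been reallocated, preserving the binding's
 * offset within the old buffer.
 */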
static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
                                        uint32_t *desc, uint64_t old_buf_va,
                                        struct pipe_resource *new_buf)
{
        /* Retrieve the buffer offset from the descriptor. */
        uint64_t old_desc_va =
                desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);

        assert(old_buf_va <= old_desc_va);
        uint64_t offset_within_buffer = old_desc_va - old_buf_va;

        /* Update the descriptor. */
        uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;

        desc[0] = va;
        desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
                  S_008F04_BASE_ADDRESS_HI(va >> 32);
}

/* BUFFER DISCARD/INVALIDATION */

/* Reallocate a buffer and update all resource bindings where the buffer is
 * bound.
 *
 * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
 * idle by discarding its contents. Apps usually tell us when to do this using
 * map_buffer flags, for example.
 */
static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
{
        struct si_context *sctx = (struct si_context*)ctx;
        struct r600_resource *rbuffer = r600_resource(buf);
        unsigned i, shader, alignment = rbuffer->buf->alignment;
        uint64_t old_va = rbuffer->gpu_address;
        unsigned num_elems = sctx->vertex_elements ?
                                       sctx->vertex_elements->count : 0;
        struct si_sampler_view *view;

        /* Reallocate the buffer in the same pipe_resource. */
        r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
                           alignment, TRUE);

        /* We changed the buffer, now we need to bind it where the old one
         * was bound. This consists of 2 things:
         *   1) Updating the resource descriptor and dirtying it.
         *   2) Adding a relocation to the CS, so that it's usable.
         */

        /* Vertex buffers. */
        for (i = 0; i < num_elems; i++) {
                int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;

                if (vb >= Elements(sctx->vertex_buffer))
                        continue;
                if (!sctx->vertex_buffer[vb].buffer)
                        continue;

                if (sctx->vertex_buffer[vb].buffer == buf) {
                        sctx->vertex_buffers_dirty = true;
                        break;
                }
        }

        /* Read/Write buffers. */
        for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
                uint64_t mask = buffers->desc.enabled_mask;

                while (mask) {
                        i = u_bit_scan64(&mask);
                        if (buffers->buffers[i] == buf) {
                                si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
                                                            old_va, buf);
                                buffers->desc.list_dirty = true;

                                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                                      rbuffer, buffers->shader_usage,
                                                      buffers->priority);

                                if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
                                        /* Update the streamout state. */
                                        if (sctx->b.streamout.begin_emitted) {
                                                r600_emit_streamout_end(&sctx->b);
                                        }
                                        sctx->b.streamout.append_bitmask =
                                                sctx->b.streamout.enabled_mask;
                                        r600_streamout_buffers_dirty(&sctx->b);
                                }
                        }
                }
        }

        /* Constant buffers. */
        for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
                uint64_t mask = buffers->desc.enabled_mask;

                while (mask) {
                        unsigned i = u_bit_scan64(&mask);
                        if (buffers->buffers[i] == buf) {
                                si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
                                                            old_va, buf);
                                buffers->desc.list_dirty = true;

                                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                                      rbuffer, buffers->shader_usage,
                                                      buffers->priority);
                        }
                }
        }

        /* Texture buffers - update virtual addresses in sampler view descriptors. */
        LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
                if (view->base.texture == buf) {
                        si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
                }
        }
        /* Texture buffers - update bindings. */
        for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
                struct si_sampler_views *views = &sctx->samplers[shader].views;
                uint64_t mask = views->desc.enabled_mask;

                while (mask) {
                        unsigned i = u_bit_scan64(&mask);
                        if (views->views[i]->texture == buf) {
                                si_desc_reset_buffer_offset(ctx, views->desc.list + i*8+4,
                                                            old_va, buf);
                                views->desc.list_dirty = true;

                                radeon_add_to_buffer_list(&sctx->b, &sctx->b.rings.gfx,
                                                      rbuffer, RADEON_USAGE_READ,
                                                      RADEON_PRIO_SHADER_BUFFER_RO);
                        }
                }
        }
}

/* SHADER USER DATA */

static void si_mark_shader_pointers_dirty(struct si_context *sctx,
                                          unsigned shader)
{
        sctx->const_buffers[shader].desc.pointer_dirty = true;
        sctx->rw_buffers[shader].desc.pointer_dirty = true;
        sctx->samplers[shader].views.desc.pointer_dirty = true;
        sctx->samplers[shader].states.desc.pointer_dirty = true;

        if (shader == PIPE_SHADER_VERTEX)
                sctx->vertex_buffers.pointer_dirty = true;

        si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
}

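/* All shader pointers must be re-emitted in a new CS. */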
static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_mark_shader_pointers_dirty(sctx, i);
        }
}

/* Set a base register address for user data constants in the given shader.
 * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
 */
static void si_set_user_data_base(struct si_context *sctx,
                                  unsigned shader, uint32_t new_base)
{
        uint32_t *base = &sctx->shader_userdata.sh_base[shader];

        if (*base != new_base) {
                *base = new_base;

                if (new_base)
                        si_mark_shader_pointers_dirty(sctx, shader);
        }
}

/* This must be called when these shaders are changed from non-NULL to NULL
 * and vice versa:
 * - geometry shader
 * - tessellation control shader
 * - tessellation evaluation shader
 */
void si_shader_change_notify(struct si_context *sctx)
{
        /* VS can be bound as VS, ES, or LS. */
        if (sctx->tes_shader)
                si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
                                      R_00B530_SPI_SHADER_USER_DATA_LS_0);
        else if (sctx->gs_shader)
                si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
                                      R_00B330_SPI_SHADER_USER_DATA_ES_0);
        else
                si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
                                      R_00B130_SPI_SHADER_USER_DATA_VS_0);

        /* TES can be bound as ES, VS, or not bound. */
        if (sctx->tes_shader) {
                if (sctx->gs_shader)
                        si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
                                              R_00B330_SPI_SHADER_USER_DATA_ES_0);
                else
                        si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
                                              R_00B130_SPI_SHADER_USER_DATA_VS_0);
        } else {
                si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
        }
}

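/* Emit one shader pointer: a SET_SH_REG packet writing the 64-bit
 * descriptor list address into a pair of user data SGPRs at
 * sh_base + shader_userdata_offset.
 */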
static void si_emit_shader_pointer(struct si_context *sctx,
                                   struct si_descriptors *desc,
                                   unsigned sh_base, bool keep_dirty)
{
        struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
        uint64_t va;

        if (!desc->pointer_dirty || !desc->buffer)
                return;

        va = desc->buffer->gpu_address +
             desc->buffer_offset;

        radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
        radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
        radeon_emit(cs, va);
        radeon_emit(cs, va >> 32);

        desc->pointer_dirty = keep_dirty;
}

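/* Emit all dirty shader pointers. The special cases up front handle
 * stages that consume descriptors through a different SPI_SHADER_USER_DATA
 * base than their own (the GS copy shader and TES); those pointers are
 * emitted with keep_dirty=true so the loop below re-emits them for the
 * stage's own base as well.
 */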
void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
{
        unsigned i;
        uint32_t *sh_base = sctx->shader_userdata.sh_base;

        if (sctx->gs_shader) {
                /* The VS copy shader needs these for clipping, streamout, and rings. */
                unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
                unsigned i = PIPE_SHADER_VERTEX;

                si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
                si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);

                /* The TESSEVAL shader needs this for the ESGS ring buffer. */
                si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
                                       R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
        } else if (sctx->tes_shader) {
                /* The TESSEVAL shader needs this for streamout. */
                si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
                                       R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
        }

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                unsigned base = sh_base[i];

                if (!base)
                        continue;

                if (i != PIPE_SHADER_TESS_EVAL)
                        si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);

                si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
                si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
                si_emit_shader_pointer(sctx, &sctx->samplers[i].states.desc, base, false);
        }
        si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
}

/* INIT/DEINIT/UPLOAD */

void si_init_all_descriptors(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_init_buffer_resources(&sctx->const_buffers[i],
                                         SI_NUM_CONST_BUFFERS, SI_SGPR_CONST,
                                         RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
                si_init_buffer_resources(&sctx->rw_buffers[i],
                                         SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
                                         RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW);

                si_init_descriptors(&sctx->samplers[i].views.desc,
                                    SI_SGPR_RESOURCE, 8, SI_NUM_SAMPLER_VIEWS);
                si_init_descriptors(&sctx->samplers[i].states.desc,
                                    SI_SGPR_SAMPLER, 4, SI_NUM_SAMPLER_STATES);
        }

        si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFER,
                            4, SI_NUM_VERTEX_BUFFERS);

        /* Set pipe_context functions. */
        sctx->b.b.set_constant_buffer = si_set_constant_buffer;
        sctx->b.b.set_sampler_views = si_set_sampler_views;
        sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
        sctx->b.invalidate_buffer = si_invalidate_buffer;

        /* Shader user data. */
        si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
                     si_emit_shader_userdata);

        /* Set default and immutable mappings. */
        si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
        si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
        si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
        si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
}

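/* Upload all dirty descriptor lists. Returns false if any upload failed,
 * in which case the caller should skip the draw call.
 */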
bool si_upload_shader_descriptors(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
                    !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
                    !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc) ||
                    !si_upload_descriptors(sctx, &sctx->samplers[i].states.desc))
                        return false;
        }
        return si_upload_vertex_buffer_descriptors(sctx);
}

void si_release_all_descriptors(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_release_buffer_resources(&sctx->const_buffers[i]);
                si_release_buffer_resources(&sctx->rw_buffers[i]);
                si_release_sampler_views(&sctx->samplers[i].views);
                si_release_descriptors(&sctx->samplers[i].states.desc);
        }
        si_release_descriptors(&sctx->vertex_buffers);
}

void si_all_descriptors_begin_new_cs(struct si_context *sctx)
{
        int i;

        for (i = 0; i < SI_NUM_SHADERS; i++) {
                si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
                si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
                si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
                si_sampler_states_begin_new_cs(sctx, &sctx->samplers[i].states);
        }
        si_vertex_buffers_begin_new_cs(sctx);
        si_shader_userdata_begin_new_cs(sctx);
}