radeonsi: put image, fmask, and sampler descriptors into one array
[android-x86/external-mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Marek Olšák <marek.olsak@amd.com>
25  */
26
27 /* Resource binding slots and sampler states (each described with 8 or
28  * 4 dwords) are stored in lists in memory which is accessed by shaders
29  * using scalar load instructions.
30  *
31  * This file is responsible for managing such lists. It keeps a copy of all
32  * descriptors in CPU memory and re-uploads a whole list if some slots have
33  * been changed.
34  *
35  * This code is also responsible for updating shader pointers to those lists.
36  *
37  * Note that CP DMA can't be used for updating the lists, because a GPU hang
38  * could leave the list in a mid-IB state and the next IB would get wrong
39  * descriptors and the whole context would be unusable at that point.
40  * (Register shadowing can't be used for the same reason.)
41  *
42  * Also, uploading descriptors to newly allocated memory doesn't require
43  * a KCACHE flush.
44  *
45  *
46  * Possible scenarios for one 16 dword image+sampler slot:
47  *
48  *       | Image        | w/ FMASK   | Buffer       | NULL
49  * [ 0: 3] Image[0:3]   | Image[0:3] | Null[0:3]    | Null[0:3]
50  * [ 4: 7] Image[4:7]   | Image[4:7] | Buffer[0:3]  | 0
51  * [ 8:11] Null[0:3]    | Fmask[0:3] | Null[0:3]    | Null[0:3]
52  * [12:15] Sampler[0:3] | Fmask[4:7] | Sampler[0:3] | Sampler[0:3]
53  *
54  * FMASK implies MSAA, therefore no sampler state.
55  * Sampler states are never unbound except when FMASK is bound.
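 *
 * For example, image+sampler slot N occupies dwords [N*16 .. N*16+15] of
 * the list; si_set_sampler_view below indexes the list the same way
 * (desc.list + slot * 16).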
56  */
57
58 #include "radeon/r600_cs.h"
59 #include "si_pipe.h"
60 #include "si_shader.h"
61 #include "sid.h"
62
63 #include "util/u_memory.h"
64 #include "util/u_upload_mgr.h"
65
66
67 /* NULL image and buffer descriptor.
68  *
69  * For images, all fields must be zero except for the swizzle, which
70  * supports arbitrary combinations of 0s and 1s, and the texture type,
71  * which must be set to any valid type (e.g. 1D); if it isn't, the hw hangs.
72  *
73  * For buffers, all fields must be zero. If they are not, the hw hangs.
74  *
75  * This is the only reason why the buffer descriptor must be in words [4:7].
76  */
77 static uint32_t null_descriptor[8] = {
78         0,
79         0,
80         0,
81         S_008F1C_DST_SEL_W(V_008F1C_SQ_SEL_1) |
82         S_008F1C_TYPE(V_008F1C_SQ_RSRC_IMG_1D)
83         /* the rest must contain zeros, which is also used by the buffer
84          * descriptor */
85 };
86
87 static void si_init_descriptors(struct si_descriptors *desc,
88                                 unsigned shader_userdata_index,
89                                 unsigned element_dw_size,
90                                 unsigned num_elements)
91 {
92         int i;
93
94         assert(num_elements <= sizeof(desc->enabled_mask)*8);
95
96         desc->list = CALLOC(num_elements, element_dw_size * 4);
97         desc->element_dw_size = element_dw_size;
98         desc->num_elements = num_elements;
99         desc->list_dirty = true; /* upload the list before the next draw */
100         desc->shader_userdata_offset = shader_userdata_index * 4;
101
102         /* Initialize the array to NULL descriptors if the element size is a multiple of 8 dwords. */
103         if (element_dw_size % 8 == 0)
104                 for (i = 0; i < num_elements * element_dw_size / 8; i++)
105                         memcpy(desc->list + i*8, null_descriptor,
106                                sizeof(null_descriptor));
107 }
108
109 static void si_release_descriptors(struct si_descriptors *desc)
110 {
111         pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
112         FREE(desc->list);
113 }
114
115 static bool si_upload_descriptors(struct si_context *sctx,
116                                   struct si_descriptors *desc)
117 {
118         unsigned list_size = desc->num_elements * desc->element_dw_size * 4;
119         void *ptr;
120
121         if (!desc->list_dirty)
122                 return true;
123
124         u_upload_alloc(sctx->b.uploader, 0, list_size, 256,
125                        &desc->buffer_offset,
126                        (struct pipe_resource**)&desc->buffer, &ptr);
127         if (!desc->buffer)
128                 return false; /* skip the draw call */
129
130         util_memcpy_cpu_to_le32(ptr, desc->list, list_size);
131
132         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, desc->buffer,
133                               RADEON_USAGE_READ, RADEON_PRIO_DESCRIPTORS);
134
135         desc->list_dirty = false;
136         desc->pointer_dirty = true;
137         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
138         return true;
139 }
140
141 /* SAMPLER VIEWS */
142
143 static void si_release_sampler_views(struct si_sampler_views *views)
144 {
145         int i;
146
147         for (i = 0; i < Elements(views->views); i++) {
148                 pipe_sampler_view_reference(&views->views[i], NULL);
149         }
150         si_release_descriptors(&views->desc);
151 }
152
153 static void si_sampler_view_add_buffers(struct si_context *sctx,
154                                         struct si_sampler_view *rview)
155 {
156         if (rview->resource) {
157                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
158                         rview->resource, RADEON_USAGE_READ,
159                         r600_get_sampler_view_priority(rview->resource));
160         }
161
162         if (rview->dcc_buffer && rview->dcc_buffer != rview->resource) {
163                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
164                         rview->dcc_buffer, RADEON_USAGE_READ,
165                         RADEON_PRIO_DCC);
166         }
167 }
168
169 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
170                                           struct si_sampler_views *views)
171 {
172         uint64_t mask = views->desc.enabled_mask;
173
174         /* Add buffers to the CS. */
175         while (mask) {
176                 int i = u_bit_scan64(&mask);
177                 struct si_sampler_view *rview =
178                         (struct si_sampler_view*)views->views[i];
179
180                 si_sampler_view_add_buffers(sctx, rview);
181         }
182
183         if (!views->desc.buffer)
184                 return;
185         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx, views->desc.buffer,
186                               RADEON_USAGE_READWRITE, RADEON_PRIO_DESCRIPTORS);
187 }
188
189 static void si_set_sampler_view(struct si_context *sctx,
190                                 struct si_sampler_views *views,
191                                 unsigned slot, struct pipe_sampler_view *view)
192 {
193         if (views->views[slot] == view)
194                 return;
195
196         if (view) {
197                 struct si_sampler_view *rview =
198                         (struct si_sampler_view*)view;
199                 struct r600_texture *rtex = (struct r600_texture*)view->texture;
200
201                 si_sampler_view_add_buffers(sctx, rview);
202
203                 pipe_sampler_view_reference(&views->views[slot], view);
204                 memcpy(views->desc.list + slot * 16, rview->state, 8*4);
205
206                 if (rtex && rtex->fmask.size) {
207                         memcpy(views->desc.list + slot*16 + 8,
208                                rview->fmask_state, 8*4);
209                 } else {
210                         /* Disable FMASK and bind sampler state in [12:15]. */
211                         memcpy(views->desc.list + slot*16 + 8,
212                                null_descriptor, 4*4);
213
214                         if (views->sampler_states[slot])
215                                 memcpy(views->desc.list + slot*16 + 12,
216                                        views->sampler_states[slot], 4*4);
217                 }
218
219                 views->desc.enabled_mask |= 1llu << slot;
220         } else {
221                 pipe_sampler_view_reference(&views->views[slot], NULL);
222                 memcpy(views->desc.list + slot*16, null_descriptor, 8*4);
223                 /* Only clear the lower dwords of FMASK. */
224                 memcpy(views->desc.list + slot*16 + 8, null_descriptor, 4*4);
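                /* Dwords [12:15] intentionally keep the last sampler state;
                 * as noted at the top of the file, sampler states are never
                 * unbound except when FMASK is bound. */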
225                 views->desc.enabled_mask &= ~(1llu << slot);
226         }
227
228         views->desc.list_dirty = true;
229 }
230
231 static void si_set_sampler_views(struct pipe_context *ctx,
232                                  unsigned shader, unsigned start,
233                                  unsigned count,
234                                  struct pipe_sampler_view **views)
235 {
236         struct si_context *sctx = (struct si_context *)ctx;
237         struct si_textures_info *samplers = &sctx->samplers[shader];
238         int i;
239
240         if (!count || shader >= SI_NUM_SHADERS)
241                 return;
242
243         for (i = 0; i < count; i++) {
244                 unsigned slot = start + i;
245
246                 if (!views || !views[i]) {
247                         samplers->depth_texture_mask &= ~(1 << slot);
248                         samplers->compressed_colortex_mask &= ~(1 << slot);
249                         si_set_sampler_view(sctx, &samplers->views, slot, NULL);
250                         continue;
251                 }
252
253                 si_set_sampler_view(sctx, &samplers->views, slot, views[i]);
254
255                 if (views[i]->texture && views[i]->texture->target != PIPE_BUFFER) {
256                         struct r600_texture *rtex =
257                                 (struct r600_texture*)views[i]->texture;
258
259                         if (rtex->is_depth && !rtex->is_flushing_texture) {
260                                 samplers->depth_texture_mask |= 1 << slot;
261                         } else {
262                                 samplers->depth_texture_mask &= ~(1 << slot);
263                         }
264                         if (rtex->cmask.size || rtex->fmask.size ||
265                             (rtex->dcc_buffer && rtex->dirty_level_mask)) {
266                                 samplers->compressed_colortex_mask |= 1 << slot;
267                         } else {
268                                 samplers->compressed_colortex_mask &= ~(1 << slot);
269                         }
270                 } else {
271                         samplers->depth_texture_mask &= ~(1 << slot);
272                         samplers->compressed_colortex_mask &= ~(1 << slot);
273                 }
274         }
275 }
276
277 /* SAMPLER STATES */
278
279 static void si_bind_sampler_states(struct pipe_context *ctx, unsigned shader,
280                                    unsigned start, unsigned count, void **states)
281 {
282         struct si_context *sctx = (struct si_context *)ctx;
283         struct si_textures_info *samplers = &sctx->samplers[shader];
284         struct si_descriptors *desc = &samplers->views.desc;
285         struct si_sampler_state **sstates = (struct si_sampler_state**)states;
286         int i;
287
288         if (!count || shader >= SI_NUM_SHADERS)
289                 return;
290
291         for (i = 0; i < count; i++) {
292                 unsigned slot = start + i;
293
294                 if (!sstates[i] ||
295                     sstates[i] == samplers->views.sampler_states[slot])
296                         continue;
297
298                 samplers->views.sampler_states[slot] = sstates[i];
299
300                 /* If FMASK is bound, don't overwrite it.
301                  * The sampler state will be set after FMASK is unbound.
302                  */
303                 if (samplers->views.views[slot] &&
304                     samplers->views.views[slot]->texture &&
305                     ((struct r600_texture*)samplers->views.views[slot]->texture)->fmask.size)
306                         continue;
307
308                 memcpy(desc->list + slot * 16 + 12, sstates[i]->val, 4*4);
309                 desc->list_dirty = true;
310         }
311 }
312
313 /* BUFFER RESOURCES */
314
315 static void si_init_buffer_resources(struct si_buffer_resources *buffers,
316                                      unsigned num_buffers,
317                                      unsigned shader_userdata_index,
318                                      enum radeon_bo_usage shader_usage,
319                                      enum radeon_bo_priority priority)
320 {
321         buffers->shader_usage = shader_usage;
322         buffers->priority = priority;
323         buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
324
325         si_init_descriptors(&buffers->desc, shader_userdata_index, 4,
326                             num_buffers);
327 }
328
329 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
330 {
331         int i;
332
333         for (i = 0; i < buffers->desc.num_elements; i++) {
334                 pipe_resource_reference(&buffers->buffers[i], NULL);
335         }
336
337         FREE(buffers->buffers);
338         si_release_descriptors(&buffers->desc);
339 }
340
341 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
342                                              struct si_buffer_resources *buffers)
343 {
344         uint64_t mask = buffers->desc.enabled_mask;
345
346         /* Add buffers to the CS. */
347         while (mask) {
348                 int i = u_bit_scan64(&mask);
349
350                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
351                                       (struct r600_resource*)buffers->buffers[i],
352                                       buffers->shader_usage, buffers->priority);
353         }
354
355         if (!buffers->desc.buffer)
356                 return;
357         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
358                               buffers->desc.buffer, RADEON_USAGE_READWRITE,
359                               RADEON_PRIO_DESCRIPTORS);
360 }
361
362 /* VERTEX BUFFERS */
363
364 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
365 {
366         struct si_descriptors *desc = &sctx->vertex_buffers;
367         int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
368         int i;
369
370         for (i = 0; i < count; i++) {
371                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
372
373                 if (vb >= Elements(sctx->vertex_buffer))
374                         continue;
375                 if (!sctx->vertex_buffer[vb].buffer)
376                         continue;
377
378                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
379                                       (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
380                                       RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
381         }
382
383         if (!desc->buffer)
384                 return;
385         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
386                               desc->buffer, RADEON_USAGE_READ,
387                               RADEON_PRIO_DESCRIPTORS);
388 }
389
390 static bool si_upload_vertex_buffer_descriptors(struct si_context *sctx)
391 {
392         struct si_descriptors *desc = &sctx->vertex_buffers;
393         bool bound[SI_NUM_VERTEX_BUFFERS] = {};
394         unsigned i, count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
395         uint64_t va;
396         uint32_t *ptr;
397
398         if (!sctx->vertex_buffers_dirty)
399                 return true;
400         if (!count)
401                 return true;
402
403         /* Vertex buffer descriptors are the only ones which are uploaded
404          * directly through a staging buffer and don't go through
405          * the fine-grained upload path.
406          */
407         u_upload_alloc(sctx->b.uploader, 0, count * 16, 256, &desc->buffer_offset,
408                        (struct pipe_resource**)&desc->buffer, (void**)&ptr);
409         if (!desc->buffer)
410                 return false;
411
412         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
413                               desc->buffer, RADEON_USAGE_READ,
414                               RADEON_PRIO_DESCRIPTORS);
415
416         assert(count <= SI_NUM_VERTEX_BUFFERS);
417
418         for (i = 0; i < count; i++) {
419                 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
420                 struct pipe_vertex_buffer *vb;
421                 struct r600_resource *rbuffer;
422                 unsigned offset;
423                 uint32_t *desc = &ptr[i*4];
424
425                 if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
426                         memset(desc, 0, 16);
427                         continue;
428                 }
429
430                 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
431                 rbuffer = (struct r600_resource*)vb->buffer;
432                 if (!rbuffer) {
433                         memset(desc, 0, 16);
434                         continue;
435                 }
436
437                 offset = vb->buffer_offset + ve->src_offset;
438                 va = rbuffer->gpu_address + offset;
439
440                 /* Fill in T# buffer resource description */
441                 desc[0] = va;
442                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
443                           S_008F04_STRIDE(vb->stride);
444
445                 if (sctx->b.chip_class <= CIK && vb->stride)
446                         /* Round up by rounding down and adding 1 */
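                        /* e.g. with hypothetical values width0=256, offset=0,
                         * stride=16, format_size=16: (256-0-16)/16 + 1 = 16
                         * records, so the last vertex is still fully in range. */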
447                         desc[2] = (vb->buffer->width0 - offset -
448                                    sctx->vertex_elements->format_size[i]) /
449                                   vb->stride + 1;
450                 else
451                         desc[2] = vb->buffer->width0 - offset;
452
453                 desc[3] = sctx->vertex_elements->rsrc_word3[i];
454
455                 if (!bound[ve->vertex_buffer_index]) {
456                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
457                                               (struct r600_resource*)vb->buffer,
458                                               RADEON_USAGE_READ, RADEON_PRIO_VERTEX_BUFFER);
459                         bound[ve->vertex_buffer_index] = true;
460                 }
461         }
462
463         /* Don't flush the const cache. It would have a very negative effect
464          * on performance (confirmed by testing). New descriptors are always
465          * uploaded to a fresh new buffer, so I don't think flushing the const
466          * cache is needed. */
467         desc->pointer_dirty = true;
468         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
469         sctx->vertex_buffers_dirty = false;
470         return true;
471 }
472
473
474 /* CONSTANT BUFFERS */
475
476 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
477                             const uint8_t *ptr, unsigned size, uint32_t *const_offset)
478 {
479         void *tmp;
480
481         u_upload_alloc(sctx->b.uploader, 0, size, 256, const_offset,
482                        (struct pipe_resource**)rbuffer, &tmp);
483         if (*rbuffer)
484                 util_memcpy_cpu_to_le32(tmp, ptr, size);
485 }
486
487 static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
488                                    struct pipe_constant_buffer *input)
489 {
490         struct si_context *sctx = (struct si_context *)ctx;
491         struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
492
493         if (shader >= SI_NUM_SHADERS)
494                 return;
495
496         assert(slot < buffers->desc.num_elements);
497         pipe_resource_reference(&buffers->buffers[slot], NULL);
498
499         /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
500          * with a NULL buffer). We need to use a dummy buffer instead. */
501         if (sctx->b.chip_class == CIK &&
502             (!input || (!input->buffer && !input->user_buffer)))
503                 input = &sctx->null_const_buf;
504
505         if (input && (input->buffer || input->user_buffer)) {
506                 struct pipe_resource *buffer = NULL;
507                 uint64_t va;
508
509                 /* Upload the user buffer if needed. */
510                 if (input->user_buffer) {
511                         unsigned buffer_offset;
512
513                         si_upload_const_buffer(sctx,
514                                                (struct r600_resource**)&buffer, input->user_buffer,
515                                                input->buffer_size, &buffer_offset);
516                         if (!buffer) {
517                                 /* Just unbind on failure. */
518                                 si_set_constant_buffer(ctx, shader, slot, NULL);
519                                 return;
520                         }
521                         va = r600_resource(buffer)->gpu_address + buffer_offset;
522                 } else {
523                         pipe_resource_reference(&buffer, input->buffer);
524                         va = r600_resource(buffer)->gpu_address + input->buffer_offset;
525                 }
526
527                 /* Set the descriptor. */
528                 uint32_t *desc = buffers->desc.list + slot*4;
529                 desc[0] = va;
530                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
531                           S_008F04_STRIDE(0);
532                 desc[2] = input->buffer_size;
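                /* With STRIDE=0 the buffer is addressed as a raw buffer, so
                 * NUM_RECORDS (desc[2]) is a byte count and buffer_size can
                 * be used directly. */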
533                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
534                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
535                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
536                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
537                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
538                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
539
540                 buffers->buffers[slot] = buffer;
541                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
542                                       (struct r600_resource*)buffer,
543                                       buffers->shader_usage, buffers->priority);
544                 buffers->desc.enabled_mask |= 1llu << slot;
545         } else {
546                 /* Clear the descriptor. */
547                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
548                 buffers->desc.enabled_mask &= ~(1llu << slot);
549         }
550
551         buffers->desc.list_dirty = true;
552 }
553
554 /* RING BUFFERS */
555
556 void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
557                         struct pipe_resource *buffer,
558                         unsigned stride, unsigned num_records,
559                         bool add_tid, bool swizzle,
560                         unsigned element_size, unsigned index_stride, uint64_t offset)
561 {
562         struct si_context *sctx = (struct si_context *)ctx;
563         struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
564
565         if (shader >= SI_NUM_SHADERS)
566                 return;
567
568         /* The stride field in the resource descriptor has 14 bits */
569         assert(stride < (1 << 14));
570
571         assert(slot < buffers->desc.num_elements);
572         pipe_resource_reference(&buffers->buffers[slot], NULL);
573
574         if (buffer) {
575                 uint64_t va;
576
577                 va = r600_resource(buffer)->gpu_address + offset;
578
579                 switch (element_size) {
580                 default:
581                         assert(!"Unsupported ring buffer element size");
582                 case 0:
583                 case 2:
584                         element_size = 0;
585                         break;
586                 case 4:
587                         element_size = 1;
588                         break;
589                 case 8:
590                         element_size = 2;
591                         break;
592                 case 16:
593                         element_size = 3;
594                         break;
595                 }
596
597                 switch (index_stride) {
598                 default:
599                         assert(!"Unsupported ring buffer index stride");
600                 case 0:
601                 case 8:
602                         index_stride = 0;
603                         break;
604                 case 16:
605                         index_stride = 1;
606                         break;
607                 case 32:
608                         index_stride = 2;
609                         break;
610                 case 64:
611                         index_stride = 3;
612                         break;
613                 }
614
615                 if (sctx->b.chip_class >= VI && stride)
616                         num_records *= stride;
617
618                 /* Set the descriptor. */
619                 uint32_t *desc = buffers->desc.list + slot*4;
620                 desc[0] = va;
621                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
622                           S_008F04_STRIDE(stride) |
623                           S_008F04_SWIZZLE_ENABLE(swizzle);
624                 desc[2] = num_records;
625                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
626                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
627                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
628                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
629                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
630                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
631                           S_008F0C_ELEMENT_SIZE(element_size) |
632                           S_008F0C_INDEX_STRIDE(index_stride) |
633                           S_008F0C_ADD_TID_ENABLE(add_tid);
634
635                 pipe_resource_reference(&buffers->buffers[slot], buffer);
636                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
637                                       (struct r600_resource*)buffer,
638                                       buffers->shader_usage, buffers->priority);
639                 buffers->desc.enabled_mask |= 1llu << slot;
640         } else {
641                 /* Clear the descriptor. */
642                 memset(buffers->desc.list + slot*4, 0, sizeof(uint32_t) * 4);
643                 buffers->desc.enabled_mask &= ~(1llu << slot);
644         }
645
646         buffers->desc.list_dirty = true;
647 }
648
649 /* STREAMOUT BUFFERS */
650
651 static void si_set_streamout_targets(struct pipe_context *ctx,
652                                      unsigned num_targets,
653                                      struct pipe_stream_output_target **targets,
654                                      const unsigned *offsets)
655 {
656         struct si_context *sctx = (struct si_context *)ctx;
657         struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
658         unsigned old_num_targets = sctx->b.streamout.num_targets;
659         unsigned i, bufidx;
660
661         /* We are going to unbind the buffers. Mark which caches need to be flushed. */
662         if (sctx->b.streamout.num_targets && sctx->b.streamout.begin_emitted) {
663                 /* Since streamout uses vector writes which go through TC L2
664                  * and most other clients can use TC L2 as well, we don't need
665                  * to flush it.
666                  *
667                  * The only case which requires flushing it is VGT DMA index
668                  * fetching, which is a rare case. Thus, flag the TC L2
669                  * dirtiness in the resource and handle it when index fetching
670                  * is used.
671                  */
672                 for (i = 0; i < sctx->b.streamout.num_targets; i++)
673                         if (sctx->b.streamout.targets[i])
674                                 r600_resource(sctx->b.streamout.targets[i]->b.buffer)->TC_L2_dirty = true;
675
676                 /* Invalidate the scalar cache in case a streamout buffer is
677                  * going to be used as a constant buffer.
678                  *
679                  * Invalidate TC L1, because streamout bypasses it (done by
680                  * setting GLC=1 in the store instruction), but it can contain
681                  * outdated data of streamout buffers.
682                  *
683                  * VS_PARTIAL_FLUSH is required if the buffers are going to be
684                  * used as an input immediately.
685                  */
686                 sctx->b.flags |= SI_CONTEXT_INV_SMEM_L1 |
687                                  SI_CONTEXT_INV_VMEM_L1 |
688                                  SI_CONTEXT_VS_PARTIAL_FLUSH;
689         }
690
691         /* Streamout buffers must be bound in 2 places:
692          * 1) in VGT by setting the VGT_STRMOUT registers
693          * 2) as shader resources
694          */
695
696         /* Set the VGT regs. */
697         r600_set_streamout_targets(ctx, num_targets, targets, offsets);
698
699         /* Set the shader resources. */
700         for (i = 0; i < num_targets; i++) {
701                 bufidx = SI_SO_BUF_OFFSET + i;
702
703                 if (targets[i]) {
704                         struct pipe_resource *buffer = targets[i]->buffer;
705                         uint64_t va = r600_resource(buffer)->gpu_address;
706
707                         /* Set the descriptor.
708                          *
709                          * On VI, the format must be non-INVALID, otherwise
710                          * the buffer will be considered not bound and store
711                          * instructions will be no-ops.
712                          */
713                         uint32_t *desc = buffers->desc.list + bufidx*4;
714                         desc[0] = va;
715                         desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
716                         desc[2] = 0xffffffff;
717                         desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
718                                   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
719                                   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
720                                   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
721                                   S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
722
723                         /* Set the resource. */
724                         pipe_resource_reference(&buffers->buffers[bufidx],
725                                                 buffer);
726                         radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
727                                               (struct r600_resource*)buffer,
728                                               buffers->shader_usage, buffers->priority);
729                         buffers->desc.enabled_mask |= 1llu << bufidx;
730                 } else {
731                         /* Clear the descriptor and unset the resource. */
732                         memset(buffers->desc.list + bufidx*4, 0,
733                                sizeof(uint32_t) * 4);
734                         pipe_resource_reference(&buffers->buffers[bufidx],
735                                                 NULL);
736                         buffers->desc.enabled_mask &= ~(1llu << bufidx);
737                 }
738         }
739         for (; i < old_num_targets; i++) {
740                 bufidx = SI_SO_BUF_OFFSET + i;
741                 /* Clear the descriptor and unset the resource. */
742                 memset(buffers->desc.list + bufidx*4, 0, sizeof(uint32_t) * 4);
743                 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
744                 buffers->desc.enabled_mask &= ~(1llu << bufidx);
745         }
746
747         buffers->desc.list_dirty = true;
748 }
749
750 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
751                                         uint32_t *desc, uint64_t old_buf_va,
752                                         struct pipe_resource *new_buf)
753 {
754         /* Retrieve the buffer offset from the descriptor. */
755         uint64_t old_desc_va =
756                 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
757
758         assert(old_buf_va <= old_desc_va);
759         uint64_t offset_within_buffer = old_desc_va - old_buf_va;
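        /* Example with hypothetical addresses: old_buf_va=0x10000 and a
         * descriptor pointing at 0x10100 give offset_within_buffer=0x100,
         * so the rebuilt descriptor points 0x100 bytes into the new buffer. */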
760
761         /* Update the descriptor. */
762         uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
763
764         desc[0] = va;
765         desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
766                   S_008F04_BASE_ADDRESS_HI(va >> 32);
767 }
768
769 /* BUFFER DISCARD/INVALIDATION */
770
771 /* Reallocate a buffer and update all resource bindings where the buffer is
772  * bound.
773  *
774  * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
775  * idle by discarding its contents. Apps usually tell us when to do this using
776  * map_buffer flags, for example.
777  */
778 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
779 {
780         struct si_context *sctx = (struct si_context*)ctx;
781         struct r600_resource *rbuffer = r600_resource(buf);
782         unsigned i, shader, alignment = rbuffer->buf->alignment;
783         uint64_t old_va = rbuffer->gpu_address;
784         unsigned num_elems = sctx->vertex_elements ?
785                                        sctx->vertex_elements->count : 0;
786         struct si_sampler_view *view;
787
788         /* Reallocate the buffer in the same pipe_resource. */
789         r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
790                            alignment, TRUE);
791
792         /* We changed the buffer, now we need to bind it where the old one
793          * was bound. This consists of 2 things:
794          *   1) Updating the resource descriptor and dirtying it.
795          *   2) Adding a relocation to the CS, so that it's usable.
796          */
797
798         /* Vertex buffers. */
799         for (i = 0; i < num_elems; i++) {
800                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
801
802                 if (vb >= Elements(sctx->vertex_buffer))
803                         continue;
804                 if (!sctx->vertex_buffer[vb].buffer)
805                         continue;
806
807                 if (sctx->vertex_buffer[vb].buffer == buf) {
808                         sctx->vertex_buffers_dirty = true;
809                         break;
810                 }
811         }
812
813         /* Read/Write buffers. */
814         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
815                 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
816                 uint64_t mask = buffers->desc.enabled_mask;
817
818                 while (mask) {
819                         i = u_bit_scan64(&mask);
820                         if (buffers->buffers[i] == buf) {
821                                 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
822                                                             old_va, buf);
823                                 buffers->desc.list_dirty = true;
824
825                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
826                                                       rbuffer, buffers->shader_usage,
827                                                       buffers->priority);
828
829                                 if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
830                                         /* Update the streamout state. */
831                                         if (sctx->b.streamout.begin_emitted) {
832                                                 r600_emit_streamout_end(&sctx->b);
833                                         }
834                                         sctx->b.streamout.append_bitmask =
835                                                 sctx->b.streamout.enabled_mask;
836                                         r600_streamout_buffers_dirty(&sctx->b);
837                                 }
838                         }
839                 }
840         }
841
842         /* Constant buffers. */
843         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
844                 struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
845                 uint64_t mask = buffers->desc.enabled_mask;
846
847                 while (mask) {
848                         unsigned i = u_bit_scan64(&mask);
849                         if (buffers->buffers[i] == buf) {
850                                 si_desc_reset_buffer_offset(ctx, buffers->desc.list + i*4,
851                                                             old_va, buf);
852                                 buffers->desc.list_dirty = true;
853
854                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
855                                                       rbuffer, buffers->shader_usage,
856                                                       buffers->priority);
857                         }
858                 }
859         }
860
861         /* Texture buffers - update virtual addresses in sampler view descriptors. */
862         LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
863                 if (view->base.texture == buf) {
864                         si_desc_reset_buffer_offset(ctx, &view->state[4], old_va, buf);
865                 }
866         }
867         /* Texture buffers - update bindings. */
868         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
869                 struct si_sampler_views *views = &sctx->samplers[shader].views;
870                 uint64_t mask = views->desc.enabled_mask;
871
872                 while (mask) {
873                         unsigned i = u_bit_scan64(&mask);
874                         if (views->views[i]->texture == buf) {
875                                 si_desc_reset_buffer_offset(ctx,
876                                                             views->desc.list +
877                                                             i * 16 + 4,
878                                                             old_va, buf);
879                                 views->desc.list_dirty = true;
880
881                                 radeon_add_to_buffer_list(&sctx->b, &sctx->b.gfx,
882                                                       rbuffer, RADEON_USAGE_READ,
883                                                       RADEON_PRIO_SAMPLER_BUFFER);
884                         }
885                 }
886         }
887 }
888
889 /* SHADER USER DATA */
890
891 static void si_mark_shader_pointers_dirty(struct si_context *sctx,
892                                           unsigned shader)
893 {
894         sctx->const_buffers[shader].desc.pointer_dirty = true;
895         sctx->rw_buffers[shader].desc.pointer_dirty = true;
896         sctx->samplers[shader].views.desc.pointer_dirty = true;
897
898         if (shader == PIPE_SHADER_VERTEX)
899                 sctx->vertex_buffers.pointer_dirty = true;
900
901         si_mark_atom_dirty(sctx, &sctx->shader_userdata.atom);
902 }
903
904 static void si_shader_userdata_begin_new_cs(struct si_context *sctx)
905 {
906         int i;
907
908         for (i = 0; i < SI_NUM_SHADERS; i++) {
909                 si_mark_shader_pointers_dirty(sctx, i);
910         }
911 }
912
913 /* Set a base register address for user data constants in the given shader.
914  * This assigns a mapping from PIPE_SHADER_* to SPI_SHADER_USER_DATA_*.
915  */
916 static void si_set_user_data_base(struct si_context *sctx,
917                                   unsigned shader, uint32_t new_base)
918 {
919         uint32_t *base = &sctx->shader_userdata.sh_base[shader];
920
921         if (*base != new_base) {
922                 *base = new_base;
923
924                 if (new_base)
925                         si_mark_shader_pointers_dirty(sctx, shader);
926         }
927 }
928
929 /* This must be called when these shaders are changed from non-NULL to NULL
930  * and vice versa:
931  * - geometry shader
932  * - tessellation control shader
933  * - tessellation evaluation shader
934  */
935 void si_shader_change_notify(struct si_context *sctx)
936 {
937         /* VS can be bound as VS, ES, or LS. */
938         if (sctx->tes_shader.cso)
939                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
940                                       R_00B530_SPI_SHADER_USER_DATA_LS_0);
941         else if (sctx->gs_shader.cso)
942                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
943                                       R_00B330_SPI_SHADER_USER_DATA_ES_0);
944         else
945                 si_set_user_data_base(sctx, PIPE_SHADER_VERTEX,
946                                       R_00B130_SPI_SHADER_USER_DATA_VS_0);
947
948         /* TES can be bound as ES, VS, or not bound. */
949         if (sctx->tes_shader.cso) {
950                 if (sctx->gs_shader.cso)
951                         si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
952                                               R_00B330_SPI_SHADER_USER_DATA_ES_0);
953                 else
954                         si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL,
955                                               R_00B130_SPI_SHADER_USER_DATA_VS_0);
956         } else {
957                 si_set_user_data_base(sctx, PIPE_SHADER_TESS_EVAL, 0);
958         }
959 }
960
961 static void si_emit_shader_pointer(struct si_context *sctx,
962                                    struct si_descriptors *desc,
963                                    unsigned sh_base, bool keep_dirty)
964 {
965         struct radeon_winsys_cs *cs = sctx->b.gfx.cs;
966         uint64_t va;
967
968         if (!desc->pointer_dirty || !desc->buffer)
969                 return;
970
971         va = desc->buffer->gpu_address +
972              desc->buffer_offset;
973
974         radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
975         radeon_emit(cs, (sh_base + desc->shader_userdata_offset - SI_SH_REG_OFFSET) >> 2);
976         radeon_emit(cs, va);
977         radeon_emit(cs, va >> 32);
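        /* The two dwords above are the low and high halves of the 64-bit
         * descriptor list address, loaded into consecutive USER_DATA SGPRs
         * starting at sh_base + shader_userdata_offset. */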
978
979         desc->pointer_dirty = keep_dirty;
980 }
981
982 void si_emit_shader_userdata(struct si_context *sctx, struct r600_atom *atom)
983 {
984         unsigned i;
985         uint32_t *sh_base = sctx->shader_userdata.sh_base;
986
987         if (sctx->gs_shader.cso) {
988                 /* The VS copy shader needs these for clipping, streamout, and rings. */
989                 unsigned vs_base = R_00B130_SPI_SHADER_USER_DATA_VS_0;
990                 unsigned i = PIPE_SHADER_VERTEX;
991
992                 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, vs_base, true);
993                 si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, vs_base, true);
994
995                 if (sctx->tes_shader.cso) {
996                         /* The TESSEVAL shader needs this for the ESGS ring buffer. */
997                         si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc,
998                                                R_00B330_SPI_SHADER_USER_DATA_ES_0, true);
999                 }
1000         } else if (sctx->tes_shader.cso) {
1001                 /* The TESSEVAL shader needs this for streamout. */
1002                 si_emit_shader_pointer(sctx, &sctx->rw_buffers[PIPE_SHADER_VERTEX].desc,
1003                                        R_00B130_SPI_SHADER_USER_DATA_VS_0, true);
1004         }
1005
1006         for (i = 0; i < SI_NUM_SHADERS; i++) {
1007                 unsigned base = sh_base[i];
1008
1009                 if (!base)
1010                         continue;
1011
1012                 if (i != PIPE_SHADER_TESS_EVAL)
1013                         si_emit_shader_pointer(sctx, &sctx->rw_buffers[i].desc, base, false);
1014
1015                 si_emit_shader_pointer(sctx, &sctx->const_buffers[i].desc, base, false);
1016                 si_emit_shader_pointer(sctx, &sctx->samplers[i].views.desc, base, false);
1017         }
1018         si_emit_shader_pointer(sctx, &sctx->vertex_buffers, sh_base[PIPE_SHADER_VERTEX], false);
1019 }
1020
1021 /* INIT/DEINIT/UPLOAD */
1022
1023 void si_init_all_descriptors(struct si_context *sctx)
1024 {
1025         int i;
1026
1027         for (i = 0; i < SI_NUM_SHADERS; i++) {
1028                 si_init_buffer_resources(&sctx->const_buffers[i],
1029                                          SI_NUM_CONST_BUFFERS, SI_SGPR_CONST_BUFFERS,
1030                                          RADEON_USAGE_READ, RADEON_PRIO_CONST_BUFFER);
1031                 si_init_buffer_resources(&sctx->rw_buffers[i],
1032                                          SI_NUM_RW_BUFFERS, SI_SGPR_RW_BUFFERS,
1033                                          RADEON_USAGE_READWRITE, RADEON_PRIO_RINGS_STREAMOUT);
1034
1035                 si_init_descriptors(&sctx->samplers[i].views.desc,
1036                                     SI_SGPR_SAMPLERS, 16, SI_NUM_SAMPLERS);
1037         }
1038
1039         si_init_descriptors(&sctx->vertex_buffers, SI_SGPR_VERTEX_BUFFERS,
1040                             4, SI_NUM_VERTEX_BUFFERS);
1041
1042         /* Set pipe_context functions. */
1043         sctx->b.b.bind_sampler_states = si_bind_sampler_states;
1044         sctx->b.b.set_constant_buffer = si_set_constant_buffer;
1045         sctx->b.b.set_sampler_views = si_set_sampler_views;
1046         sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1047         sctx->b.invalidate_buffer = si_invalidate_buffer;
1048
1049         /* Shader user data. */
1050         si_init_atom(sctx, &sctx->shader_userdata.atom, &sctx->atoms.s.shader_userdata,
1051                      si_emit_shader_userdata);
1052
1053         /* Set default and immutable mappings. */
1054         si_set_user_data_base(sctx, PIPE_SHADER_VERTEX, R_00B130_SPI_SHADER_USER_DATA_VS_0);
1055         si_set_user_data_base(sctx, PIPE_SHADER_TESS_CTRL, R_00B430_SPI_SHADER_USER_DATA_HS_0);
1056         si_set_user_data_base(sctx, PIPE_SHADER_GEOMETRY, R_00B230_SPI_SHADER_USER_DATA_GS_0);
1057         si_set_user_data_base(sctx, PIPE_SHADER_FRAGMENT, R_00B030_SPI_SHADER_USER_DATA_PS_0);
1058 }
1059
1060 bool si_upload_shader_descriptors(struct si_context *sctx)
1061 {
1062         int i;
1063
1064         for (i = 0; i < SI_NUM_SHADERS; i++) {
1065                 if (!si_upload_descriptors(sctx, &sctx->const_buffers[i].desc) ||
1066                     !si_upload_descriptors(sctx, &sctx->rw_buffers[i].desc) ||
1067                     !si_upload_descriptors(sctx, &sctx->samplers[i].views.desc))
1068                         return false;
1069         }
1070         return si_upload_vertex_buffer_descriptors(sctx);
1071 }
1072
1073 void si_release_all_descriptors(struct si_context *sctx)
1074 {
1075         int i;
1076
1077         for (i = 0; i < SI_NUM_SHADERS; i++) {
1078                 si_release_buffer_resources(&sctx->const_buffers[i]);
1079                 si_release_buffer_resources(&sctx->rw_buffers[i]);
1080                 si_release_sampler_views(&sctx->samplers[i].views);
1081         }
1082         si_release_descriptors(&sctx->vertex_buffers);
1083 }
1084
1085 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1086 {
1087         int i;
1088
1089         for (i = 0; i < SI_NUM_SHADERS; i++) {
1090                 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1091                 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
1092                 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1093         }
1094         si_vertex_buffers_begin_new_cs(sctx);
1095         si_shader_userdata_begin_new_cs(sctx);
1096 }