radeonsi: Clear sampler view flags when binding a buffer
[android-x86/external-mesa.git] src/gallium/drivers/radeonsi/si_descriptors.c
1 /*
2  * Copyright 2013 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  *
23  * Authors:
24  *      Marek Olšák <marek.olsak@amd.com>
25  */
26
27 /* Resource binding slots and sampler states (each described with 8 or 4 dwords)
28  * live in memory on SI.
29  *
30  * This file is responsible for managing lists of resources and sampler states
31  * in memory and binding them, which means updating those structures in memory.
32  *
33  * There is also code for updating shader pointers to resources and sampler
34  * states. CP DMA functions are here too.
35  */
36
37 #include "radeon/r600_cs.h"
38 #include "si_pipe.h"
39 #include "si_shader.h"
40 #include "sid.h"
41
42 #include "util/u_memory.h"
43 #include "util/u_upload_mgr.h"
44
45 #define SI_NUM_CONTEXTS 16
46
47 static uint32_t null_desc[8]; /* zeros */
48
49 /* Set this if you want the 3D engine to wait until CP DMA is done.
50  * It should be set on the last CP DMA packet. */
51 #define R600_CP_DMA_SYNC        (1 << 0) /* R600+ */
52
53 /* Set this if the source data was used as a destination in a previous CP DMA
54  * packet. It's for preventing a read-after-write (RAW) hazard between two
55  * CP DMA packets. */
56 #define SI_CP_DMA_RAW_WAIT      (1 << 1) /* SI+ */
57
58 /* Emit a CP DMA packet to do a copy from one buffer to another.
59  * The size must fit in bits [20:0].
60  */
61 static void si_emit_cp_dma_copy_buffer(struct si_context *sctx,
62                                        uint64_t dst_va, uint64_t src_va,
63                                        unsigned size, unsigned flags)
64 {
65         struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
66         uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
67         uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
68
69         assert(size);
70         assert((size & ((1<<21)-1)) == size);
71
72         if (sctx->b.chip_class >= CIK) {
73                 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
74                 radeon_emit(cs, sync_flag);             /* CP_SYNC [31] */
75                 radeon_emit(cs, src_va);                /* SRC_ADDR_LO [31:0] */
76                 radeon_emit(cs, src_va >> 32);          /* SRC_ADDR_HI [31:0] */
77                 radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
78                 radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [31:0] */
79                 radeon_emit(cs, size | raw_wait);       /* COMMAND [29:22] | BYTE_COUNT [20:0] */
80         } else {
81                 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
82                 radeon_emit(cs, src_va);                        /* SRC_ADDR_LO [31:0] */
83                 radeon_emit(cs, sync_flag | ((src_va >> 32) & 0xffff)); /* CP_SYNC [31] | SRC_ADDR_HI [15:0] */
84                 radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
85                 radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
86                 radeon_emit(cs, size | raw_wait);               /* COMMAND [29:22] | BYTE_COUNT [20:0] */
87         }
88 }
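/* Illustrative note (not part of the original source): the last dword packs
 * the COMMAND bits [29:22] together with BYTE_COUNT [20:0], which is why the
 * size must fit in 21 bits.  A caller copying a larger range splits it into
 * chunks, roughly like the loop in si_copy_buffer() further down:
 *
 *     while (size) {
 *             unsigned count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
 *             si_emit_cp_dma_copy_buffer(sctx, dst_va, src_va, count, flags);
 *             dst_va += count;
 *             src_va += count;
 *             size -= count;
 *     }
 */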
89
90 /* Emit a CP DMA packet to clear a buffer. The size must fit in bits [20:0]. */
91 static void si_emit_cp_dma_clear_buffer(struct si_context *sctx,
92                                         uint64_t dst_va, unsigned size,
93                                         uint32_t clear_value, unsigned flags)
94 {
95         struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
96         uint32_t sync_flag = flags & R600_CP_DMA_SYNC ? PKT3_CP_DMA_CP_SYNC : 0;
97         uint32_t raw_wait = flags & SI_CP_DMA_RAW_WAIT ? PKT3_CP_DMA_CMD_RAW_WAIT : 0;
98
99         assert(size);
100         assert((size & ((1<<21)-1)) == size);
101
102         if (sctx->b.chip_class >= CIK) {
103                 radeon_emit(cs, PKT3(PKT3_DMA_DATA, 5, 0));
104                 radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
105                 radeon_emit(cs, clear_value);           /* DATA [31:0] */
106                 radeon_emit(cs, 0);
107                 radeon_emit(cs, dst_va);                /* DST_ADDR_LO [31:0] */
108                 radeon_emit(cs, dst_va >> 32);          /* DST_ADDR_HI [31:0] */
109                 radeon_emit(cs, size | raw_wait);       /* COMMAND [29:22] | BYTE_COUNT [20:0] */
110         } else {
111                 radeon_emit(cs, PKT3(PKT3_CP_DMA, 4, 0));
112                 radeon_emit(cs, clear_value);           /* DATA [31:0] */
113                 radeon_emit(cs, sync_flag | PKT3_CP_DMA_SRC_SEL(2)); /* CP_SYNC [31] | SRC_SEL[30:29] */
114                 radeon_emit(cs, dst_va);                        /* DST_ADDR_LO [31:0] */
115                 radeon_emit(cs, (dst_va >> 32) & 0xffff);       /* DST_ADDR_HI [15:0] */
116                 radeon_emit(cs, size | raw_wait);               /* COMMAND [29:22] | BYTE_COUNT [20:0] */
117         }
118 }
119
120 static void si_init_descriptors(struct si_context *sctx,
121                                 struct si_descriptors *desc,
122                                 unsigned shader_userdata_reg,
123                                 unsigned element_dw_size,
124                                 unsigned num_elements,
125                                 void (*emit_func)(struct si_context *ctx, struct r600_atom *state))
126 {
127         assert(num_elements <= sizeof(desc->enabled_mask)*8);
128         assert(num_elements <= sizeof(desc->dirty_mask)*8);
129
130         desc->atom.emit = (void*)emit_func;
131         desc->shader_userdata_reg = shader_userdata_reg;
132         desc->element_dw_size = element_dw_size;
133         desc->num_elements = num_elements;
134         desc->context_size = num_elements * element_dw_size * 4;
135
136         desc->buffer = (struct r600_resource*)
137                 pipe_buffer_create(sctx->b.b.screen, PIPE_BIND_CUSTOM,
138                                    PIPE_USAGE_DEFAULT,
139                                    SI_NUM_CONTEXTS * desc->context_size);
140
141         r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, desc->buffer,
142                               RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
143
144         /* We don't check for CS space here, because this should be called
145          * only once at context initialization. */
146         si_emit_cp_dma_clear_buffer(sctx, desc->buffer->gpu_address,
147                                     desc->buffer->b.b.width0, 0,
148                                     R600_CP_DMA_SYNC);
149 }
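/* Layout sketch (illustrative, not part of the original source): the buffer
 * allocated above holds SI_NUM_CONTEXTS copies of the descriptor array, so a
 * dirty update can be written into the next slot while the GPU may still be
 * reading the previous one:
 *
 *     slot 0:  [elem 0][elem 1] ... [elem N-1]   <- context_size bytes
 *     slot 1:  [elem 0][elem 1] ... [elem N-1]
 *     ...
 *     slot 15: [elem 0][elem 1] ... [elem N-1]
 *
 * si_emit_descriptors() copies the current slot to the next one with CP DMA,
 * patches only the dirty elements with WRITE_DATA, and then repoints the
 * shader user data SGPRs at the new slot.
 */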
150
151 static void si_release_descriptors(struct si_descriptors *desc)
152 {
153         pipe_resource_reference((struct pipe_resource**)&desc->buffer, NULL);
154 }
155
156 static void si_update_descriptors(struct si_context *sctx,
157                                   struct si_descriptors *desc)
158 {
159         if (desc->dirty_mask) {
160                 desc->atom.num_dw =
161                         7 + /* copy */
162                         (4 + desc->element_dw_size) * util_bitcount(desc->dirty_mask) + /* update */
163                         4; /* pointer update */
164
165                 if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
166                     desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0)
167                         desc->atom.num_dw += 4; /* second pointer update */
168
169                 desc->atom.dirty = true;
170                 /* The descriptors are read with the K cache. */
171                 sctx->b.flags |= R600_CONTEXT_INV_CONST_CACHE;
172         } else {
173                 desc->atom.dirty = false;
174         }
175 }
176
177 static void si_emit_shader_pointer(struct si_context *sctx,
178                                    struct r600_atom *atom)
179 {
180         struct si_descriptors *desc = (struct si_descriptors*)atom;
181         struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
182         uint64_t va = desc->buffer->gpu_address +
183                       desc->current_context_id * desc->context_size +
184                       desc->buffer_offset;
185
186         radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
187         radeon_emit(cs, (desc->shader_userdata_reg - SI_SH_REG_OFFSET) >> 2);
188         radeon_emit(cs, va);
189         radeon_emit(cs, va >> 32);
190
191         if (desc->shader_userdata_reg >= R_00B130_SPI_SHADER_USER_DATA_VS_0 &&
192             desc->shader_userdata_reg < R_00B230_SPI_SHADER_USER_DATA_GS_0) {
193                 radeon_emit(cs, PKT3(PKT3_SET_SH_REG, 2, 0));
194                 radeon_emit(cs, (desc->shader_userdata_reg +
195                                  (R_00B330_SPI_SHADER_USER_DATA_ES_0 -
196                                   R_00B130_SPI_SHADER_USER_DATA_VS_0) -
197                                  SI_SH_REG_OFFSET) >> 2);
198                 radeon_emit(cs, va);
199                 radeon_emit(cs, va >> 32);
200         }
201 }
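/* Note (assumption, not in the original source): the VS user data range is
 * mirrored into the ES range above because the API vertex shader runs on the
 * hardware ES stage when a geometry shader is active, so both SGPR ranges
 * must see the same descriptor pointer.
 */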
202
203 static void si_emit_descriptors(struct si_context *sctx,
204                                 struct si_descriptors *desc,
205                                 uint32_t **descriptors)
206 {
207         struct radeon_winsys_cs *cs = sctx->b.rings.gfx.cs;
208         uint64_t va_base;
209         int packet_start;
210         int packet_size = 0;
211         int last_index = desc->num_elements; /* point to a non-existing element */
212         unsigned dirty_mask = desc->dirty_mask;
213         unsigned new_context_id = (desc->current_context_id + 1) % SI_NUM_CONTEXTS;
214
215         assert(dirty_mask);
216
217         va_base = desc->buffer->gpu_address;
218
219         /* Copy the descriptors to a new context slot. */
220         /* XXX Consider using TC or L2 for this copy on CIK. */
221         si_emit_cp_dma_copy_buffer(sctx,
222                                    va_base + new_context_id * desc->context_size,
223                                    va_base + desc->current_context_id * desc->context_size,
224                                    desc->context_size, R600_CP_DMA_SYNC);
225
226         va_base += new_context_id * desc->context_size;
227
228         /* Update the descriptors.
229          * Updates of consecutive descriptors are merged into a single WRITE_DATA packet.
230          *
231          * XXX When unbinding lots of resources, consider clearing the memory
232          *     with CP DMA instead of emitting zeros.
233          */
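        /* Example (illustrative): with element_dw_size = 4 and
         * dirty_mask = 0b1011, elements 0 and 1 are merged: the
         * PKT3_WRITE_DATA header written at packet_start is rewritten in
         * place so that one packet carries both elements' 8 descriptor
         * dwords.  Element 3 then starts a new packet because it is not
         * adjacent to the previously written index.
         */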
234         while (dirty_mask) {
235                 int i = u_bit_scan(&dirty_mask);
236
237                 assert(i < desc->num_elements);
238
239                 if (last_index+1 == i && packet_size) {
240                         /* Append new data at the end of the last packet. */
241                         packet_size += desc->element_dw_size;
242                         cs->buf[packet_start] = PKT3(PKT3_WRITE_DATA, packet_size, 0);
243                 } else {
244                         /* Start a new packet. */
245                         uint64_t va = va_base + i * desc->element_dw_size * 4;
246
247                         packet_start = cs->cdw;
248                         packet_size = 2 + desc->element_dw_size;
249
250                         radeon_emit(cs, PKT3(PKT3_WRITE_DATA, packet_size, 0));
251                         radeon_emit(cs, PKT3_WRITE_DATA_DST_SEL(PKT3_WRITE_DATA_DST_SEL_TC_OR_L2) |
252                                              PKT3_WRITE_DATA_WR_CONFIRM |
253                                              PKT3_WRITE_DATA_ENGINE_SEL(PKT3_WRITE_DATA_ENGINE_SEL_ME));
254                         radeon_emit(cs, va & 0xFFFFFFFFUL);
255                         radeon_emit(cs, (va >> 32UL) & 0xFFFFFFFFUL);
256                 }
257
258                 radeon_emit_array(cs, descriptors[i], desc->element_dw_size);
259
260                 last_index = i;
261         }
262
263         desc->dirty_mask = 0;
264         desc->current_context_id = new_context_id;
265
266         /* Now update the shader userdata pointer. */
267         si_emit_shader_pointer(sctx, &desc->atom);
268 }
269
270 static unsigned si_get_shader_user_data_base(unsigned shader)
271 {
272         switch (shader) {
273         case PIPE_SHADER_VERTEX:
274                 return R_00B130_SPI_SHADER_USER_DATA_VS_0;
275         case PIPE_SHADER_GEOMETRY:
276                 return R_00B230_SPI_SHADER_USER_DATA_GS_0;
277         case PIPE_SHADER_FRAGMENT:
278                 return R_00B030_SPI_SHADER_USER_DATA_PS_0;
279         default:
280                 assert(0);
281                 return 0;
282         }
283 }
284
285 /* SAMPLER VIEWS */
286
287 static void si_emit_sampler_views(struct si_context *sctx, struct r600_atom *atom)
288 {
289         struct si_sampler_views *views = (struct si_sampler_views*)atom;
290
291         si_emit_descriptors(sctx, &views->desc, views->desc_data);
292 }
293
294 static void si_init_sampler_views(struct si_context *sctx,
295                                   struct si_sampler_views *views,
296                                   unsigned shader)
297 {
298         si_init_descriptors(sctx, &views->desc,
299                             si_get_shader_user_data_base(shader) +
300                             SI_SGPR_RESOURCE * 4,
301                             8, SI_NUM_SAMPLER_VIEWS, si_emit_sampler_views);
302 }
303
304 static void si_release_sampler_views(struct si_sampler_views *views)
305 {
306         int i;
307
308         for (i = 0; i < Elements(views->views); i++) {
309                 pipe_sampler_view_reference(&views->views[i], NULL);
310         }
311         si_release_descriptors(&views->desc);
312 }
313
314 static enum radeon_bo_priority si_get_resource_ro_priority(struct r600_resource *res)
315 {
316         if (res->b.b.target == PIPE_BUFFER)
317                 return RADEON_PRIO_SHADER_BUFFER_RO;
318
319         if (res->b.b.nr_samples > 1)
320                 return RADEON_PRIO_SHADER_TEXTURE_MSAA;
321
322         return RADEON_PRIO_SHADER_TEXTURE_RO;
323 }
324
325 static void si_sampler_views_begin_new_cs(struct si_context *sctx,
326                                           struct si_sampler_views *views)
327 {
328         unsigned mask = views->desc.enabled_mask;
329
330         /* Add relocations to the CS. */
331         while (mask) {
332                 int i = u_bit_scan(&mask);
333                 struct si_sampler_view *rview =
334                         (struct si_sampler_view*)views->views[i];
335
336                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
337                                       rview->resource, RADEON_USAGE_READ,
338                                       si_get_resource_ro_priority(rview->resource));
339         }
340
341         r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, views->desc.buffer,
342                               RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
343
344         si_emit_shader_pointer(sctx, &views->desc.atom);
345 }
346
347 static void si_set_sampler_view(struct si_context *sctx, unsigned shader,
348                                 unsigned slot, struct pipe_sampler_view *view,
349                                 unsigned *view_desc)
350 {
351         struct si_sampler_views *views = &sctx->samplers[shader].views;
352
353         if (views->views[slot] == view)
354                 return;
355
356         if (view) {
357                 struct si_sampler_view *rview =
358                         (struct si_sampler_view*)view;
359
360                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
361                                       rview->resource, RADEON_USAGE_READ,
362                                       si_get_resource_ro_priority(rview->resource));
363
364                 pipe_sampler_view_reference(&views->views[slot], view);
365                 views->desc_data[slot] = view_desc;
366                 views->desc.enabled_mask |= 1 << slot;
367         } else {
368                 pipe_sampler_view_reference(&views->views[slot], NULL);
369                 views->desc_data[slot] = null_desc;
370                 views->desc.enabled_mask &= ~(1 << slot);
371         }
372
373         views->desc.dirty_mask |= 1 << slot;
374 }
375
376 static void si_set_sampler_views(struct pipe_context *ctx,
377                                  unsigned shader, unsigned start,
378                                  unsigned count,
379                                  struct pipe_sampler_view **views)
380 {
381         struct si_context *sctx = (struct si_context *)ctx;
382         struct si_textures_info *samplers = &sctx->samplers[shader];
383         struct si_sampler_view **rviews = (struct si_sampler_view **)views;
384         int i;
385
386         if (!count || shader >= SI_NUM_SHADERS)
387                 return;
388
389         for (i = 0; i < count; i++) {
390                 unsigned slot = start + i;
391
392                 if (!views[i]) {
393                         samplers->depth_texture_mask &= ~(1 << slot);
394                         samplers->compressed_colortex_mask &= ~(1 << slot);
395                         si_set_sampler_view(sctx, shader, slot, NULL, NULL);
396                         si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
397                                             NULL, NULL);
398                         continue;
399                 }
400
401                 si_set_sampler_view(sctx, shader, slot, views[i], rviews[i]->state);
402
403                 if (views[i]->texture->target != PIPE_BUFFER) {
404                         struct r600_texture *rtex =
405                                 (struct r600_texture*)views[i]->texture;
406
407                         if (rtex->is_depth && !rtex->is_flushing_texture) {
408                                 samplers->depth_texture_mask |= 1 << slot;
409                         } else {
410                                 samplers->depth_texture_mask &= ~(1 << slot);
411                         }
412                         if (rtex->cmask.size || rtex->fmask.size) {
413                                 samplers->compressed_colortex_mask |= 1 << slot;
414                         } else {
415                                 samplers->compressed_colortex_mask &= ~(1 << slot);
416                         }
417
418                         if (rtex->fmask.size) {
419                                 si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
420                                                     views[i], rviews[i]->fmask_state);
421                         } else {
422                                 si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
423                                                     NULL, NULL);
424                         }
425                 } else {
426                         samplers->depth_texture_mask &= ~(1 << slot);
427                         samplers->compressed_colortex_mask &= ~(1 << slot);
428                         si_set_sampler_view(sctx, shader, SI_FMASK_TEX_OFFSET + slot,
429                                             NULL, NULL);
430                 }
431         }
432
433         sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
434         si_update_descriptors(sctx, &samplers->views.desc);
435 }
436
437 /* SAMPLER STATES */
438
439 static void si_emit_sampler_states(struct si_context *sctx, struct r600_atom *atom)
440 {
441         struct si_sampler_states *states = (struct si_sampler_states*)atom;
442
443         si_emit_descriptors(sctx, &states->desc, states->desc_data);
444 }
445
446 static void si_sampler_states_begin_new_cs(struct si_context *sctx,
447                                            struct si_sampler_states *states)
448 {
449         r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, states->desc.buffer,
450                               RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_DATA);
451         si_emit_shader_pointer(sctx, &states->desc.atom);
452 }
453
454 void si_set_sampler_descriptors(struct si_context *sctx, unsigned shader,
455                                 unsigned start, unsigned count, void **states)
456 {
457         struct si_sampler_states *samplers = &sctx->samplers[shader].states;
458         struct si_sampler_state **sstates = (struct si_sampler_state**)states;
459         int i;
460
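        /* The first two states are remembered here, apparently so that the
         * blitter save/restore path (si_blitter_begin) can rebind them; this
         * is an editor's assumption, not a note from the original source. */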
461         if (start == 0)
462                 samplers->saved_states[0] = states[0];
463         if (start == 1)
464                 samplers->saved_states[1] = states[0];
465         else if (start == 0 && count >= 2)
466                 samplers->saved_states[1] = states[1];
467
468         for (i = 0; i < count; i++) {
469                 unsigned slot = start + i;
470
471                 if (!sstates[i]) {
472                         samplers->desc.dirty_mask &= ~(1 << slot);
473                         continue;
474                 }
475
476                 samplers->desc_data[slot] = sstates[i]->val;
477                 samplers->desc.dirty_mask |= 1 << slot;
478         }
479
480         si_update_descriptors(sctx, &samplers->desc);
481 }
482
483 /* BUFFER RESOURCES */
484
485 static void si_emit_buffer_resources(struct si_context *sctx, struct r600_atom *atom)
486 {
487         struct si_buffer_resources *buffers = (struct si_buffer_resources*)atom;
488
489         si_emit_descriptors(sctx, &buffers->desc, buffers->desc_data);
490 }
491
492 static void si_init_buffer_resources(struct si_context *sctx,
493                                      struct si_buffer_resources *buffers,
494                                      unsigned num_buffers, unsigned shader,
495                                      unsigned shader_userdata_index,
496                                      enum radeon_bo_usage shader_usage,
497                                      enum radeon_bo_priority priority)
498 {
499         int i;
500
501         buffers->num_buffers = num_buffers;
502         buffers->shader_usage = shader_usage;
503         buffers->priority = priority;
504         buffers->buffers = CALLOC(num_buffers, sizeof(struct pipe_resource*));
505         buffers->desc_storage = CALLOC(num_buffers, sizeof(uint32_t) * 4);
506
507         /* si_emit_descriptors only accepts an array of pointers, one per
508          * descriptor, so build that indirection array here. */
509         buffers->desc_data = CALLOC(num_buffers, sizeof(uint32_t*));
510         for (i = 0; i < num_buffers; i++) {
511                 buffers->desc_data[i] = &buffers->desc_storage[i*4];
512         }
513
514         si_init_descriptors(sctx, &buffers->desc,
515                             si_get_shader_user_data_base(shader) +
516                             shader_userdata_index*4, 4, num_buffers,
517                             si_emit_buffer_resources);
518 }
519
520 static void si_release_buffer_resources(struct si_buffer_resources *buffers)
521 {
522         int i;
523
524         for (i = 0; i < buffers->num_buffers; i++) {
525                 pipe_resource_reference(&buffers->buffers[i], NULL);
526         }
527
528         FREE(buffers->buffers);
529         FREE(buffers->desc_storage);
530         FREE(buffers->desc_data);
531         si_release_descriptors(&buffers->desc);
532 }
533
534 static void si_buffer_resources_begin_new_cs(struct si_context *sctx,
535                                              struct si_buffer_resources *buffers)
536 {
537         unsigned mask = buffers->desc.enabled_mask;
538
539         /* Add relocations to the CS. */
540         while (mask) {
541                 int i = u_bit_scan(&mask);
542
543                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
544                                       (struct r600_resource*)buffers->buffers[i],
545                                       buffers->shader_usage, buffers->priority);
546         }
547
548         r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
549                               buffers->desc.buffer, RADEON_USAGE_READWRITE,
550                               RADEON_PRIO_SHADER_DATA);
551
552         si_emit_shader_pointer(sctx, &buffers->desc.atom);
553 }
554
555 /* VERTEX BUFFERS */
556
557 static void si_vertex_buffers_begin_new_cs(struct si_context *sctx)
558 {
559         struct si_descriptors *desc = &sctx->vertex_buffers;
560         int count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
561         int i;
562
563         for (i = 0; i < count; i++) {
564                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
565
566                 if (vb >= Elements(sctx->vertex_buffer))
567                         continue;
568                 if (!sctx->vertex_buffer[vb].buffer)
569                         continue;
570
571                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
572                                       (struct r600_resource*)sctx->vertex_buffer[vb].buffer,
573                                       RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
574         }
575         r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
576                               desc->buffer, RADEON_USAGE_READ,
577                               RADEON_PRIO_SHADER_DATA);
578
579         si_emit_shader_pointer(sctx, &desc->atom);
580 }
581
582 void si_update_vertex_buffers(struct si_context *sctx)
583 {
584         struct si_descriptors *desc = &sctx->vertex_buffers;
585         bool bound[SI_NUM_VERTEX_BUFFERS] = {};
586         unsigned i, count = sctx->vertex_elements ? sctx->vertex_elements->count : 0;
587         uint64_t va;
588         uint32_t *ptr;
589
590         if (!count)
591                 return;
592
593         /* Vertex buffer descriptors are the only ones which are uploaded
594          * directly through a staging buffer and don't go through
595          * the fine-grained upload path.
596          */
597         u_upload_alloc(sctx->b.uploader, 0, count * 16, &desc->buffer_offset,
598                        (struct pipe_resource**)&desc->buffer, (void**)&ptr);
599
600         r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
601                               desc->buffer, RADEON_USAGE_READ,
602                               RADEON_PRIO_SHADER_DATA);
603
604         assert(count <= SI_NUM_VERTEX_BUFFERS);
605         assert(desc->current_context_id == 0);
606
607         for (i = 0; i < count; i++) {
608                 struct pipe_vertex_element *ve = &sctx->vertex_elements->elements[i];
609                 struct pipe_vertex_buffer *vb;
610                 struct r600_resource *rbuffer;
611                 unsigned offset;
612                 uint32_t *desc = &ptr[i*4];
613
614                 if (ve->vertex_buffer_index >= Elements(sctx->vertex_buffer)) {
615                         memset(desc, 0, 16);
616                         continue;
617                 }
618
619                 vb = &sctx->vertex_buffer[ve->vertex_buffer_index];
620                 rbuffer = (struct r600_resource*)vb->buffer;
621                 if (rbuffer == NULL) {
622                         memset(desc, 0, 16);
623                         continue;
624                 }
625
626                 offset = vb->buffer_offset + ve->src_offset;
627                 va = rbuffer->gpu_address + offset;
628
629                 /* Fill in T# buffer resource description */
630                 desc[0] = va & 0xFFFFFFFF;
631                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
632                           S_008F04_STRIDE(vb->stride);
633                 if (vb->stride)
634                         /* Round up by rounding down and adding 1 */
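                        /* Worked example (illustrative, hypothetical values):
                         * width0 = 100, offset = 4, format_size = 16,
                         * stride = 12 gives (100 - 4 - 16) / 12 + 1 = 7
                         * records, i.e. elements starting at offsets
                         * 4, 16, ..., 76, the last one that still has
                         * format_size bytes before the end of the buffer. */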
635                         desc[2] = (vb->buffer->width0 - offset -
636                                    sctx->vertex_elements->format_size[i]) /
637                                   vb->stride + 1;
638                 else
639                         desc[2] = vb->buffer->width0 - offset;
640
641                 desc[3] = sctx->vertex_elements->rsrc_word3[i];
642
643                 if (!bound[ve->vertex_buffer_index]) {
644                         r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
645                                               (struct r600_resource*)vb->buffer,
646                                               RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
647                         bound[ve->vertex_buffer_index] = true;
648                 }
649         }
650
651         desc->atom.num_dw = 8; /* update 2 shader pointers (VS+ES) */
652         desc->atom.dirty = true;
653
654         /* Don't flush the const cache. It would have a very negative effect
655          * on performance (confirmed by testing). New descriptors are always
656          * uploaded to a fresh new buffer, so I don't think flushing the const
657          * cache is needed. */
658         sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE;
659 }
660
661
662 /* CONSTANT BUFFERS */
663
664 void si_upload_const_buffer(struct si_context *sctx, struct r600_resource **rbuffer,
665                             const uint8_t *ptr, unsigned size, uint32_t *const_offset)
666 {
667         void *tmp;
668
669         u_upload_alloc(sctx->b.uploader, 0, size, const_offset,
670                        (struct pipe_resource**)rbuffer, &tmp);
671         util_memcpy_cpu_to_le32(tmp, ptr, size);
672 }
673
674 static void si_set_constant_buffer(struct pipe_context *ctx, uint shader, uint slot,
675                                    struct pipe_constant_buffer *input)
676 {
677         struct si_context *sctx = (struct si_context *)ctx;
678         struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
679
680         if (shader >= SI_NUM_SHADERS)
681                 return;
682
683         assert(slot < buffers->num_buffers);
684         pipe_resource_reference(&buffers->buffers[slot], NULL);
685
686         /* CIK cannot unbind a constant buffer (S_BUFFER_LOAD is buggy
687          * with a NULL buffer). We need to use a dummy buffer instead. */
688         if (sctx->b.chip_class == CIK &&
689             (!input || (!input->buffer && !input->user_buffer)))
690                 input = &sctx->null_const_buf;
691
692         if (input && (input->buffer || input->user_buffer)) {
693                 struct pipe_resource *buffer = NULL;
694                 uint64_t va;
695
696                 /* Upload the user buffer if needed. */
697                 if (input->user_buffer) {
698                         unsigned buffer_offset;
699
700                         si_upload_const_buffer(sctx,
701                                                (struct r600_resource**)&buffer, input->user_buffer,
702                                                input->buffer_size, &buffer_offset);
703                         va = r600_resource(buffer)->gpu_address + buffer_offset;
704                 } else {
705                         pipe_resource_reference(&buffer, input->buffer);
706                         va = r600_resource(buffer)->gpu_address + input->buffer_offset;
707                 }
708
709                 /* Set the descriptor. */
710                 uint32_t *desc = buffers->desc_data[slot];
711                 desc[0] = va;
712                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
713                           S_008F04_STRIDE(0);
714                 desc[2] = input->buffer_size;
715                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
716                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
717                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
718                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
719                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
720                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32);
721
722                 buffers->buffers[slot] = buffer;
723                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
724                                       (struct r600_resource*)buffer,
725                                       buffers->shader_usage, buffers->priority);
726                 buffers->desc.enabled_mask |= 1 << slot;
727         } else {
728                 /* Clear the descriptor. */
729                 memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
730                 buffers->desc.enabled_mask &= ~(1 << slot);
731         }
732
733         buffers->desc.dirty_mask |= 1 << slot;
734         si_update_descriptors(sctx, &buffers->desc);
735 }
736
737 /* RING BUFFERS */
738
739 void si_set_ring_buffer(struct pipe_context *ctx, uint shader, uint slot,
740                         struct pipe_resource *buffer,
741                         unsigned stride, unsigned num_records,
742                         bool add_tid, bool swizzle,
743                         unsigned element_size, unsigned index_stride)
744 {
745         struct si_context *sctx = (struct si_context *)ctx;
746         struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
747
748         if (shader >= SI_NUM_SHADERS)
749                 return;
750
751         /* The stride field in the resource descriptor has 14 bits */
752         assert(stride < (1 << 14));
753
754         assert(slot < buffers->num_buffers);
755         pipe_resource_reference(&buffers->buffers[slot], NULL);
756
757         if (buffer) {
758                 uint64_t va;
759
760                 va = r600_resource(buffer)->gpu_address;
761
762                 switch (element_size) {
763                 default:
764                         assert(!"Unsupported ring buffer element size");
765                 case 0:
766                 case 2:
767                         element_size = 0;
768                         break;
769                 case 4:
770                         element_size = 1;
771                         break;
772                 case 8:
773                         element_size = 2;
774                         break;
775                 case 16:
776                         element_size = 3;
777                         break;
778                 }
779
780                 switch (index_stride) {
781                 default:
782                         assert(!"Unsupported ring buffer index stride");
783                 case 0:
784                 case 8:
785                         index_stride = 0;
786                         break;
787                 case 16:
788                         index_stride = 1;
789                         break;
790                 case 32:
791                         index_stride = 2;
792                         break;
793                 case 64:
794                         index_stride = 3;
795                         break;
796                 }
797
798                 /* Set the descriptor. */
799                 uint32_t *desc = buffers->desc_data[slot];
800                 desc[0] = va;
801                 desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32) |
802                           S_008F04_STRIDE(stride) |
803                           S_008F04_SWIZZLE_ENABLE(swizzle);
804                 desc[2] = num_records;
805                 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
806                           S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
807                           S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
808                           S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
809                           S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
810                           S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
811                           S_008F0C_ELEMENT_SIZE(element_size) |
812                           S_008F0C_INDEX_STRIDE(index_stride) |
813                           S_008F0C_ADD_TID_ENABLE(add_tid);
814
815                 pipe_resource_reference(&buffers->buffers[slot], buffer);
816                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
817                                       (struct r600_resource*)buffer,
818                                       buffers->shader_usage, buffers->priority);
819                 buffers->desc.enabled_mask |= 1 << slot;
820         } else {
821                 /* Clear the descriptor. */
822                 memset(buffers->desc_data[slot], 0, sizeof(uint32_t) * 4);
823                 buffers->desc.enabled_mask &= ~(1 << slot);
824         }
825
826         buffers->desc.dirty_mask |= 1 << slot;
827         si_update_descriptors(sctx, &buffers->desc);
828 }
829
830 /* STREAMOUT BUFFERS */
831
832 static void si_set_streamout_targets(struct pipe_context *ctx,
833                                      unsigned num_targets,
834                                      struct pipe_stream_output_target **targets,
835                                      const unsigned *offsets)
836 {
837         struct si_context *sctx = (struct si_context *)ctx;
838         struct si_buffer_resources *buffers = &sctx->rw_buffers[PIPE_SHADER_VERTEX];
839         unsigned old_num_targets = sctx->b.streamout.num_targets;
840         unsigned i, bufidx;
841
842         /* Streamout buffers must be bound in 2 places:
843          * 1) in VGT by setting the VGT_STRMOUT registers
844          * 2) as shader resources
845          */
846
847         /* Set the VGT regs. */
848         r600_set_streamout_targets(ctx, num_targets, targets, offsets);
849
850         /* Set the shader resources. */
851         for (i = 0; i < num_targets; i++) {
852                 bufidx = SI_SO_BUF_OFFSET + i;
853
854                 if (targets[i]) {
855                         struct pipe_resource *buffer = targets[i]->buffer;
856                         uint64_t va = r600_resource(buffer)->gpu_address;
857
858                         /* Set the descriptor. */
859                         uint32_t *desc = buffers->desc_data[bufidx];
860                         desc[0] = va;
861                         desc[1] = S_008F04_BASE_ADDRESS_HI(va >> 32);
862                         desc[2] = 0xffffffff;
863                         desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
864                                   S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
865                                   S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
866                                   S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W);
867
868                         /* Set the resource. */
869                         pipe_resource_reference(&buffers->buffers[bufidx],
870                                                 buffer);
871                         r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
872                                               (struct r600_resource*)buffer,
873                                               buffers->shader_usage, buffers->priority);
874                         buffers->desc.enabled_mask |= 1 << bufidx;
875                 } else {
876                         /* Clear the descriptor and unset the resource. */
877                         memset(buffers->desc_data[bufidx], 0,
878                                sizeof(uint32_t) * 4);
879                         pipe_resource_reference(&buffers->buffers[bufidx],
880                                                 NULL);
881                         buffers->desc.enabled_mask &= ~(1 << bufidx);
882                 }
883                 buffers->desc.dirty_mask |= 1 << bufidx;
884         }
885         for (; i < old_num_targets; i++) {
886                 bufidx = SI_SO_BUF_OFFSET + i;
887                 /* Clear the descriptor and unset the resource. */
888                 memset(buffers->desc_data[bufidx], 0, sizeof(uint32_t) * 4);
889                 pipe_resource_reference(&buffers->buffers[bufidx], NULL);
890                 buffers->desc.enabled_mask &= ~(1 << bufidx);
891                 buffers->desc.dirty_mask |= 1 << bufidx;
892         }
893
894         si_update_descriptors(sctx, &buffers->desc);
895 }
896
897 static void si_desc_reset_buffer_offset(struct pipe_context *ctx,
898                                         uint32_t *desc, uint64_t old_buf_va,
899                                         struct pipe_resource *new_buf)
900 {
901         /* Retrieve the buffer offset from the descriptor. */
902         uint64_t old_desc_va =
903                 desc[0] | ((uint64_t)G_008F04_BASE_ADDRESS_HI(desc[1]) << 32);
904
905         assert(old_buf_va <= old_desc_va);
906         uint64_t offset_within_buffer = old_desc_va - old_buf_va;
907
908         /* Update the descriptor. */
909         uint64_t va = r600_resource(new_buf)->gpu_address + offset_within_buffer;
910
911         desc[0] = va;
912         desc[1] = (desc[1] & C_008F04_BASE_ADDRESS_HI) |
913                   S_008F04_BASE_ADDRESS_HI(va >> 32);
914 }
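/* Example (illustrative): if the old buffer lived at GPU VA 0x10000 and the
 * descriptor pointed at 0x10040, offset_within_buffer is 0x40, and the same
 * 0x40 is reapplied on top of the reallocated buffer's new base address.
 */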
915
916 /* BUFFER DISCARD/INVALIDATION */
917
918 /* Reallocate a buffer and update all resource bindings where the buffer is
919  * bound.
920  *
921  * This is used to avoid CPU-GPU synchronizations, because it makes the buffer
922  * idle by discarding its contents. Apps usually tell us when to do this using
923  * map_buffer flags, for example.
924  */
925 static void si_invalidate_buffer(struct pipe_context *ctx, struct pipe_resource *buf)
926 {
927         struct si_context *sctx = (struct si_context*)ctx;
928         struct r600_resource *rbuffer = r600_resource(buf);
929         unsigned i, shader, alignment = rbuffer->buf->alignment;
930         uint64_t old_va = rbuffer->gpu_address;
931         unsigned num_elems = sctx->vertex_elements ?
932                                        sctx->vertex_elements->count : 0;
933         struct si_sampler_view *view;
934
935         /* Reallocate the buffer in the same pipe_resource. */
936         r600_init_resource(&sctx->screen->b, rbuffer, rbuffer->b.b.width0,
937                            alignment, TRUE);
938
939         /* We changed the buffer, now we need to bind it where the old one
940          * was bound. This consists of 2 things:
941          *   1) Updating the resource descriptor and dirtying it.
942          *   2) Adding a relocation to the CS, so that it's usable.
943          */
944
945         /* Vertex buffers. */
946         for (i = 0; i < num_elems; i++) {
947                 int vb = sctx->vertex_elements->elements[i].vertex_buffer_index;
948
949                 if (vb >= Elements(sctx->vertex_buffer))
950                         continue;
951                 if (!sctx->vertex_buffer[vb].buffer)
952                         continue;
953
954                 if (sctx->vertex_buffer[vb].buffer == buf) {
955                         sctx->vertex_buffers_dirty = true;
956                         break;
957                 }
958         }
959
960         /* Read/Write buffers. */
961         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
962                 struct si_buffer_resources *buffers = &sctx->rw_buffers[shader];
963                 bool found = false;
964                 uint32_t mask = buffers->desc.enabled_mask;
965
966                 while (mask) {
967                         i = u_bit_scan(&mask);
968                         if (buffers->buffers[i] == buf) {
969                                 si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
970                                                             old_va, buf);
971
972                                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
973                                                       rbuffer, buffers->shader_usage,
974                                                       buffers->priority);
975
976                                 buffers->desc.dirty_mask |= 1 << i;
977                                 found = true;
978
979                                 if (i >= SI_SO_BUF_OFFSET && shader == PIPE_SHADER_VERTEX) {
980                                         /* Update the streamout state. */
981                                         if (sctx->b.streamout.begin_emitted) {
982                                                 r600_emit_streamout_end(&sctx->b);
983                                         }
984                                         sctx->b.streamout.append_bitmask =
985                                                 sctx->b.streamout.enabled_mask;
986                                         r600_streamout_buffers_dirty(&sctx->b);
987                                 }
988                         }
989                 }
990                 if (found) {
991                         si_update_descriptors(sctx, &buffers->desc);
992                 }
993         }
994
995         /* Constant buffers. */
996         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
997                 struct si_buffer_resources *buffers = &sctx->const_buffers[shader];
998                 bool found = false;
999                 uint32_t mask = buffers->desc.enabled_mask;
1000
1001                 while (mask) {
1002                         unsigned i = u_bit_scan(&mask);
1003                         if (buffers->buffers[i] == buf) {
1004                                 si_desc_reset_buffer_offset(ctx, buffers->desc_data[i],
1005                                                             old_va, buf);
1006
1007                                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
1008                                                       rbuffer, buffers->shader_usage,
1009                                                       buffers->priority);
1010
1011                                 buffers->desc.dirty_mask |= 1 << i;
1012                                 found = true;
1013                         }
1014                 }
1015                 if (found) {
1016                         si_update_descriptors(sctx, &buffers->desc);
1017                 }
1018         }
1019
1020         /* Texture buffers - update virtual addresses in sampler view descriptors. */
1021         LIST_FOR_EACH_ENTRY(view, &sctx->b.texture_buffers, list) {
1022                 if (view->base.texture == buf) {
1023                         si_desc_reset_buffer_offset(ctx, view->state, old_va, buf);
1024                 }
1025         }
1026         /* Texture buffers - update bindings. */
1027         for (shader = 0; shader < SI_NUM_SHADERS; shader++) {
1028                 struct si_sampler_views *views = &sctx->samplers[shader].views;
1029                 bool found = false;
1030                 uint32_t mask = views->desc.enabled_mask;
1031
1032                 while (mask) {
1033                         unsigned i = u_bit_scan(&mask);
1034                         if (views->views[i]->texture == buf) {
1035                                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
1036                                                       rbuffer, RADEON_USAGE_READ,
1037                                                       RADEON_PRIO_SHADER_BUFFER_RO);
1038
1039                                 views->desc.dirty_mask |= 1 << i;
1040                                 found = true;
1041                         }
1042                 }
1043                 if (found) {
1044                         si_update_descriptors(sctx, &views->desc);
1045                 }
1046         }
1047 }
1048
1049 /* CP DMA */
1050
1051 /* The max number of bytes to copy per packet. */
1052 #define CP_DMA_MAX_BYTE_COUNT ((1 << 21) - 8)
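/* Note (assumption, not from the original source): BYTE_COUNT is a 21-bit
 * field (see the asserts in the CP DMA emit helpers above); keeping the
 * chunk size 8 bytes below the 2 MiB limit presumably preserves 8-byte
 * alignment when a large copy or clear is split into multiple packets. */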
1053
1054 static void si_clear_buffer(struct pipe_context *ctx, struct pipe_resource *dst,
1055                             unsigned offset, unsigned size, unsigned value)
1056 {
1057         struct si_context *sctx = (struct si_context*)ctx;
1058
1059         if (!size)
1060                 return;
1061
1062         /* Mark the buffer range of destination as valid (initialized),
1063          * so that transfer_map knows it should wait for the GPU when mapping
1064          * that range. */
1065         util_range_add(&r600_resource(dst)->valid_buffer_range, offset,
1066                        offset + size);
1067
1068         /* Fallback for unaligned clears. */
1069         if (offset % 4 != 0 || size % 4 != 0) {
1070                 uint32_t *map = sctx->b.ws->buffer_map(r600_resource(dst)->cs_buf,
1071                                                        sctx->b.rings.gfx.cs,
1072                                                        PIPE_TRANSFER_WRITE);
1073                 map += offset / 4;
1074                 for (unsigned i = 0; i < size / 4; i++)
1075                         *map++ = value;
1076                 return;
1077         }
1078
1079         uint64_t va = r600_resource(dst)->gpu_address + offset;
1080
1081         /* Flush the caches where the resource is bound. */
1082         /* XXX only flush the caches where the buffer is bound. */
1083         sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
1084                          R600_CONTEXT_INV_CONST_CACHE |
1085                          R600_CONTEXT_FLUSH_AND_INV_CB |
1086                          R600_CONTEXT_FLUSH_AND_INV_DB |
1087                          R600_CONTEXT_FLUSH_AND_INV_CB_META |
1088                          R600_CONTEXT_FLUSH_AND_INV_DB_META;
1089         sctx->b.flags |= R600_CONTEXT_WAIT_3D_IDLE;
1090
1091         while (size) {
1092                 unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
1093                 unsigned dma_flags = 0;
1094
1095                 si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0),
1096                                  FALSE);
1097
1098                 /* This must be done after si_need_cs_space. */
1099                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx,
1100                                       (struct r600_resource*)dst, RADEON_USAGE_WRITE,
1101                                       RADEON_PRIO_MIN);
1102
1103                 /* Flush the caches for the first copy only.
1104                  * Also wait for the previous CP DMA operations. */
1105                 if (sctx->b.flags) {
1106                         si_emit_cache_flush(&sctx->b, NULL);
1107                         dma_flags |= SI_CP_DMA_RAW_WAIT; /* same as WAIT_UNTIL=CP_DMA_IDLE */
1108                 }
1109
1110                 /* Do the synchronization after the last clear packet, so that all data is written to memory. */
1111                 if (size == byte_count)
1112                         dma_flags |= R600_CP_DMA_SYNC;
1113
1114                 /* Emit the clear packet. */
1115                 si_emit_cp_dma_clear_buffer(sctx, va, byte_count, value, dma_flags);
1116
1117                 size -= byte_count;
1118                 va += byte_count;
1119         }
1120
1121         /* Flush the caches again in case the 3D engine has been prefetching
1122          * the resource. */
1123         /* XXX only flush the caches where the buffer is bound. */
1124         sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
1125                          R600_CONTEXT_INV_CONST_CACHE |
1126                          R600_CONTEXT_FLUSH_AND_INV_CB |
1127                          R600_CONTEXT_FLUSH_AND_INV_DB |
1128                          R600_CONTEXT_FLUSH_AND_INV_CB_META |
1129                          R600_CONTEXT_FLUSH_AND_INV_DB_META;
1130 }
1131
1132 void si_copy_buffer(struct si_context *sctx,
1133                     struct pipe_resource *dst, struct pipe_resource *src,
1134                     uint64_t dst_offset, uint64_t src_offset, unsigned size)
1135 {
1136         if (!size)
1137                 return;
1138
1139         /* Mark the buffer range of destination as valid (initialized),
1140          * so that transfer_map knows it should wait for the GPU when mapping
1141          * that range. */
1142         util_range_add(&r600_resource(dst)->valid_buffer_range, dst_offset,
1143                        dst_offset + size);
1144
1145         dst_offset += r600_resource(dst)->gpu_address;
1146         src_offset += r600_resource(src)->gpu_address;
1147
1148         /* Flush the caches where the resource is bound. */
1149         sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
1150                          R600_CONTEXT_INV_CONST_CACHE |
1151                          R600_CONTEXT_FLUSH_AND_INV_CB |
1152                          R600_CONTEXT_FLUSH_AND_INV_DB |
1153                          R600_CONTEXT_FLUSH_AND_INV_CB_META |
1154                          R600_CONTEXT_FLUSH_AND_INV_DB_META |
1155                          R600_CONTEXT_WAIT_3D_IDLE;
1156
1157         while (size) {
1158                 unsigned sync_flags = 0;
1159                 unsigned byte_count = MIN2(size, CP_DMA_MAX_BYTE_COUNT);
1160
1161                 si_need_cs_space(sctx, 7 + (sctx->b.flags ? sctx->cache_flush.num_dw : 0), FALSE);
1162
1163                 /* Flush the caches for the first copy only. Also wait for old CP DMA packets to complete. */
1164                 if (sctx->b.flags) {
1165                         si_emit_cache_flush(&sctx->b, NULL);
1166                         sync_flags |= SI_CP_DMA_RAW_WAIT;
1167                 }
1168
1169                 /* Do the synchronization after the last copy, so that all data is written to memory. */
1170                 if (size == byte_count) {
1171                         sync_flags |= R600_CP_DMA_SYNC;
1172                 }
1173
1174                 /* This must be done after si_need_cs_space. */
1175                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)src,
1176                                       RADEON_USAGE_READ, RADEON_PRIO_MIN);
1177                 r600_context_bo_reloc(&sctx->b, &sctx->b.rings.gfx, (struct r600_resource*)dst,
1178                                       RADEON_USAGE_WRITE, RADEON_PRIO_MIN);
1179
1180                 si_emit_cp_dma_copy_buffer(sctx, dst_offset, src_offset, byte_count, sync_flags);
1181
1182                 size -= byte_count;
1183                 src_offset += byte_count;
1184                 dst_offset += byte_count;
1185         }
1186
1187         sctx->b.flags |= R600_CONTEXT_INV_TEX_CACHE |
1188                          R600_CONTEXT_INV_CONST_CACHE |
1189                          R600_CONTEXT_FLUSH_AND_INV_CB |
1190                          R600_CONTEXT_FLUSH_AND_INV_DB |
1191                          R600_CONTEXT_FLUSH_AND_INV_CB_META |
1192                          R600_CONTEXT_FLUSH_AND_INV_DB_META;
1193 }
1194
1195 /* INIT/DEINIT */
1196
1197 void si_init_all_descriptors(struct si_context *sctx)
1198 {
1199         int i;
1200
1201         for (i = 0; i < SI_NUM_SHADERS; i++) {
1202                 si_init_buffer_resources(sctx, &sctx->const_buffers[i],
1203                                          SI_NUM_CONST_BUFFERS, i, SI_SGPR_CONST,
1204                                          RADEON_USAGE_READ, RADEON_PRIO_SHADER_BUFFER_RO);
1205                 si_init_buffer_resources(sctx, &sctx->rw_buffers[i],
1206                                          i == PIPE_SHADER_VERTEX ?
1207                                          SI_NUM_RW_BUFFERS : SI_NUM_RING_BUFFERS,
1208                                          i, SI_SGPR_RW_BUFFERS,
1209                                          RADEON_USAGE_READWRITE, RADEON_PRIO_SHADER_RESOURCE_RW);
1210
1211                 si_init_sampler_views(sctx, &sctx->samplers[i].views, i);
1212
1213                 si_init_descriptors(sctx, &sctx->samplers[i].states.desc,
1214                                     si_get_shader_user_data_base(i) + SI_SGPR_SAMPLER * 4,
1215                                     4, SI_NUM_SAMPLER_STATES, si_emit_sampler_states);
1216
1217                 sctx->atoms.s.const_buffers[i] = &sctx->const_buffers[i].desc.atom;
1218                 sctx->atoms.s.rw_buffers[i] = &sctx->rw_buffers[i].desc.atom;
1219                 sctx->atoms.s.sampler_views[i] = &sctx->samplers[i].views.desc.atom;
1220                 sctx->atoms.s.sampler_states[i] = &sctx->samplers[i].states.desc.atom;
1221         }
1222
1223         si_init_descriptors(sctx, &sctx->vertex_buffers,
1224                             si_get_shader_user_data_base(PIPE_SHADER_VERTEX) +
1225                             SI_SGPR_VERTEX_BUFFER*4, 4, SI_NUM_VERTEX_BUFFERS,
1226                             si_emit_shader_pointer);
1227         sctx->atoms.s.vertex_buffers = &sctx->vertex_buffers.atom;
1228
1229         /* Set pipe_context functions. */
1230         sctx->b.b.set_constant_buffer = si_set_constant_buffer;
1231         sctx->b.b.set_sampler_views = si_set_sampler_views;
1232         sctx->b.b.set_stream_output_targets = si_set_streamout_targets;
1233         sctx->b.clear_buffer = si_clear_buffer;
1234         sctx->b.invalidate_buffer = si_invalidate_buffer;
1235 }
1236
1237 void si_release_all_descriptors(struct si_context *sctx)
1238 {
1239         int i;
1240
1241         for (i = 0; i < SI_NUM_SHADERS; i++) {
1242                 si_release_buffer_resources(&sctx->const_buffers[i]);
1243                 si_release_buffer_resources(&sctx->rw_buffers[i]);
1244                 si_release_sampler_views(&sctx->samplers[i].views);
1245                 si_release_descriptors(&sctx->samplers[i].states.desc);
1246         }
1247         si_release_descriptors(&sctx->vertex_buffers);
1248 }
1249
1250 void si_all_descriptors_begin_new_cs(struct si_context *sctx)
1251 {
1252         int i;
1253
1254         for (i = 0; i < SI_NUM_SHADERS; i++) {
1255                 si_buffer_resources_begin_new_cs(sctx, &sctx->const_buffers[i]);
1256                 si_buffer_resources_begin_new_cs(sctx, &sctx->rw_buffers[i]);
1257                 si_sampler_views_begin_new_cs(sctx, &sctx->samplers[i].views);
1258                 si_sampler_states_begin_new_cs(sctx, &sctx->samplers[i].states);
1259         }
1260         si_vertex_buffers_begin_new_cs(sctx);
1261 }