radeonsi: rename si_gfx_* functions to si_cp_*
[android-x86/external-mesa.git] / src / gallium / drivers / radeonsi / si_state_draw.c
1 /*
2  * Copyright 2012 Advanced Micro Devices, Inc.
3  * All Rights Reserved.
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * on the rights to use, copy, modify, merge, publish, distribute, sub
9  * license, and/or sell copies of the Software, and to permit persons to whom
10  * the Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
19  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
20  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
21  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
22  * USE OR OTHER DEALINGS IN THE SOFTWARE.
23  */
24
25 #include "si_build_pm4.h"
26 #include "gfx9d.h"
27
28 #include "util/u_index_modify.h"
29 #include "util/u_log.h"
30 #include "util/u_upload_mgr.h"
31 #include "util/u_prim.h"
32
33 #include "ac_debug.h"
34
35 /* special primitive types */
36 #define SI_PRIM_RECTANGLE_LIST  PIPE_PRIM_MAX
37
38 static unsigned si_conv_pipe_prim(unsigned mode)
39 {
40         static const unsigned prim_conv[] = {
41                 [PIPE_PRIM_POINTS]                      = V_008958_DI_PT_POINTLIST,
42                 [PIPE_PRIM_LINES]                       = V_008958_DI_PT_LINELIST,
43                 [PIPE_PRIM_LINE_LOOP]                   = V_008958_DI_PT_LINELOOP,
44                 [PIPE_PRIM_LINE_STRIP]                  = V_008958_DI_PT_LINESTRIP,
45                 [PIPE_PRIM_TRIANGLES]                   = V_008958_DI_PT_TRILIST,
46                 [PIPE_PRIM_TRIANGLE_STRIP]              = V_008958_DI_PT_TRISTRIP,
47                 [PIPE_PRIM_TRIANGLE_FAN]                = V_008958_DI_PT_TRIFAN,
48                 [PIPE_PRIM_QUADS]                       = V_008958_DI_PT_QUADLIST,
49                 [PIPE_PRIM_QUAD_STRIP]                  = V_008958_DI_PT_QUADSTRIP,
50                 [PIPE_PRIM_POLYGON]                     = V_008958_DI_PT_POLYGON,
51                 [PIPE_PRIM_LINES_ADJACENCY]             = V_008958_DI_PT_LINELIST_ADJ,
52                 [PIPE_PRIM_LINE_STRIP_ADJACENCY]        = V_008958_DI_PT_LINESTRIP_ADJ,
53                 [PIPE_PRIM_TRIANGLES_ADJACENCY]         = V_008958_DI_PT_TRILIST_ADJ,
54                 [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY]    = V_008958_DI_PT_TRISTRIP_ADJ,
55                 [PIPE_PRIM_PATCHES]                     = V_008958_DI_PT_PATCH,
56                 [SI_PRIM_RECTANGLE_LIST]                = V_008958_DI_PT_RECTLIST
57         };
58         assert(mode < ARRAY_SIZE(prim_conv));
59         return prim_conv[mode];
60 }
61
62 /**
63  * This calculates the LDS size for tessellation shaders (VS, TCS, TES).
64  * LS.LDS_SIZE is shared by all 3 shader stages.
65  *
66  * The information about LDS and other non-compile-time parameters is then
67  * written to userdata SGPRs.
68  */
69 static bool si_emit_derived_tess_state(struct si_context *sctx,
70                                        const struct pipe_draw_info *info,
71                                        unsigned *num_patches)
72 {
73         struct radeon_cmdbuf *cs = sctx->gfx_cs;
74         struct si_shader *ls_current;
75         struct si_shader_selector *ls;
76         /* The TES pointer will only be used for sctx->last_tcs.
77          * It would be wrong to think that TCS = TES. */
78         struct si_shader_selector *tcs =
79                 sctx->tcs_shader.cso ? sctx->tcs_shader.cso : sctx->tes_shader.cso;
80         unsigned tess_uses_primid = sctx->ia_multi_vgt_param_key.u.tess_uses_prim_id;
81         bool has_primid_instancing_bug = sctx->chip_class == SI &&
82                                          sctx->screen->info.max_se == 1;
83         unsigned tes_sh_base = sctx->shader_pointers.sh_base[PIPE_SHADER_TESS_EVAL];
84         unsigned num_tcs_input_cp = info->vertices_per_patch;
85         unsigned num_tcs_output_cp, num_tcs_inputs, num_tcs_outputs;
86         unsigned num_tcs_patch_outputs;
87         unsigned input_vertex_size, output_vertex_size, pervertex_output_patch_size;
88         unsigned input_patch_size, output_patch_size, output_patch0_offset;
89         unsigned perpatch_output_offset, lds_size;
90         unsigned tcs_in_layout, tcs_out_layout, tcs_out_offsets;
91         unsigned offchip_layout, hardware_lds_size, ls_hs_config;
92
93         /* Since GFX9 has merged LS-HS in the TCS state, set LS = TCS. */
94         if (sctx->chip_class >= GFX9) {
95                 if (sctx->tcs_shader.cso)
96                         ls_current = sctx->tcs_shader.current;
97                 else
98                         ls_current = sctx->fixed_func_tcs_shader.current;
99
100                 ls = ls_current->key.part.tcs.ls;
101         } else {
102                 ls_current = sctx->vs_shader.current;
103                 ls = sctx->vs_shader.cso;
104         }
105
106         if (sctx->last_ls == ls_current &&
107             sctx->last_tcs == tcs &&
108             sctx->last_tes_sh_base == tes_sh_base &&
109             sctx->last_num_tcs_input_cp == num_tcs_input_cp &&
110             (!has_primid_instancing_bug ||
111              (sctx->last_tess_uses_primid == tess_uses_primid))) {
112                 *num_patches = sctx->last_num_patches;
113                 return false;
114         }
115
116         sctx->last_ls = ls_current;
117         sctx->last_tcs = tcs;
118         sctx->last_tes_sh_base = tes_sh_base;
119         sctx->last_num_tcs_input_cp = num_tcs_input_cp;
120         sctx->last_tess_uses_primid = tess_uses_primid;
121
122         /* This calculates how shader inputs and outputs among VS, TCS, and TES
123          * are laid out in LDS. */
124         num_tcs_inputs = util_last_bit64(ls->outputs_written);
125
126         if (sctx->tcs_shader.cso) {
127                 num_tcs_outputs = util_last_bit64(tcs->outputs_written);
128                 num_tcs_output_cp = tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
129                 num_tcs_patch_outputs = util_last_bit64(tcs->patch_outputs_written);
130         } else {
131                 /* No TCS. Route varyings from LS to TES. */
132                 num_tcs_outputs = num_tcs_inputs;
133                 num_tcs_output_cp = num_tcs_input_cp;
134                 num_tcs_patch_outputs = 2; /* TESSINNER + TESSOUTER */
135         }
136
137         input_vertex_size = ls->lshs_vertex_stride;
138         output_vertex_size = num_tcs_outputs * 16;
139
140         input_patch_size = num_tcs_input_cp * input_vertex_size;
141
142         pervertex_output_patch_size = num_tcs_output_cp * output_vertex_size;
143         output_patch_size = pervertex_output_patch_size + num_tcs_patch_outputs * 16;
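            /* Worked example with hypothetical numbers, for illustration only:
             * 3 input and 3 output control points, a 48-byte LS vertex stride,
             * 8 TCS per-vertex outputs and 2 per-patch outputs give:
             *   input_patch_size            = 3 * 48       = 144 bytes
             *   output_vertex_size          = 8 * 16       = 128 bytes
             *   pervertex_output_patch_size = 3 * 128      = 384 bytes
             *   output_patch_size           = 384 + 2 * 16 = 416 bytes
             */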
144
145         /* Ensure that we only need one wave per SIMD so we don't need to check
146          * resource usage. This also ensures that the number of TCS input and
147          * output vertices per threadgroup is at most 256.
148          */
149         unsigned max_verts_per_patch = MAX2(num_tcs_input_cp, num_tcs_output_cp);
150         *num_patches = 256 / max_verts_per_patch;
151
152         /* Make sure that the data fits in LDS. This assumes the shaders only
153          * use LDS for the inputs and outputs.
154          *
155          * While CIK can use 64K per threadgroup, there is a hang on Stoney
156          * with 2 CUs if we use more than 32K. The closed Vulkan driver also
157          * uses 32K at most on all GCN chips.
158          */
159         hardware_lds_size = 32768;
160         *num_patches = MIN2(*num_patches, hardware_lds_size / (input_patch_size +
161                                                                output_patch_size));
162
163         /* Make sure the output data fits in the offchip buffer */
164         *num_patches = MIN2(*num_patches,
165                             (sctx->screen->tess_offchip_block_dw_size * 4) /
166                             output_patch_size);
167
168         /* Not necessary for correctness, but improves performance.
169          * The hardware can do more, but the radeonsi shader constant is
170          * limited to 6 bits.
171          */
172         *num_patches = MIN2(*num_patches, 63); /* triangles: 3 full waves except 3 lanes */
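            /* Continuing the hypothetical example above (144-byte input patches,
             * 416-byte output patches, triangle domain with 3 control points):
             *   initial:     256 / 3             = 85 patches
             *   LDS limit:   32768 / (144 + 416) = 58 patches
             *   6-bit limit: MIN2(58, 63)        = 58 patches
             * The off-chip limit depends on tess_offchip_block_dw_size and is
             * assumed not to be the bottleneck here.
             */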
173
174         /* When distributed tessellation is unsupported, switch between SEs
175          * at a higher frequency to compensate for it.
176          */
177         if (!sctx->screen->has_distributed_tess && sctx->screen->info.max_se > 1)
178                 *num_patches = MIN2(*num_patches, 16); /* recommended */
179
180         /* Make sure that vector lanes are reasonably occupied. It probably
181          * doesn't matter much because this is LS-HS, and TES is likely to
182          * occupy significantly more CUs.
183          */
184         unsigned temp_verts_per_tg = *num_patches * max_verts_per_patch;
185         if (temp_verts_per_tg > 64 && temp_verts_per_tg % 64 < 48)
186                 *num_patches = (temp_verts_per_tg & ~63) / max_verts_per_patch;
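            /* E.g. continuing with 58 patches of 3 vertices (assuming the clamps
             * above left that value unchanged): temp_verts_per_tg = 174, and
             * 174 % 64 = 46 < 48, so round down to (174 & ~63) / 3 = 128 / 3 =
             * 42 patches (126 vertices, two nearly full waves) instead of
             * leaving a third wave only 46/64 occupied.
             */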
187
188         if (sctx->chip_class == SI) {
189                 /* SI bug workaround, related to power management. Limit LS-HS
190                  * threadgroups to only one wave.
191                  */
192                 unsigned one_wave = 64 / max_verts_per_patch;
193                 *num_patches = MIN2(*num_patches, one_wave);
194         }
195
196         /* The VGT HS block increments the patch ID unconditionally
197          * within a single threadgroup. This results in incorrect
198          * patch IDs when instanced draws are used.
199          *
200          * The intended solution is to restrict threadgroups to
201          * a single instance by setting SWITCH_ON_EOI, which
202          * should cause IA to split instances up. However, this
203          * doesn't work correctly on SI when there is no other
204          * SE to switch to.
205          */
206         if (has_primid_instancing_bug && tess_uses_primid)
207                 *num_patches = 1;
208
209         sctx->last_num_patches = *num_patches;
210
211         output_patch0_offset = input_patch_size * *num_patches;
212         perpatch_output_offset = output_patch0_offset + pervertex_output_patch_size;
213
214         /* Compute userdata SGPRs. */
215         assert(((input_vertex_size / 4) & ~0xff) == 0);
216         assert(((output_vertex_size / 4) & ~0xff) == 0);
217         assert(((input_patch_size / 4) & ~0x1fff) == 0);
218         assert(((output_patch_size / 4) & ~0x1fff) == 0);
219         assert(((output_patch0_offset / 16) & ~0xffff) == 0);
220         assert(((perpatch_output_offset / 16) & ~0xffff) == 0);
221         assert(num_tcs_input_cp <= 32);
222         assert(num_tcs_output_cp <= 32);
223
224         uint64_t ring_va = r600_resource(sctx->tess_rings)->gpu_address;
225         assert((ring_va & u_bit_consecutive(0, 19)) == 0);
226
227         tcs_in_layout = S_VS_STATE_LS_OUT_PATCH_SIZE(input_patch_size / 4) |
228                         S_VS_STATE_LS_OUT_VERTEX_SIZE(input_vertex_size / 4);
229         tcs_out_layout = (output_patch_size / 4) |
230                          (num_tcs_input_cp << 13) |
231                          ring_va;
232         tcs_out_offsets = (output_patch0_offset / 16) |
233                           ((perpatch_output_offset / 16) << 16);
234         offchip_layout = *num_patches |
235                          (num_tcs_output_cp << 6) |
236                          (pervertex_output_patch_size * *num_patches << 12);
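            /* Bit layout of the values packed above, as derived from the
             * expressions and asserts:
             *   tcs_in_layout:   LS_OUT_PATCH_SIZE and LS_OUT_VERTEX_SIZE fields
             *   tcs_out_layout:  [12:0] output_patch_size / 4,
             *                    [18:13] num_tcs_input_cp,
             *                    [31:19] from ring_va (its low 19 bits are zero)
             *   tcs_out_offsets: [15:0] output_patch0_offset / 16,
             *                    [31:16] perpatch_output_offset / 16
             *   offchip_layout:  [5:0] num_patches, [11:6] num_tcs_output_cp,
             *                    [31:12] pervertex_output_patch_size * num_patches
             */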
237
238         /* Compute the LDS size. */
239         lds_size = output_patch0_offset + output_patch_size * *num_patches;
240
241         if (sctx->chip_class >= CIK) {
242                 assert(lds_size <= 65536);
243                 lds_size = align(lds_size, 512) / 512;
244         } else {
245                 assert(lds_size <= 32768);
246                 lds_size = align(lds_size, 256) / 256;
247         }
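            /* Continuing the hypothetical example (144-byte input patches,
             * 416-byte output patches, 42 patches, assuming no further clamps):
             *   output_patch0_offset = 144 * 42        = 6048 bytes
             *   lds_size             = 6048 + 416 * 42 = 23520 bytes
             *   CIK+ encoding        = align(23520, 512) / 512 = 46
             * (SI encodes the same size in 256-byte granules instead.)
             */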
248
249         /* Set SI_SGPR_VS_STATE_BITS. */
250         sctx->current_vs_state &= C_VS_STATE_LS_OUT_PATCH_SIZE &
251                                   C_VS_STATE_LS_OUT_VERTEX_SIZE;
252         sctx->current_vs_state |= tcs_in_layout;
253
254         if (sctx->chip_class >= GFX9) {
255                 unsigned hs_rsrc2 = ls_current->config.rsrc2 |
256                                     S_00B42C_LDS_SIZE(lds_size);
257
258                 radeon_set_sh_reg(cs, R_00B42C_SPI_SHADER_PGM_RSRC2_HS, hs_rsrc2);
259
260                 /* Set userdata SGPRs for merged LS-HS. */
261                 radeon_set_sh_reg_seq(cs,
262                                       R_00B430_SPI_SHADER_USER_DATA_LS_0 +
263                                       GFX9_SGPR_TCS_OFFCHIP_LAYOUT * 4, 3);
264                 radeon_emit(cs, offchip_layout);
265                 radeon_emit(cs, tcs_out_offsets);
266                 radeon_emit(cs, tcs_out_layout);
267         } else {
268                 unsigned ls_rsrc2 = ls_current->config.rsrc2;
269
270                 si_multiwave_lds_size_workaround(sctx->screen, &lds_size);
271                 ls_rsrc2 |= S_00B52C_LDS_SIZE(lds_size);
272
273                 /* Due to a hw bug, RSRC2_LS must be written twice with another
274                  * LS register written in between. */
275                 if (sctx->chip_class == CIK && sctx->family != CHIP_HAWAII)
276                         radeon_set_sh_reg(cs, R_00B52C_SPI_SHADER_PGM_RSRC2_LS, ls_rsrc2);
277                 radeon_set_sh_reg_seq(cs, R_00B528_SPI_SHADER_PGM_RSRC1_LS, 2);
278                 radeon_emit(cs, ls_current->config.rsrc1);
279                 radeon_emit(cs, ls_rsrc2);
280
281                 /* Set userdata SGPRs for TCS. */
282                 radeon_set_sh_reg_seq(cs,
283                         R_00B430_SPI_SHADER_USER_DATA_HS_0 + GFX6_SGPR_TCS_OFFCHIP_LAYOUT * 4, 4);
284                 radeon_emit(cs, offchip_layout);
285                 radeon_emit(cs, tcs_out_offsets);
286                 radeon_emit(cs, tcs_out_layout);
287                 radeon_emit(cs, tcs_in_layout);
288         }
289
290         /* Set userdata SGPRs for TES. */
291         radeon_set_sh_reg_seq(cs, tes_sh_base + SI_SGPR_TES_OFFCHIP_LAYOUT * 4, 2);
292         radeon_emit(cs, offchip_layout);
293         radeon_emit(cs, ring_va);
294
295         ls_hs_config = S_028B58_NUM_PATCHES(*num_patches) |
296                        S_028B58_HS_NUM_INPUT_CP(num_tcs_input_cp) |
297                        S_028B58_HS_NUM_OUTPUT_CP(num_tcs_output_cp);
298
299         if (sctx->last_ls_hs_config != ls_hs_config) {
300                 if (sctx->chip_class >= CIK) {
301                         radeon_set_context_reg_idx(cs, R_028B58_VGT_LS_HS_CONFIG, 2,
302                                                    ls_hs_config);
303                 } else {
304                         radeon_set_context_reg(cs, R_028B58_VGT_LS_HS_CONFIG,
305                                                ls_hs_config);
306                 }
307                 sctx->last_ls_hs_config = ls_hs_config;
308                 return true; /* true if the context rolls */
309         }
310         return false;
311 }
312
313 static unsigned si_num_prims_for_vertices(const struct pipe_draw_info *info)
314 {
315         switch (info->mode) {
316         case PIPE_PRIM_PATCHES:
317                 return info->count / info->vertices_per_patch;
318         case SI_PRIM_RECTANGLE_LIST:
319                 return info->count / 3;
320         default:
321                 return u_prims_for_vertices(info->mode, info->count);
322         }
323 }
324
325 static unsigned
326 si_get_init_multi_vgt_param(struct si_screen *sscreen,
327                             union si_vgt_param_key *key)
328 {
329         STATIC_ASSERT(sizeof(union si_vgt_param_key) == 4);
330         unsigned max_primgroup_in_wave = 2;
331
332         /* SWITCH_ON_EOP(0) is always preferable. */
333         bool wd_switch_on_eop = false;
334         bool ia_switch_on_eop = false;
335         bool ia_switch_on_eoi = false;
336         bool partial_vs_wave = false;
337         bool partial_es_wave = false;
338
339         if (key->u.uses_tess) {
340                 /* SWITCH_ON_EOI must be set if PrimID is used. */
341                 if (key->u.tess_uses_prim_id)
342                         ia_switch_on_eoi = true;
343
344                 /* Bug with tessellation and GS on Bonaire and older 2 SE chips. */
345                 if ((sscreen->info.family == CHIP_TAHITI ||
346                      sscreen->info.family == CHIP_PITCAIRN ||
347                      sscreen->info.family == CHIP_BONAIRE) &&
348                     key->u.uses_gs)
349                         partial_vs_wave = true;
350
351                 /* Needed for 028B6C_DISTRIBUTION_MODE != 0 */
352                 if (sscreen->has_distributed_tess) {
353                         if (key->u.uses_gs) {
354                                 if (sscreen->info.chip_class <= VI)
355                                         partial_es_wave = true;
356
357                                 /* GPU hang workaround. */
358                                 if (sscreen->info.family == CHIP_TONGA ||
359                                     sscreen->info.family == CHIP_FIJI ||
360                                     sscreen->info.family == CHIP_POLARIS10 ||
361                                     sscreen->info.family == CHIP_POLARIS11 ||
362                                     sscreen->info.family == CHIP_POLARIS12 ||
363                                     sscreen->info.family == CHIP_VEGAM)
364                                         partial_vs_wave = true;
365                         } else {
366                                 partial_vs_wave = true;
367                         }
368                 }
369         }
370
371         /* This is a hardware requirement. */
372         if (key->u.line_stipple_enabled ||
373             (sscreen->debug_flags & DBG(SWITCH_ON_EOP))) {
374                 ia_switch_on_eop = true;
375                 wd_switch_on_eop = true;
376         }
377
378         if (sscreen->info.chip_class >= CIK) {
379                 /* WD_SWITCH_ON_EOP has no effect on GPUs with fewer than
380                  * 4 shader engines. Set it to 1 to pass the assertion below.
381                  * The other cases are hardware requirements.
382                  *
383                  * Polaris supports primitive restart with WD_SWITCH_ON_EOP=0
384                  * for points, line strips, and tri strips.
385                  */
386                 if (sscreen->info.max_se < 4 ||
387                     key->u.prim == PIPE_PRIM_POLYGON ||
388                     key->u.prim == PIPE_PRIM_LINE_LOOP ||
389                     key->u.prim == PIPE_PRIM_TRIANGLE_FAN ||
390                     key->u.prim == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY ||
391                     (key->u.primitive_restart &&
392                      (sscreen->info.family < CHIP_POLARIS10 ||
393                       (key->u.prim != PIPE_PRIM_POINTS &&
394                        key->u.prim != PIPE_PRIM_LINE_STRIP &&
395                        key->u.prim != PIPE_PRIM_TRIANGLE_STRIP))) ||
396                     key->u.count_from_stream_output)
397                         wd_switch_on_eop = true;
398
399                 /* Hawaii hangs if instancing is enabled and WD_SWITCH_ON_EOP is 0.
400                  * We don't know that for indirect drawing, so treat it as
401                  * always problematic. */
402                 if (sscreen->info.family == CHIP_HAWAII &&
403                     key->u.uses_instancing)
404                         wd_switch_on_eop = true;
405
406                 /* Performance recommendation for 4 SE Gfx7-8 parts if
407                  * instances are smaller than a primgroup.
408                  * Assume indirect draws always use small instances.
409                  * This is needed for good VS wave utilization.
410                  */
411                 if (sscreen->info.chip_class <= VI &&
412                     sscreen->info.max_se == 4 &&
413                     key->u.multi_instances_smaller_than_primgroup)
414                         wd_switch_on_eop = true;
415
416                 /* Required on CIK and later. */
417                 if (sscreen->info.max_se > 2 && !wd_switch_on_eop)
418                         ia_switch_on_eoi = true;
419
420                 /* Required by Hawaii and, for some special cases, by VI. */
421                 if (ia_switch_on_eoi &&
422                     (sscreen->info.family == CHIP_HAWAII ||
423                      (sscreen->info.chip_class == VI &&
424                       (key->u.uses_gs || max_primgroup_in_wave != 2))))
425                         partial_vs_wave = true;
426
427                 /* Instancing bug on Bonaire. */
428                 if (sscreen->info.family == CHIP_BONAIRE && ia_switch_on_eoi &&
429                     key->u.uses_instancing)
430                         partial_vs_wave = true;
431
432                 /* If the WD switch is false, the IA switch must be false too. */
433                 assert(wd_switch_on_eop || !ia_switch_on_eop);
434         }
435
436         /* If SWITCH_ON_EOI is set, PARTIAL_ES_WAVE must be set too. */
437         if (sscreen->info.chip_class <= VI && ia_switch_on_eoi)
438                 partial_es_wave = true;
439
440         return S_028AA8_SWITCH_ON_EOP(ia_switch_on_eop) |
441                 S_028AA8_SWITCH_ON_EOI(ia_switch_on_eoi) |
442                 S_028AA8_PARTIAL_VS_WAVE_ON(partial_vs_wave) |
443                 S_028AA8_PARTIAL_ES_WAVE_ON(partial_es_wave) |
444                 S_028AA8_WD_SWITCH_ON_EOP(sscreen->info.chip_class >= CIK ? wd_switch_on_eop : 0) |
445                 /* The following field was moved to VGT_SHADER_STAGES_EN in GFX9. */
446                 S_028AA8_MAX_PRIMGRP_IN_WAVE(sscreen->info.chip_class == VI ?
447                                              max_primgroup_in_wave : 0) |
448                 S_030960_EN_INST_OPT_BASIC(sscreen->info.chip_class >= GFX9) |
449                 S_030960_EN_INST_OPT_ADV(sscreen->info.chip_class >= GFX9);
450 }
451
452 void si_init_ia_multi_vgt_param_table(struct si_context *sctx)
453 {
454         for (int prim = 0; prim <= SI_PRIM_RECTANGLE_LIST; prim++)
455         for (int uses_instancing = 0; uses_instancing < 2; uses_instancing++)
456         for (int multi_instances = 0; multi_instances < 2; multi_instances++)
457         for (int primitive_restart = 0; primitive_restart < 2; primitive_restart++)
458         for (int count_from_so = 0; count_from_so < 2; count_from_so++)
459         for (int line_stipple = 0; line_stipple < 2; line_stipple++)
460         for (int uses_tess = 0; uses_tess < 2; uses_tess++)
461         for (int tess_uses_primid = 0; tess_uses_primid < 2; tess_uses_primid++)
462         for (int uses_gs = 0; uses_gs < 2; uses_gs++) {
463                 union si_vgt_param_key key;
464
465                 key.index = 0;
466                 key.u.prim = prim;
467                 key.u.uses_instancing = uses_instancing;
468                 key.u.multi_instances_smaller_than_primgroup = multi_instances;
469                 key.u.primitive_restart = primitive_restart;
470                 key.u.count_from_stream_output = count_from_so;
471                 key.u.line_stipple_enabled = line_stipple;
472                 key.u.uses_tess = uses_tess;
473                 key.u.tess_uses_prim_id = tess_uses_primid;
474                 key.u.uses_gs = uses_gs;
475
476                 sctx->ia_multi_vgt_param[key.index] =
477                         si_get_init_multi_vgt_param(sctx->screen, &key);
478         }
479 }
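/* The nested loops above precompute IA_MULTI_VGT_PARAM for every key
 * combination, so si_get_ia_multi_vgt_param() below only has to look up the
 * table entry and OR in the PRIMGROUP_SIZE field at draw time. */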
480
481 static unsigned si_get_ia_multi_vgt_param(struct si_context *sctx,
482                                           const struct pipe_draw_info *info,
483                                           unsigned num_patches)
484 {
485         union si_vgt_param_key key = sctx->ia_multi_vgt_param_key;
486         unsigned primgroup_size;
487         unsigned ia_multi_vgt_param;
488
489         if (sctx->tes_shader.cso) {
490                 primgroup_size = num_patches; /* must be a multiple of NUM_PATCHES */
491         } else if (sctx->gs_shader.cso) {
492                 primgroup_size = 64; /* recommended with a GS */
493         } else {
494                 primgroup_size = 128; /* recommended without a GS and tess */
495         }
496
497         key.u.prim = info->mode;
498         key.u.uses_instancing = info->indirect || info->instance_count > 1;
499         key.u.multi_instances_smaller_than_primgroup =
500                 info->indirect ||
501                 (info->instance_count > 1 &&
502                  (info->count_from_stream_output ||
503                   si_num_prims_for_vertices(info) < primgroup_size));
504         key.u.primitive_restart = info->primitive_restart;
505         key.u.count_from_stream_output = info->count_from_stream_output != NULL;
506
507         ia_multi_vgt_param = sctx->ia_multi_vgt_param[key.index] |
508                              S_028AA8_PRIMGROUP_SIZE(primgroup_size - 1);
509
510         if (sctx->gs_shader.cso) {
511                 /* GS requirement. */
512                 if (sctx->chip_class <= VI &&
513                     SI_GS_PER_ES / primgroup_size >= sctx->screen->gs_table_depth - 3)
514                         ia_multi_vgt_param |= S_028AA8_PARTIAL_ES_WAVE_ON(1);
515
516                 /* GS hw bug with single-primitive instances and SWITCH_ON_EOI.
517                  * The hw doc says all multi-SE chips are affected, but Vulkan
518                  * only applies it to Hawaii. Do what Vulkan does.
519                  */
520                 if (sctx->family == CHIP_HAWAII &&
521                     G_028AA8_SWITCH_ON_EOI(ia_multi_vgt_param) &&
522                     (info->indirect ||
523                      (info->instance_count > 1 &&
524                       (info->count_from_stream_output ||
525                        si_num_prims_for_vertices(info) <= 1))))
526                         sctx->flags |= SI_CONTEXT_VGT_FLUSH;
527         }
528
529         return ia_multi_vgt_param;
530 }
531
532 /* rast_prim is the primitive type after GS. */
533 static bool si_emit_rasterizer_prim_state(struct si_context *sctx)
534 {
535         struct radeon_cmdbuf *cs = sctx->gfx_cs;
536         enum pipe_prim_type rast_prim = sctx->current_rast_prim;
537         struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
538
539         /* Skip this if not rendering lines. */
540         if (!util_prim_is_lines(rast_prim))
541                 return false;
542
543         if (rast_prim == sctx->last_rast_prim &&
544             rs->pa_sc_line_stipple == sctx->last_sc_line_stipple)
545                 return false;
546
547         /* For lines, reset the stipple pattern at each primitive. Otherwise,
548          * reset the stipple pattern at each packet (line strips, line loops).
549          */
550         radeon_set_context_reg(cs, R_028A0C_PA_SC_LINE_STIPPLE,
551                 rs->pa_sc_line_stipple |
552                 S_028A0C_AUTO_RESET_CNTL(rast_prim == PIPE_PRIM_LINES ? 1 : 2));
553
554         sctx->last_rast_prim = rast_prim;
555         sctx->last_sc_line_stipple = rs->pa_sc_line_stipple;
556         return true; /* true if the context rolls */
557 }
558
559 static void si_emit_vs_state(struct si_context *sctx,
560                              const struct pipe_draw_info *info)
561 {
562         sctx->current_vs_state &= C_VS_STATE_INDEXED;
563         sctx->current_vs_state |= S_VS_STATE_INDEXED(!!info->index_size);
564
565         if (sctx->num_vs_blit_sgprs) {
566                 /* Re-emit the state after we leave u_blitter. */
567                 sctx->last_vs_state = ~0;
568                 return;
569         }
570
571         if (sctx->current_vs_state != sctx->last_vs_state) {
572                 struct radeon_cmdbuf *cs = sctx->gfx_cs;
573
574                 /* For the API vertex shader (VS_STATE_INDEXED). */
575                 radeon_set_sh_reg(cs,
576                         sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX] +
577                         SI_SGPR_VS_STATE_BITS * 4,
578                         sctx->current_vs_state);
579
580                 /* For vertex color clamping, which is done in the last stage
581                  * before the rasterizer. */
582                 if (sctx->gs_shader.cso || sctx->tes_shader.cso) {
583                         /* GS copy shader or TES if GS is missing. */
584                         radeon_set_sh_reg(cs,
585                                 R_00B130_SPI_SHADER_USER_DATA_VS_0 +
586                                 SI_SGPR_VS_STATE_BITS * 4,
587                                 sctx->current_vs_state);
588                 }
589
590                 sctx->last_vs_state = sctx->current_vs_state;
591         }
592 }
593
594 static inline bool si_prim_restart_index_changed(struct si_context *sctx,
595                                                  const struct pipe_draw_info *info)
596 {
597         return info->primitive_restart &&
598                (info->restart_index != sctx->last_restart_index ||
599                 sctx->last_restart_index == SI_RESTART_INDEX_UNKNOWN);
600 }
601
602 static void si_emit_draw_registers(struct si_context *sctx,
603                                    const struct pipe_draw_info *info,
604                                    unsigned num_patches)
605 {
606         struct radeon_cmdbuf *cs = sctx->gfx_cs;
607         unsigned prim = si_conv_pipe_prim(info->mode);
608         unsigned ia_multi_vgt_param;
609
610         ia_multi_vgt_param = si_get_ia_multi_vgt_param(sctx, info, num_patches);
611
612         /* Draw state. */
613         if (ia_multi_vgt_param != sctx->last_multi_vgt_param) {
614                 if (sctx->chip_class >= GFX9)
615                         radeon_set_uconfig_reg_idx(cs, R_030960_IA_MULTI_VGT_PARAM, 4, ia_multi_vgt_param);
616                 else if (sctx->chip_class >= CIK)
617                         radeon_set_context_reg_idx(cs, R_028AA8_IA_MULTI_VGT_PARAM, 1, ia_multi_vgt_param);
618                 else
619                         radeon_set_context_reg(cs, R_028AA8_IA_MULTI_VGT_PARAM, ia_multi_vgt_param);
620
621                 sctx->last_multi_vgt_param = ia_multi_vgt_param;
622         }
623         if (prim != sctx->last_prim) {
624                 if (sctx->chip_class >= CIK)
625                         radeon_set_uconfig_reg_idx(cs, R_030908_VGT_PRIMITIVE_TYPE, 1, prim);
626                 else
627                         radeon_set_config_reg(cs, R_008958_VGT_PRIMITIVE_TYPE, prim);
628
629                 sctx->last_prim = prim;
630         }
631
632         /* Primitive restart. */
633         if (info->primitive_restart != sctx->last_primitive_restart_en) {
634                 if (sctx->chip_class >= GFX9)
635                         radeon_set_uconfig_reg(cs, R_03092C_VGT_MULTI_PRIM_IB_RESET_EN,
636                                                info->primitive_restart);
637                 else
638                         radeon_set_context_reg(cs, R_028A94_VGT_MULTI_PRIM_IB_RESET_EN,
639                                                info->primitive_restart);
640
641                 sctx->last_primitive_restart_en = info->primitive_restart;
642
643         }
644         if (si_prim_restart_index_changed(sctx, info)) {
645                 radeon_set_context_reg(cs, R_02840C_VGT_MULTI_PRIM_IB_RESET_INDX,
646                                        info->restart_index);
647                 sctx->last_restart_index = info->restart_index;
648         }
649 }
650
651 static void si_emit_draw_packets(struct si_context *sctx,
652                                  const struct pipe_draw_info *info,
653                                  struct pipe_resource *indexbuf,
654                                  unsigned index_size,
655                                  unsigned index_offset)
656 {
657         struct pipe_draw_indirect_info *indirect = info->indirect;
658         struct radeon_cmdbuf *cs = sctx->gfx_cs;
659         unsigned sh_base_reg = sctx->shader_pointers.sh_base[PIPE_SHADER_VERTEX];
660         bool render_cond_bit = sctx->render_cond && !sctx->render_cond_force_off;
661         uint32_t index_max_size = 0;
662         uint64_t index_va = 0;
663
664         if (info->count_from_stream_output) {
665                 struct si_streamout_target *t =
666                         (struct si_streamout_target*)info->count_from_stream_output;
667                 uint64_t va = t->buf_filled_size->gpu_address +
668                               t->buf_filled_size_offset;
669
670                 radeon_set_context_reg(cs, R_028B30_VGT_STRMOUT_DRAW_OPAQUE_VERTEX_STRIDE,
671                                        t->stride_in_dw);
672
673                 radeon_emit(cs, PKT3(PKT3_COPY_DATA, 4, 0));
674                 radeon_emit(cs, COPY_DATA_SRC_SEL(COPY_DATA_SRC_MEM) |
675                             COPY_DATA_DST_SEL(COPY_DATA_REG) |
676                             COPY_DATA_WR_CONFIRM);
677                 radeon_emit(cs, va);     /* src address lo */
678                 radeon_emit(cs, va >> 32); /* src address hi */
679                 radeon_emit(cs, R_028B2C_VGT_STRMOUT_DRAW_OPAQUE_BUFFER_FILLED_SIZE >> 2);
680                 radeon_emit(cs, 0); /* unused */
681
682                 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
683                                       t->buf_filled_size, RADEON_USAGE_READ,
684                                       RADEON_PRIO_SO_FILLED_SIZE);
685         }
686
687         /* draw packet */
688         if (index_size) {
689                 if (index_size != sctx->last_index_size) {
690                         unsigned index_type;
691
692                         /* index type */
693                         switch (index_size) {
694                         case 1:
695                                 index_type = V_028A7C_VGT_INDEX_8;
696                                 break;
697                         case 2:
698                                 index_type = V_028A7C_VGT_INDEX_16 |
699                                              (SI_BIG_ENDIAN && sctx->chip_class <= CIK ?
700                                                       V_028A7C_VGT_DMA_SWAP_16_BIT : 0);
701                                 break;
702                         case 4:
703                                 index_type = V_028A7C_VGT_INDEX_32 |
704                                              (SI_BIG_ENDIAN && sctx->chip_class <= CIK ?
705                                                       V_028A7C_VGT_DMA_SWAP_32_BIT : 0);
706                                 break;
707                         default:
708                                 assert(!"unreachable");
709                                 return;
710                         }
711
712                         if (sctx->chip_class >= GFX9) {
713                                 radeon_set_uconfig_reg_idx(cs, R_03090C_VGT_INDEX_TYPE,
714                                                            2, index_type);
715                         } else {
716                                 radeon_emit(cs, PKT3(PKT3_INDEX_TYPE, 0, 0));
717                                 radeon_emit(cs, index_type);
718                         }
719
720                         sctx->last_index_size = index_size;
721                 }
722
723                 index_max_size = (indexbuf->width0 - index_offset) /
724                                   index_size;
725                 index_va = r600_resource(indexbuf)->gpu_address + index_offset;
726
727                 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
728                                       r600_resource(indexbuf),
729                                       RADEON_USAGE_READ, RADEON_PRIO_INDEX_BUFFER);
730         } else {
731                 /* On CI and later, non-indexed draws overwrite VGT_INDEX_TYPE,
732                  * so the state must be re-emitted before the next indexed draw.
733                  */
734                 if (sctx->chip_class >= CIK)
735                         sctx->last_index_size = -1;
736         }
737
738         if (indirect) {
739                 uint64_t indirect_va = r600_resource(indirect->buffer)->gpu_address;
740
741                 assert(indirect_va % 8 == 0);
742
743                 si_invalidate_draw_sh_constants(sctx);
744
745                 radeon_emit(cs, PKT3(PKT3_SET_BASE, 2, 0));
746                 radeon_emit(cs, 1);
747                 radeon_emit(cs, indirect_va);
748                 radeon_emit(cs, indirect_va >> 32);
749
750                 radeon_add_to_buffer_list(sctx, sctx->gfx_cs,
751                                       r600_resource(indirect->buffer),
752                                       RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
753
754                 unsigned di_src_sel = index_size ? V_0287F0_DI_SRC_SEL_DMA
755                                                     : V_0287F0_DI_SRC_SEL_AUTO_INDEX;
756
757                 assert(indirect->offset % 4 == 0);
758
759                 if (index_size) {
760                         radeon_emit(cs, PKT3(PKT3_INDEX_BASE, 1, 0));
761                         radeon_emit(cs, index_va);
762                         radeon_emit(cs, index_va >> 32);
763
764                         radeon_emit(cs, PKT3(PKT3_INDEX_BUFFER_SIZE, 0, 0));
765                         radeon_emit(cs, index_max_size);
766                 }
767
768                 if (!sctx->screen->has_draw_indirect_multi) {
769                         radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT
770                                                            : PKT3_DRAW_INDIRECT,
771                                              3, render_cond_bit));
772                         radeon_emit(cs, indirect->offset);
773                         radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
774                         radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
775                         radeon_emit(cs, di_src_sel);
776                 } else {
777                         uint64_t count_va = 0;
778
779                         if (indirect->indirect_draw_count) {
780                                 struct r600_resource *params_buf =
781                                         r600_resource(indirect->indirect_draw_count);
782
783                                 radeon_add_to_buffer_list(
784                                         sctx, sctx->gfx_cs, params_buf,
785                                         RADEON_USAGE_READ, RADEON_PRIO_DRAW_INDIRECT);
786
787                                 count_va = params_buf->gpu_address + indirect->indirect_draw_count_offset;
788                         }
789
790                         radeon_emit(cs, PKT3(index_size ? PKT3_DRAW_INDEX_INDIRECT_MULTI :
791                                                              PKT3_DRAW_INDIRECT_MULTI,
792                                              8, render_cond_bit));
793                         radeon_emit(cs, indirect->offset);
794                         radeon_emit(cs, (sh_base_reg + SI_SGPR_BASE_VERTEX * 4 - SI_SH_REG_OFFSET) >> 2);
795                         radeon_emit(cs, (sh_base_reg + SI_SGPR_START_INSTANCE * 4 - SI_SH_REG_OFFSET) >> 2);
796                         radeon_emit(cs, ((sh_base_reg + SI_SGPR_DRAWID * 4 - SI_SH_REG_OFFSET) >> 2) |
797                                         S_2C3_DRAW_INDEX_ENABLE(1) |
798                                         S_2C3_COUNT_INDIRECT_ENABLE(!!indirect->indirect_draw_count));
799                         radeon_emit(cs, indirect->draw_count);
800                         radeon_emit(cs, count_va);
801                         radeon_emit(cs, count_va >> 32);
802                         radeon_emit(cs, indirect->stride);
803                         radeon_emit(cs, di_src_sel);
804                 }
805         } else {
806                 int base_vertex;
807
808                 radeon_emit(cs, PKT3(PKT3_NUM_INSTANCES, 0, 0));
809                 radeon_emit(cs, info->instance_count);
810
811                 /* Base vertex and start instance. */
812                 base_vertex = index_size ? info->index_bias : info->start;
813
814                 if (sctx->num_vs_blit_sgprs) {
815                         /* Re-emit draw constants after we leave u_blitter. */
816                         si_invalidate_draw_sh_constants(sctx);
817
818                         /* Blit VS doesn't use BASE_VERTEX, START_INSTANCE, and DRAWID. */
819                         radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_VS_BLIT_DATA * 4,
820                                               sctx->num_vs_blit_sgprs);
821                         radeon_emit_array(cs, sctx->vs_blit_sh_data,
822                                           sctx->num_vs_blit_sgprs);
823                 } else if (base_vertex != sctx->last_base_vertex ||
824                            sctx->last_base_vertex == SI_BASE_VERTEX_UNKNOWN ||
825                            info->start_instance != sctx->last_start_instance ||
826                            info->drawid != sctx->last_drawid ||
827                            sh_base_reg != sctx->last_sh_base_reg) {
828                         radeon_set_sh_reg_seq(cs, sh_base_reg + SI_SGPR_BASE_VERTEX * 4, 3);
829                         radeon_emit(cs, base_vertex);
830                         radeon_emit(cs, info->start_instance);
831                         radeon_emit(cs, info->drawid);
832
833                         sctx->last_base_vertex = base_vertex;
834                         sctx->last_start_instance = info->start_instance;
835                         sctx->last_drawid = info->drawid;
836                         sctx->last_sh_base_reg = sh_base_reg;
837                 }
838
839                 if (index_size) {
840                         index_va += info->start * index_size;
841
842                         radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_2, 4, render_cond_bit));
843                         radeon_emit(cs, index_max_size);
844                         radeon_emit(cs, index_va);
845                         radeon_emit(cs, index_va >> 32);
846                         radeon_emit(cs, info->count);
847                         radeon_emit(cs, V_0287F0_DI_SRC_SEL_DMA);
848                 } else {
849                         radeon_emit(cs, PKT3(PKT3_DRAW_INDEX_AUTO, 1, render_cond_bit));
850                         radeon_emit(cs, info->count);
851                         radeon_emit(cs, V_0287F0_DI_SRC_SEL_AUTO_INDEX |
852                                         S_0287F0_USE_OPAQUE(!!info->count_from_stream_output));
853                 }
854         }
855 }
856
857 static void si_emit_surface_sync(struct si_context *sctx,
858                                  unsigned cp_coher_cntl)
859 {
860         struct radeon_cmdbuf *cs = sctx->gfx_cs;
861
862         if (sctx->chip_class >= GFX9) {
863                 /* Flush caches and wait for the caches to assert idle. */
864                 radeon_emit(cs, PKT3(PKT3_ACQUIRE_MEM, 5, 0));
865                 radeon_emit(cs, cp_coher_cntl); /* CP_COHER_CNTL */
866                 radeon_emit(cs, 0xffffffff);    /* CP_COHER_SIZE */
867                 radeon_emit(cs, 0xffffff);      /* CP_COHER_SIZE_HI */
868                 radeon_emit(cs, 0);             /* CP_COHER_BASE */
869                 radeon_emit(cs, 0);             /* CP_COHER_BASE_HI */
870                 radeon_emit(cs, 0x0000000A);    /* POLL_INTERVAL */
871         } else {
872                 /* ACQUIRE_MEM is only required on a compute ring; SURFACE_SYNC suffices on the gfx ring. */
873                 radeon_emit(cs, PKT3(PKT3_SURFACE_SYNC, 3, 0));
874                 radeon_emit(cs, cp_coher_cntl);   /* CP_COHER_CNTL */
875                 radeon_emit(cs, 0xffffffff);      /* CP_COHER_SIZE */
876                 radeon_emit(cs, 0);               /* CP_COHER_BASE */
877                 radeon_emit(cs, 0x0000000A);      /* POLL_INTERVAL */
878         }
879 }
880
881 void si_emit_cache_flush(struct si_context *sctx)
882 {
883         struct radeon_cmdbuf *cs = sctx->gfx_cs;
884         uint32_t flags = sctx->flags;
885         uint32_t cp_coher_cntl = 0;
886         uint32_t flush_cb_db = flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
887                                         SI_CONTEXT_FLUSH_AND_INV_DB);
888
889         if (flags & SI_CONTEXT_FLUSH_AND_INV_CB)
890                 sctx->num_cb_cache_flushes++;
891         if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
892                 sctx->num_db_cache_flushes++;
893
894         /* SI has a bug where it always flushes both ICACHE and KCACHE if
895          * either bit is set. An alternative is to write SQC_CACHES, but that
896          * doesn't seem to work reliably. Since the bug doesn't affect
897          * correctness (it only does more work than necessary) and the
898          * performance impact is likely negligible, there is no plan to add
899          * a workaround for it.
900          */
901
902         if (flags & SI_CONTEXT_INV_ICACHE)
903                 cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
904         if (flags & SI_CONTEXT_INV_SMEM_L1)
905                 cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
906
907         if (sctx->chip_class <= VI) {
908                 if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
909                         cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1) |
910                                          S_0085F0_CB0_DEST_BASE_ENA(1) |
911                                          S_0085F0_CB1_DEST_BASE_ENA(1) |
912                                          S_0085F0_CB2_DEST_BASE_ENA(1) |
913                                          S_0085F0_CB3_DEST_BASE_ENA(1) |
914                                          S_0085F0_CB4_DEST_BASE_ENA(1) |
915                                          S_0085F0_CB5_DEST_BASE_ENA(1) |
916                                          S_0085F0_CB6_DEST_BASE_ENA(1) |
917                                          S_0085F0_CB7_DEST_BASE_ENA(1);
918
919                         /* Necessary for DCC */
920                         if (sctx->chip_class == VI)
921                                 si_cp_release_mem(sctx,
922                                                   V_028A90_FLUSH_AND_INV_CB_DATA_TS,
923                                                   0, EOP_DST_SEL_MEM, EOP_INT_SEL_NONE,
924                                                   EOP_DATA_SEL_DISCARD, NULL,
925                                                   0, 0, SI_NOT_QUERY);
926                 }
927                 if (flags & SI_CONTEXT_FLUSH_AND_INV_DB)
928                         cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) |
929                                          S_0085F0_DB_DEST_BASE_ENA(1);
930         }
931
932         if (flags & SI_CONTEXT_FLUSH_AND_INV_CB) {
933                 /* Flush CMASK/FMASK/DCC. SURFACE_SYNC will wait for idle. */
934                 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
935                 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_CB_META) | EVENT_INDEX(0));
936         }
937         if (flags & (SI_CONTEXT_FLUSH_AND_INV_DB |
938                      SI_CONTEXT_FLUSH_AND_INV_DB_META)) {
939                 /* Flush HTILE. SURFACE_SYNC will wait for idle. */
940                 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
941                 radeon_emit(cs, EVENT_TYPE(V_028A90_FLUSH_AND_INV_DB_META) | EVENT_INDEX(0));
942         }
943
944         /* Wait for shader engines to go idle.
945          * VS and PS waits are unnecessary if SURFACE_SYNC is going to wait
946          * for everything including CB/DB cache flushes.
947          */
948         if (!flush_cb_db) {
949                 if (flags & SI_CONTEXT_PS_PARTIAL_FLUSH) {
950                         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
951                         radeon_emit(cs, EVENT_TYPE(V_028A90_PS_PARTIAL_FLUSH) | EVENT_INDEX(4));
952                         /* Only count explicit shader flushes, not implicit ones
953                          * done by SURFACE_SYNC.
954                          */
955                         sctx->num_vs_flushes++;
956                         sctx->num_ps_flushes++;
957                 } else if (flags & SI_CONTEXT_VS_PARTIAL_FLUSH) {
958                         radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
959                         radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
960                         sctx->num_vs_flushes++;
961                 }
962         }
963
964         if (flags & SI_CONTEXT_CS_PARTIAL_FLUSH &&
965             sctx->compute_is_busy) {
966                 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
967                 radeon_emit(cs, EVENT_TYPE(V_028A90_CS_PARTIAL_FLUSH) | EVENT_INDEX(4));
968                 sctx->num_cs_flushes++;
969                 sctx->compute_is_busy = false;
970         }
971
972         /* VGT state synchronization. */
973         if (flags & SI_CONTEXT_VGT_FLUSH) {
974                 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
975                 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
976         }
977         if (flags & SI_CONTEXT_VGT_STREAMOUT_SYNC) {
978                 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
979                 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_STREAMOUT_SYNC) | EVENT_INDEX(0));
980         }
981
982         /* GFX9: Wait for idle if we're flushing CB or DB. ACQUIRE_MEM doesn't
983          * wait for idle on GFX9. We have to use a TS event.
984          */
985         if (sctx->chip_class >= GFX9 && flush_cb_db) {
986                 uint64_t va;
987                 unsigned tc_flags, cb_db_event;
988
989                 /* Set the CB/DB flush event. */
990                 switch (flush_cb_db) {
991                 case SI_CONTEXT_FLUSH_AND_INV_CB:
992                         cb_db_event = V_028A90_FLUSH_AND_INV_CB_DATA_TS;
993                         break;
994                 case SI_CONTEXT_FLUSH_AND_INV_DB:
995                         cb_db_event = V_028A90_FLUSH_AND_INV_DB_DATA_TS;
996                         break;
997                 default:
998                         /* both CB & DB */
999                         cb_db_event = V_028A90_CACHE_FLUSH_AND_INV_TS_EVENT;
1000                 }
1001
1002                 /* These are the only allowed combinations. If you need to
1003                  * do multiple operations at once, do them separately.
1004                  * All operations that invalidate L2 also seem to invalidate
1005                  * metadata. Volatile (VOL) and WC flushes are not listed here.
1006                  *
1007                  * TC    | TC_WB         = writeback & invalidate L2 & L1
1008                  * TC    | TC_WB | TC_NC = writeback & invalidate L2 for MTYPE == NC
1009                  *         TC_WB | TC_NC = writeback L2 for MTYPE == NC
1010                  * TC            | TC_NC = invalidate L2 for MTYPE == NC
1011                  * TC    | TC_MD         = writeback & invalidate L2 metadata (DCC, etc.)
1012                  * TCL1                  = invalidate L1
1013                  */
1014                 tc_flags = 0;
1015
1016                 if (flags & SI_CONTEXT_INV_L2_METADATA) {
1017                         tc_flags = EVENT_TC_ACTION_ENA |
1018                                    EVENT_TC_MD_ACTION_ENA;
1019                 }
1020
1021                 /* Ideally flush TC together with CB/DB. */
1022                 if (flags & SI_CONTEXT_INV_GLOBAL_L2) {
1023                         /* Writeback and invalidate everything in L2 & L1. */
1024                         tc_flags = EVENT_TC_ACTION_ENA |
1025                                    EVENT_TC_WB_ACTION_ENA;
1026
1027                         /* Clear the flags. */
1028                         flags &= ~(SI_CONTEXT_INV_GLOBAL_L2 |
1029                                    SI_CONTEXT_WRITEBACK_GLOBAL_L2 |
1030                                    SI_CONTEXT_INV_VMEM_L1);
1031                         sctx->num_L2_invalidates++;
1032                 }
1033
1034                 /* Do the flush (enqueue the event and wait for it). */
1035                 va = sctx->wait_mem_scratch->gpu_address;
1036                 sctx->wait_mem_number++;
1037
1038                 si_cp_release_mem(sctx, cb_db_event, tc_flags,
1039                                   EOP_DST_SEL_MEM,
1040                                   EOP_INT_SEL_SEND_DATA_AFTER_WR_CONFIRM,
1041                                   EOP_DATA_SEL_VALUE_32BIT,
1042                                   sctx->wait_mem_scratch, va,
1043                                   sctx->wait_mem_number, SI_NOT_QUERY);
1044                 si_cp_wait_mem(sctx, va, sctx->wait_mem_number, 0xffffffff, 0);
1045         }
1046
1047         /* Make sure ME is idle (it executes most packets) before continuing.
1048          * This prevents read-after-write hazards between PFP and ME.
1049          */
1050         if (cp_coher_cntl ||
1051             (flags & (SI_CONTEXT_CS_PARTIAL_FLUSH |
1052                             SI_CONTEXT_INV_VMEM_L1 |
1053                             SI_CONTEXT_INV_GLOBAL_L2 |
1054                             SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
1055                 radeon_emit(cs, PKT3(PKT3_PFP_SYNC_ME, 0, 0));
1056                 radeon_emit(cs, 0);
1057         }
1058
1059         /* SI-CI-VI only:
1060          *   When one of the CP_COHER_CNTL.DEST_BASE flags is set, SURFACE_SYNC
1061          *   waits for idle, so it should be last. SURFACE_SYNC is done in PFP.
1062          *
1063          * cp_coher_cntl should contain all necessary flags except TC flags
1064          * at this point.
1065          *
1066          * SI-CIK don't support L2 write-back.
1067          */
1068         if (flags & SI_CONTEXT_INV_GLOBAL_L2 ||
1069             (sctx->chip_class <= CIK &&
1070              (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2))) {
1071                 /* Invalidate L1 & L2. (L1 is always invalidated on SI)
1072                  * WB must be set on VI+ when TC_ACTION is set.
1073                  */
1074                 si_emit_surface_sync(sctx, cp_coher_cntl |
1075                                      S_0085F0_TC_ACTION_ENA(1) |
1076                                      S_0085F0_TCL1_ACTION_ENA(1) |
1077                                      S_0301F0_TC_WB_ACTION_ENA(sctx->chip_class >= VI));
1078                 cp_coher_cntl = 0;
1079                 sctx->num_L2_invalidates++;
1080         } else {
1081                 /* L1 invalidation and L2 writeback must be done separately,
1082                  * because the two operations can't be combined in a single
1083                  * SURFACE_SYNC. */
1084                 if (flags & SI_CONTEXT_WRITEBACK_GLOBAL_L2) {
1085                         /* WB = write-back
1086                          * NC = apply to non-coherent MTYPEs
1087                          *      (i.e. MTYPE <= 1, which is what we use everywhere)
1088                          *
1089                          * WB doesn't work without NC.
1090                          */
1091                         si_emit_surface_sync(sctx, cp_coher_cntl |
1092                                              S_0301F0_TC_WB_ACTION_ENA(1) |
1093                                              S_0301F0_TC_NC_ACTION_ENA(1));
1094                         cp_coher_cntl = 0;
1095                         sctx->num_L2_writebacks++;
1096                 }
1097                 if (flags & SI_CONTEXT_INV_VMEM_L1) {
1098                         /* Invalidate per-CU VMEM L1. */
1099                         si_emit_surface_sync(sctx, cp_coher_cntl |
1100                                              S_0085F0_TCL1_ACTION_ENA(1));
1101                         cp_coher_cntl = 0;
1102                 }
1103         }
1104
1105         /* If TC flushes haven't cleared this... */
1106         if (cp_coher_cntl)
1107                 si_emit_surface_sync(sctx, cp_coher_cntl);
1108
1109         if (flags & SI_CONTEXT_START_PIPELINE_STATS) {
1110                 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1111                 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_START) |
1112                                 EVENT_INDEX(0));
1113         } else if (flags & SI_CONTEXT_STOP_PIPELINE_STATS) {
1114                 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
1115                 radeon_emit(cs, EVENT_TYPE(V_028A90_PIPELINESTAT_STOP) |
1116                                 EVENT_INDEX(0));
1117         }
1118
1119         sctx->flags = 0;
1120 }
1121
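     /**
      * Compute the smallest [start, start+count) vertex range that covers the
      * draw. For indirect draws this maps the indirect (and, if present, the
      * draw-count) buffer for CPU reads, so it is only meant for slow fallback
      * paths such as index translation.
      */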
1122 static void si_get_draw_start_count(struct si_context *sctx,
1123                                     const struct pipe_draw_info *info,
1124                                     unsigned *start, unsigned *count)
1125 {
1126         struct pipe_draw_indirect_info *indirect = info->indirect;
1127
1128         if (indirect) {
1129                 unsigned indirect_count;
1130                 struct pipe_transfer *transfer;
1131                 unsigned begin, end;
1132                 unsigned map_size;
1133                 unsigned *data;
1134
1135                 if (indirect->indirect_draw_count) {
1136                         data = pipe_buffer_map_range(&sctx->b,
1137                                         indirect->indirect_draw_count,
1138                                         indirect->indirect_draw_count_offset,
1139                                         sizeof(unsigned),
1140                                         PIPE_TRANSFER_READ, &transfer);
1141
1142                         indirect_count = *data;
1143
1144                         pipe_buffer_unmap(&sctx->b, transfer);
1145                 } else {
1146                         indirect_count = indirect->draw_count;
1147                 }
1148
1149                 if (!indirect_count) {
1150                         *start = *count = 0;
1151                         return;
1152                 }
1153
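                     /* Each indirect command begins with {count, instance_count,
                      * start, ...}, so only the first 3 dwords of every command
                      * need to be mapped; e.g. (illustrative numbers) an
                      * indirect_count of 4 with stride = 16 maps 3*16 + 12 = 60
                      * bytes.
                      */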
1154                 map_size = (indirect_count - 1) * indirect->stride + 3 * sizeof(unsigned);
1155                 data = pipe_buffer_map_range(&sctx->b, indirect->buffer,
1156                                              indirect->offset, map_size,
1157                                              PIPE_TRANSFER_READ, &transfer);
1158
1159                 begin = UINT_MAX;
1160                 end = 0;
1161
1162                 for (unsigned i = 0; i < indirect_count; ++i) {
1163                         unsigned count = data[0];
1164                         unsigned start = data[2];
1165
1166                         if (count > 0) {
1167                                 begin = MIN2(begin, start);
1168                                 end = MAX2(end, start + count);
1169                         }
1170
1171                         data += indirect->stride / sizeof(unsigned);
1172                 }
1173
1174                 pipe_buffer_unmap(&sctx->b, transfer);
1175
1176                 if (begin < end) {
1177                         *start = begin;
1178                         *count = end - begin;
1179                 } else {
1180                         *start = *count = 0;
1181                 }
1182         } else {
1183                 *start = info->start;
1184                 *count = info->count;
1185         }
1186 }
1187
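     /**
      * Emit all dirty state atoms and PM4 states plus the derived tess, VS and
      * draw register state. Atoms in skip_atom_mask stay dirty so that the
      * caller can emit them later (e.g. render condition after a cache flush).
      */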
1188 static void si_emit_all_states(struct si_context *sctx, const struct pipe_draw_info *info,
1189                                unsigned skip_atom_mask)
1190 {
1191         unsigned num_patches = 0;
1192         bool context_roll = false; /* set correctly for GFX9 only */
1193
1194         context_roll |= si_emit_rasterizer_prim_state(sctx);
1195         if (sctx->tes_shader.cso)
1196                 context_roll |= si_emit_derived_tess_state(sctx, info, &num_patches);
1197         if (info->count_from_stream_output)
1198                 context_roll = true;
1199
1200         /* Vega10/Raven scissor bug workaround. When any context register is
1201          * written (i.e. the GPU rolls the context), PA_SC_VPORT_SCISSOR
1202          * registers must be written too.
1203          */
1204         if ((sctx->family == CHIP_VEGA10 || sctx->family == CHIP_RAVEN) &&
1205             (context_roll ||
1206              sctx->dirty_atoms & si_atoms_that_roll_context() ||
1207              sctx->dirty_states & si_states_that_roll_context() ||
1208              si_prim_restart_index_changed(sctx, info))) {
1209                 sctx->scissors.dirty_mask = (1 << SI_MAX_VIEWPORTS) - 1;
1210                 si_mark_atom_dirty(sctx, &sctx->atoms.s.scissors);
1211         }
1212
1213         /* Emit state atoms. */
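             /* u_bit_scan consumes the lowest set bit on each iteration, so every
              * dirty atom's emit callback runs exactly once, in atom-index order.
              */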
1214         unsigned mask = sctx->dirty_atoms & ~skip_atom_mask;
1215         while (mask)
1216                 sctx->atoms.array[u_bit_scan(&mask)].emit(sctx);
1217
1218         sctx->dirty_atoms &= skip_atom_mask;
1219
1220         /* Emit states. */
1221         mask = sctx->dirty_states;
1222         while (mask) {
1223                 unsigned i = u_bit_scan(&mask);
1224                 struct si_pm4_state *state = sctx->queued.array[i];
1225
1226                 if (!state || sctx->emitted.array[i] == state)
1227                         continue;
1228
1229                 si_pm4_emit(sctx, state);
1230                 sctx->emitted.array[i] = state;
1231         }
1232         sctx->dirty_states = 0;
1233
1234         /* Emit draw states. */
1235         si_emit_vs_state(sctx, info);
1236         si_emit_draw_registers(sctx, info, num_patches);
1237 }
1238
1239 void si_draw_vbo(struct pipe_context *ctx, const struct pipe_draw_info *info)
1240 {
1241         struct si_context *sctx = (struct si_context *)ctx;
1242         struct si_state_rasterizer *rs = sctx->queued.named.rasterizer;
1243         struct pipe_resource *indexbuf = info->index.resource;
1244         unsigned dirty_tex_counter;
1245         enum pipe_prim_type rast_prim;
1246         unsigned index_size = info->index_size;
1247         unsigned index_offset = info->indirect ? info->start * index_size : 0;
1248
1249         if (likely(!info->indirect)) {
1250                 /* SI-CI treat instance_count==0 as instance_count==1. There is
1251                  * no workaround for indirect draws, but we can at least skip
1252                  * direct draws.
1253                  */
1254                 if (unlikely(!info->instance_count))
1255                         return;
1256
1257                 /* Handle count == 0. */
1258                 if (unlikely(!info->count &&
1259                              (index_size || !info->count_from_stream_output)))
1260                         return;
1261         }
1262
1263         if (unlikely(!sctx->vs_shader.cso ||
1264                      !rs ||
1265                      (!sctx->ps_shader.cso && !rs->rasterizer_discard) ||
1266                      (!!sctx->tes_shader.cso != (info->mode == PIPE_PRIM_PATCHES)))) {
1267                 assert(0);
1268                 return;
1269         }
1270
1271         /* Recompute and re-emit the texture resource states if needed. */
1272         dirty_tex_counter = p_atomic_read(&sctx->screen->dirty_tex_counter);
1273         if (unlikely(dirty_tex_counter != sctx->last_dirty_tex_counter)) {
1274                 sctx->last_dirty_tex_counter = dirty_tex_counter;
1275                 sctx->framebuffer.dirty_cbufs |=
1276                         ((1 << sctx->framebuffer.state.nr_cbufs) - 1);
1277                 sctx->framebuffer.dirty_zsbuf = true;
1278                 si_mark_atom_dirty(sctx, &sctx->atoms.s.framebuffer);
1279                 si_update_all_texture_descriptors(sctx);
1280         }
1281
1282         si_decompress_textures(sctx, u_bit_consecutive(0, SI_NUM_GRAPHICS_SHADERS));
1283
1284         /* Set the rasterization primitive type.
1285          *
1286          * This must be done after si_decompress_textures, which can call
1287          * draw_vbo recursively, and before si_update_shaders, which uses
1288          * current_rast_prim for this draw_vbo call. */
1289         if (sctx->gs_shader.cso)
1290                 rast_prim = sctx->gs_shader.cso->gs_output_prim;
1291         else if (sctx->tes_shader.cso) {
1292                 if (sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_POINT_MODE])
1293                         rast_prim = PIPE_PRIM_POINTS;
1294                 else
1295                         rast_prim = sctx->tes_shader.cso->info.properties[TGSI_PROPERTY_TES_PRIM_MODE];
1296         } else
1297                 rast_prim = info->mode;
1298
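             /* Only transitions between the point/line class and the triangle
              * class dirty the guardband atom, since the guardband/discard
              * limits depend on whether points/lines or polygons reach the
              * rasterizer.
              */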
1299         if (rast_prim != sctx->current_rast_prim) {
1300                 if (util_prim_is_points_or_lines(sctx->current_rast_prim) !=
1301                     util_prim_is_points_or_lines(rast_prim))
1302                         si_mark_atom_dirty(sctx, &sctx->atoms.s.guardband);
1303
1304                 sctx->current_rast_prim = rast_prim;
1305                 sctx->do_update_shaders = true;
1306         }
1307
1308         if (sctx->tes_shader.cso &&
1309             sctx->screen->has_ls_vgpr_init_bug) {
1310                 /* Determine whether the LS VGPR fix should be applied.
1311                  *
1312                  * It is only required when num input CPs > num output CPs,
1313                  * which cannot happen with the fixed function TCS. We should
1314                  * also update this bit when switching from TCS to fixed
1315                  * function TCS.
1316                  */
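                     /* ls_vgpr_fix feeds into the shader key, so toggling it
                      * selects a different shader variant; that is why
                      * do_update_shaders is set below.
                      */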
1317                 struct si_shader_selector *tcs = sctx->tcs_shader.cso;
1318                 bool ls_vgpr_fix =
1319                         tcs &&
1320                         info->vertices_per_patch >
1321                         tcs->info.properties[TGSI_PROPERTY_TCS_VERTICES_OUT];
1322
1323                 if (ls_vgpr_fix != sctx->ls_vgpr_fix) {
1324                         sctx->ls_vgpr_fix = ls_vgpr_fix;
1325                         sctx->do_update_shaders = true;
1326                 }
1327         }
1328
1329         if (sctx->gs_shader.cso) {
1330                 /* Determine whether the GS triangle strip adjacency fix should
1331                  * be applied. Rotate every other triangle if
1332                  * - triangle strips with adjacency are fed to the GS and
1333                  * - primitive restart is disabled (the rotation doesn't help
1334                  *   when the restart occurs after an odd number of triangles).
1335                  */
1336                 bool gs_tri_strip_adj_fix =
1337                         !sctx->tes_shader.cso &&
1338                         info->mode == PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY &&
1339                         !info->primitive_restart;
1340
1341                 if (gs_tri_strip_adj_fix != sctx->gs_tri_strip_adj_fix) {
1342                         sctx->gs_tri_strip_adj_fix = gs_tri_strip_adj_fix;
1343                         sctx->do_update_shaders = true;
1344                 }
1345         }
1346
1347         if (sctx->do_update_shaders && !si_update_shaders(sctx))
1348                 return;
1349
1350         if (index_size) {
1351                 /* Translate or upload, if needed. */
1352                 /* 8-bit indices are supported natively only on VI and later,
1353                  * so translate them to 16-bit here on older chips. */
1353                 if (sctx->chip_class <= CIK && index_size == 1) {
1354                         unsigned start, count, start_offset, size, offset;
1355                         void *ptr;
1356
1357                         si_get_draw_start_count(sctx, info, &start, &count);
1358                         start_offset = start * 2;
1359                         size = count * 2;
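                             /* Widening 8-bit indices to 16 bits doubles all
                              * offsets and sizes; e.g. (illustrative) start = 100
                              * and count = 30 give start_offset = 200 and
                              * size = 60 bytes.
                              */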
1360
1361                         indexbuf = NULL;
1362                         u_upload_alloc(ctx->stream_uploader, start_offset,
1363                                        size,
1364                                        si_optimal_tcc_alignment(sctx, size),
1365                                        &offset, &indexbuf, &ptr);
1366                         if (!indexbuf)
1367                                 return;
1368
1369                         util_shorten_ubyte_elts_to_userptr(&sctx->b, info, 0, 0,
1370                                                            index_offset + start,
1371                                                            count, ptr);
1372
1373                         /* info->start will be added by the drawing code */
1374                         index_offset = offset - start_offset;
1375                         index_size = 2;
1376                 } else if (info->has_user_indices) {
1377                         unsigned start_offset;
1378
1379                         assert(!info->indirect);
1380                         start_offset = info->start * index_size;
1381
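                             /* Only [start, start+count) is uploaded; index_offset
                              * is rebased by -start_offset below so that adding
                              * info->start * index_size in the drawing code lands
                              * on the first uploaded index.
                              */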
1382                         indexbuf = NULL;
1383                         u_upload_data(ctx->stream_uploader, start_offset,
1384                                       info->count * index_size,
1385                                       sctx->screen->info.tcc_cache_line_size,
1386                                       (char*)info->index.user + start_offset,
1387                                       &index_offset, &indexbuf);
1388                         if (!indexbuf)
1389                                 return;
1390
1391                         /* info->start will be added by the drawing code */
1392                         index_offset -= start_offset;
1393                 } else if (sctx->chip_class <= CIK &&
1394                            r600_resource(indexbuf)->TC_L2_dirty) {
1395                         /* VI reads index buffers through TC L2, so it doesn't
1396                          * need this. */
1397                         sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
1398                         r600_resource(indexbuf)->TC_L2_dirty = false;
1399                 }
1400         }
1401
1402         if (info->indirect) {
1403                 struct pipe_draw_indirect_info *indirect = info->indirect;
1404
1405                 /* Add the buffer size for memory checking in si_need_gfx_cs_space. */
1406                 si_context_add_resource_size(sctx, indirect->buffer);
1407
1408                 /* Indirect buffers use TC L2 on GFX9, but not older hw. */
1409                 if (sctx->chip_class <= VI) {
1410                         if (r600_resource(indirect->buffer)->TC_L2_dirty) {
1411                                 sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
1412                                 r600_resource(indirect->buffer)->TC_L2_dirty = false;
1413                         }
1414
1415                         if (indirect->indirect_draw_count &&
1416                             r600_resource(indirect->indirect_draw_count)->TC_L2_dirty) {
1417                                 sctx->flags |= SI_CONTEXT_WRITEBACK_GLOBAL_L2;
1418                                 r600_resource(indirect->indirect_draw_count)->TC_L2_dirty = false;
1419                         }
1420                 }
1421         }
1422
1423         si_need_gfx_cs_space(sctx);
1424
1425         /* si_context_add_resource_size has already accounted for the vertex
1426          * buffers, so this must come after si_need_gfx_cs_space: any flush it
1427          * triggers has to happen before buffers are added to the buffer list.
1428          */
1429         if (!si_upload_vertex_buffer_descriptors(sctx))
1430                 return;
1431
1432         /* Use optimal packet order based on whether we need to sync the pipeline. */
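             /* Roughly: if a pipeline sync is pending, emit states -> cache flush
              * -> draw -> prefetch, so SET packets overlap the previous draw and
              * CU idle time stays short; otherwise emit cache flush -> prefetch
              * (API VS + VBO descriptors) -> states -> draw -> prefetch (the rest).
              */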
1433         if (unlikely(sctx->flags & (SI_CONTEXT_FLUSH_AND_INV_CB |
1434                                       SI_CONTEXT_FLUSH_AND_INV_DB |
1435                                       SI_CONTEXT_PS_PARTIAL_FLUSH |
1436                                       SI_CONTEXT_CS_PARTIAL_FLUSH))) {
1437                 /* If we have to wait for idle, set all states first, so that all
1438                  * SET packets are processed in parallel with previous draw calls.
1439                  * Then draw and prefetch at the end. This ensures that the time
1440                  * the CUs are idle is very short.
1441                  */
1442                 unsigned masked_atoms = 0;
1443
1444                 if (unlikely(sctx->flags & SI_CONTEXT_FLUSH_FOR_RENDER_COND))
1445                         masked_atoms |= si_get_atom_bit(sctx, &sctx->atoms.s.render_cond);
1446
1447                 if (!si_upload_graphics_shader_descriptors(sctx))
1448                         return;
1449
1450                 /* Emit all states except possibly render condition. */
1451                 si_emit_all_states(sctx, info, masked_atoms);
1452                 si_emit_cache_flush(sctx);
1453                 /* <-- CUs are idle here. */
1454
1455                 if (si_is_atom_dirty(sctx, &sctx->atoms.s.render_cond))
1456                         sctx->atoms.s.render_cond.emit(sctx);
1457                 sctx->dirty_atoms = 0;
1458
1459                 si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
1460                 /* <-- CUs are busy here. */
1461
1462                 /* Start prefetches after the draw has been started. Both will run
1463                  * in parallel, but starting the draw first is more important.
1464                  */
1465                 if (sctx->chip_class >= CIK && sctx->prefetch_L2_mask)
1466                         cik_emit_prefetch_L2(sctx, false);
1467         } else {
1468                 /* If we don't wait for idle, start prefetches first, then set
1469                  * states, and draw at the end.
1470                  */
1471                 if (sctx->flags)
1472                         si_emit_cache_flush(sctx);
1473
1474                 /* Only prefetch the API VS and VBO descriptors. */
1475                 if (sctx->chip_class >= CIK && sctx->prefetch_L2_mask)
1476                         cik_emit_prefetch_L2(sctx, true);
1477
1478                 if (!si_upload_graphics_shader_descriptors(sctx))
1479                         return;
1480
1481                 si_emit_all_states(sctx, info, 0);
1482                 si_emit_draw_packets(sctx, info, indexbuf, index_size, index_offset);
1483
1484                 /* Prefetch the remaining shaders after the draw has been
1485                  * started. */
1486                 if (sctx->chip_class >= CIK && sctx->prefetch_L2_mask)
1487                         cik_emit_prefetch_L2(sctx, false);
1488         }
1489
1490         if (unlikely(sctx->current_saved_cs)) {
1491                 si_trace_emit(sctx);
1492                 si_log_draw_state(sctx, sctx->log);
1493         }
1494
1495         /* Workaround for a VGT hang when streamout is enabled.
1496          * It must be done after drawing. */
1497         if ((sctx->family == CHIP_HAWAII ||
1498              sctx->family == CHIP_TONGA ||
1499              sctx->family == CHIP_FIJI) &&
1500             si_get_strmout_en(sctx)) {
1501                 sctx->flags |= SI_CONTEXT_VGT_STREAMOUT_SYNC;
1502         }
1503
1504         if (unlikely(sctx->decompression_enabled)) {
1505                 sctx->num_decompress_calls++;
1506         } else {
1507                 sctx->num_draw_calls++;
1508                 if (sctx->framebuffer.state.nr_cbufs > 1)
1509                         sctx->num_mrt_draw_calls++;
1510                 if (info->primitive_restart)
1511                         sctx->num_prim_restart_calls++;
1512                 if (G_0286E8_WAVESIZE(sctx->spi_tmpring_size))
1513                         sctx->num_spill_draw_calls++;
1514         }
1515         if (index_size && indexbuf != info->index.resource)
1516                 pipe_resource_reference(&indexbuf, NULL);
1517 }
1518
1519 void si_draw_rectangle(struct blitter_context *blitter,
1520                        void *vertex_elements_cso,
1521                        blitter_get_vs_func get_vs,
1522                        int x1, int y1, int x2, int y2,
1523                        float depth, unsigned num_instances,
1524                        enum blitter_attrib_type type,
1525                        const union blitter_attrib *attrib)
1526 {
1527         struct pipe_context *pipe = util_blitter_get_pipe(blitter);
1528         struct si_context *sctx = (struct si_context*)pipe;
1529
1530         /* Pack position coordinates as signed int16. */
1531         sctx->vs_blit_sh_data[0] = (uint32_t)(x1 & 0xffff) |
1532                                    ((uint32_t)(y1 & 0xffff) << 16);
1533         sctx->vs_blit_sh_data[1] = (uint32_t)(x2 & 0xffff) |
1534                                    ((uint32_t)(y2 & 0xffff) << 16);
1535         sctx->vs_blit_sh_data[2] = fui(depth);
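             /* Illustrative packing example: x1 = -5, y1 = 3 yields
              * (0xfffb | (3 << 16)) = 0x0003fffb; the blit VS reads the halves
              * back as signed 16-bit values.
              */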
1536
1537         switch (type) {
1538         case UTIL_BLITTER_ATTRIB_COLOR:
1539                 memcpy(&sctx->vs_blit_sh_data[3], attrib->color,
1540                        sizeof(float)*4);
1541                 break;
1542         case UTIL_BLITTER_ATTRIB_TEXCOORD_XY:
1543         case UTIL_BLITTER_ATTRIB_TEXCOORD_XYZW:
1544                 memcpy(&sctx->vs_blit_sh_data[3], &attrib->texcoord,
1545                        sizeof(attrib->texcoord));
1546                 break;
1547         case UTIL_BLITTER_ATTRIB_NONE:;
1548         }
1549
1550         pipe->bind_vs_state(pipe, si_get_blitter_vs(sctx, type, num_instances));
1551
1552         struct pipe_draw_info info = {};
1553         info.mode = SI_PRIM_RECTANGLE_LIST;
1554         info.count = 3;
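             /* A RECTLIST primitive takes 3 vertices; the hardware derives the
              * 4th corner of the rectangle.
              */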
1555         info.instance_count = num_instances;
1556
1557         /* Don't set per-stage shader pointers for VS. */
1558         sctx->shader_pointers_dirty &= ~SI_DESCS_SHADER_MASK(VERTEX);
1559         sctx->vertex_buffer_pointer_dirty = false;
1560
1561         si_draw_vbo(pipe, &info);
1562 }
1563
1564 void si_trace_emit(struct si_context *sctx)
1565 {
1566         struct radeon_cmdbuf *cs = sctx->gfx_cs;
1567         uint64_t va = sctx->current_saved_cs->trace_buf->gpu_address;
1568         uint32_t trace_id = ++sctx->current_saved_cs->trace_id;
1569
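             /* Store the new trace id in the trace buffer through the ME with
              * write confirmation, then embed the same id in a NOP packet so a
              * parsed IB can be correlated with how far the GPU actually got.
              */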
1570         radeon_emit(cs, PKT3(PKT3_WRITE_DATA, 3, 0));
1571         radeon_emit(cs, S_370_DST_SEL(V_370_MEMORY_SYNC) |
1572                     S_370_WR_CONFIRM(1) |
1573                     S_370_ENGINE_SEL(V_370_ME));
1574         radeon_emit(cs, va);
1575         radeon_emit(cs, va >> 32);
1576         radeon_emit(cs, trace_id);
1577         radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
1578         radeon_emit(cs, AC_ENCODE_TRACE_POINT(trace_id));
1579
1580         if (sctx->log)
1581                 u_log_flush(sctx->log);
1582 }