/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"
#include "vc4_qpu.h"
#include "vc4_qir.h"
#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);

static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = intr->const_index[0];
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        c->num_texture_samples++;
        return qir_TEX_RESULT(c);
}

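/* Worked example for the clamp in indirect_uniform_load() above (numbers
 * assumed for illustration): with dst_offset = 16 and size = 64, the byte
 * address is clamped to [0, 76], so even a wild shader-computed index still
 * reads a whole dword from inside the copied uniform stream instead of
 * failing kernel validation.
 */
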
nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
                                       enum quniform_contents contents)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_load_uniform);
        intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4;
        intr->num_components = 1;
        intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32, NULL);
        nir_builder_instr_insert(b, &intr->instr);
        return &intr->dest.ssa;
}

nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case PIPE_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case PIPE_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case PIPE_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case PIPE_SWIZZLE_X:
        case PIPE_SWIZZLE_Y:
        case PIPE_SWIZZLE_Z:
        case PIPE_SWIZZLE_W:
                return srcs[swiz];
        }
}

static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}

static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}

static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}

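/* Both helpers above refine the hardware's low-precision estimate with one
 * Newton-Raphson iteration: for 1/x, r1 = r0 * (2 - x * r0); for 1/sqrt(x),
 * r1 = r0 * (1.5 - 0.5 * x * r0 * r0).  Each step roughly doubles the number
 * of correct mantissa bits.  E.g. for x = 4 with a rough estimate r0 = 0.24,
 * the rcp step gives 0.24 * (2 - 0.96) = 0.2496, much closer to 0.25.
 */
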
static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c,
                                                     srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL(c, QPU_COND_NS, low, high);
}

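/* This is the standard sRGB EOTF: linear = srgb / 12.92 for srgb <= 0.04045,
 * else ((srgb + 0.055) / 1.055)^2.4.  The SF/SEL pair implements the branch:
 * a negative (srgb - 0.04045) sets the N flag, so QPU_COND_NS selects the
 * linear-segment result and the power-curve result is used otherwise.
 */
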
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}

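/* The QPU multiplier only handles 24-bit operands, so the full 32-bit product
 * is assembled from partial products.  Writing a = a_hi * 2^24 + a_lo, the
 * a_hi * b_hi term is shifted left by 48 and vanishes mod 2^32, leaving
 * a * b = lolo + ((hilo + lohi) << 24), which is what ntq_umul() returns.
 */
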
static struct qreg
ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
{
        struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
                                                 qir_uniform_ui(c, 8)));
        return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
}

/**
 * Emits a lowered TXF_MS from an MSAA texture.
 *
 * The addressing math has been lowered in NIR, and now we just need to read
 * it like a UBO.
 */
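/* Worked size example for the code below (dimensions assumed): a 100x60 MSAA
 * surface aligns up to 128x64, i.e. 4x2 tiles of 32x32 samples, so size is
 * 8 * tile_size and the fetch address gets clamped to [0, size - 4].
 */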
static void
ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
{
        uint32_t tile_width = 32;
        uint32_t tile_height = 32;
        uint32_t tile_size = (tile_height * tile_width *
                              VC4_MAX_SAMPLES * sizeof(uint32_t));

        unsigned unit = instr->texture_index;
        uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
        uint32_t w_tiles = w / tile_width;
        uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
        uint32_t h_tiles = h / tile_height;
        uint32_t size = w_tiles * h_tiles * tile_size;

        struct qreg addr;
        assert(instr->num_srcs == 1);
        assert(instr->src[0].src_type == nir_tex_src_coord);
        addr = ntq_get_src(c, instr->src[0].src, 0);

        /* Perform the clamping required by kernel validation. */
        addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
        addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));

        qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));

        struct qreg tex = qir_TEX_RESULT(c);
        c->num_texture_samples++;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        enum pipe_format format = c->key->tex[unit].format;
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg scaled = ntq_scale_depth_texture(c, tex);
                for (int i = 0; i < 4; i++)
                        dest[i] = scaled;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}

static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->texture_index;

        if (instr->op == nir_texop_txf) {
                ntq_emit_txf(c, instr);
                return;
        }

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg normalized = ntq_scale_depth_texture(c, tex);
                struct qreg depth_output;

                struct qreg u0 = qir_uniform_f(c, 0.0f);
                struct qreg u1 = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = u1;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        dest[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}

/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
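/* Worked example: for src = -1.25, FTOI/ITOF give trunc = -1.0 and
 * diff = -0.25.  diff is negative, so QPU_COND_NS selects diff + 1.0 = 0.75,
 * matching -1.25 - floor(-1.25).  For positive inputs diff is already the
 * fractional part and is returned unchanged.
 */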
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
}

/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
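/* Worked example: for src = -1.25, trunc = -1.0 and src - trunc = -0.25 sets
 * the N flag, so we return trunc - 1.0 = -2.0.  For src = 1.25 the difference
 * is non-negative and trunc = 1.0 is already the floor.
 */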
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}

/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
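/* Worked example: for src = 1.25, trunc = 1.0 and trunc - src = -0.25 sets
 * the N flag, so we return trunc + 1.0 = 2.0.  For src = -1.25 the difference
 * is non-negative and trunc = -1.0 is already the ceiling.
 */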
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}

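/**
 * Computes sin(src) via a degree-9 odd polynomial.
 *
 * The argument is reduced to x = fract(src / (2*pi)) - 0.5 in [-0.5, 0.5),
 * and the coeff[] terms are +/-(2*pi)^(2i+1) / (2i+1)!, i.e. the Taylor
 * series of -sin(2*pi*x); the sign flip and the half-turn phase shift from
 * the -0.5 cancel, so the sum equals sin(src).
 */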
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}

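/**
 * Computes cos(src) via a degree-10 even polynomial, mirroring ntq_fsin().
 *
 * With x = fract(src / (2*pi)) - 0.5, the coeff[] terms form the Taylor
 * series of -cos(2*pi*x), and the half-turn phase shift again cancels the
 * sign so the sum equals cos(src).
 */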
static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                sum = qir_FADD(c, sum, mul);
        }
        return sum;
}

static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        struct qreg t = qir_get_temp(c);

        qir_SF(c, src);
        qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
        qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
        qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
        return t;
}

static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                c->inputs[attr * 4 + i] =
                        qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
                c->num_inputs++;
        }
}

static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
        c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}

static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}

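/* A sketch of the varying math above, as I read it: the VARY register file
 * delivers the per-pixel plane-equation partial result, which gets multiplied
 * by W and then has the C coefficient added (folded into qir_VARY_ADD_C) to
 * produce the perspective-correct interpolated value.
 */
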
static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}

static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}

static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}

static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}

/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * bit set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
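/* For example, packing vec4(a.x, a.x, a.x, a.x) hits the early 8888 path
 * below and becomes a single replicated-channel pack, while packing the
 * outputs of four separate multiplies can be folded into the multipliers'
 * own MUL-unit pack flags, eliminating the four extra MOVs.
 */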
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        /* If the pack is replicating the same channel 4 times, use the 8888
         * pack flag.  This is common for blending using the alpha
         * channel.
         */
        if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                *dest = qir_PACK_8888_F(c,
                                        ntq_get_src(c, instr->src[0].src,
                                                    instr->src[0].swizzle[0]));
                return;
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                        continue;
                }

                qir_PACK_8_F(c, result, src, i);
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        *dest = result;
}

/** Handles sign-extended bitfield extracts for 16 bits. */
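/* E.g. ibfe(base, 16, 16) becomes qir_UNPACK_16_I(c, base, 1), reading the
 * top half-word with sign extension; the asserts below encode the expectation
 * that NIR only hands us constant, aligned extracts of exactly 16 bits.
 */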
static struct qreg
ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 16);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 16 == 0);

        return qir_UNPACK_16_I(c, base, offset_bit / 16);
}

/** Handles unsigned bitfield extracts for 8 bits. */
static struct qreg
ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 8);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 8 == 0);

        return qir_UNPACK_8_I(c, base, offset_bit / 8);
}

/**
 * If compare_instr is a comparison we can map to QPU condition flags, emits
 * the comparison and stores in *dest the value sel_instr should produce from
 * its result, returning true.  Returns false for unsupported comparison
 * opcodes so the caller can fall back.
 */
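/* Example of the mapping below: nir_op_flt(a, b) emits SF(a - b) and selects
 * under QPU_COND_NS (negative flag set).  The integer and SEQ/SNE-style
 * opcodes share the same condition table; they differ only in whether the
 * subtract is float or integer and in the 1.0/~0 result encodings.
 */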
static bool
ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        enum qpu_cond cond;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_ieq:
        case nir_op_seq:
                cond = QPU_COND_ZS;
                break;
        case nir_op_fne:
        case nir_op_ine:
        case nir_op_sne:
                cond = QPU_COND_ZC;
                break;
        case nir_op_fge:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_sge:
                cond = QPU_COND_NC;
                break;
        case nir_op_flt:
        case nir_op_ilt:
        case nir_op_slt:
                cond = QPU_COND_NS;
                break;
        default:
                return false;
        }

        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);

        unsigned unsized_type =
                nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
        if (unsized_type == nir_type_float)
                qir_SF(c, qir_FSUB(c, src0, src1));
        else
                qir_SF(c, qir_SUB(c, src0, src1));

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = qir_SEL(c, cond,
                                qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = qir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = qir_SEL(c, cond,
                                qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
                break;
        }

        return true;
}

/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        /* nir_instr_as_alu() is just a cast and never returns NULL, so check
         * the instruction type before using it.
         */
        if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        qir_SF(c, src[0]);
        return qir_SEL(c, QPU_COND_NS, src[1], src[2]);
}

static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                dest[i] = qir_UNPACK_8_F(c, src, i);
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC,
                                qir_uniform_ui(c, ~0),
                                qir_uniform_ui(c, 0));
                break;

        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
                if (!ntq_emit_comparison(c, dest, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                *dest = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
                break;

        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_ibitfield_extract:
                *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_ubitfield_extract:
                *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_usadd_4x8:
                *dest = qir_V8ADDS(c, src[0], src[1]);
                break;

        case nir_op_ussub_4x8:
                *dest = qir_V8SUBS(c, src[0], src[1]);
                break;

        case nir_op_umin_4x8:
                *dest = qir_V8MIN(c, src[0], src[1]);
                break;

        case nir_op_umax_4x8:
                *dest = qir_V8MAX(c, src[0], src[1]);
                break;

        case nir_op_umul_unorm_4x8:
                *dest = qir_V8MULD(c, src[0], src[1]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
emit_frag_end(struct vc4_compile *c)
{
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        uint32_t discard_cond = QPU_COND_ALWAYS;
        if (c->discard.file != QFILE_NULL) {
                qir_SF(c, c->discard);
                discard_cond = QPU_COND_ZS;
        }

        if (c->fs_key->stencil_enabled) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                             qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->output_sample_mask_index != -1) {
                qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }

        if (c->fs_key->depth_enabled) {
                if (c->output_position_index != -1) {
                        qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                      qir_FMUL(c,
                                               c->outputs[c->output_position_index + 2],
                                               qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
                } else {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                     qir_FRAG_Z(c))->cond = discard_cond;
                }
        }

        if (!c->msaa_per_sample_output) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
                             color)->cond = discard_cond;
        } else {
                for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
                                     c->sample_colors[i])->cond = discard_cond;
                }
        }
}

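/* emit_scaled_viewport_write() below converts the post-projection X and Y to
 * screen-space fixed point (position * viewport scale * 1/W, then FTOI) and
 * packs the two results into the 16-bit halves of a single VPM word via the
 * QPU_PACK_A_16A/16B register-pack modes.
 */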
1228 static void
1229 emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1230 {
1231         struct qreg packed = qir_get_temp(c);
1232
1233         for (int i = 0; i < 2; i++) {
1234                 struct qreg scale =
1235                         qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1236
1237                 struct qreg packed_chan = packed;
1238                 packed_chan.pack = QPU_PACK_A_16A + i;
1239
1240                 qir_FTOI_dest(c, packed_chan,
1241                               qir_FMUL(c,
1242                                        qir_FMUL(c,
1243                                                 c->outputs[c->output_position_index + i],
1244                                                 scale),
1245                                        rcp_w));
1246         }
1247
1248         qir_VPM_WRITE(c, packed);
1249 }
1250
1251 static void
1252 emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1253 {
1254         struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1255         struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1256
1257         qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1258                                                           c->outputs[c->output_position_index + 2],
1259                                                           zscale),
1260                                               rcp_w),
1261                                   zoffset));
1262 }
1263
1264 static void
1265 emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1266 {
1267         qir_VPM_WRITE(c, rcp_w);
1268 }
1269
1270 static void
1271 emit_point_size_write(struct vc4_compile *c)
1272 {
1273         struct qreg point_size;
1274
1275         if (c->output_point_size_index != -1)
1276                 point_size = c->outputs[c->output_point_size_index];
1277         else
1278                 point_size = qir_uniform_f(c, 1.0);
1279
1280         /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1281          * BCM21553).
1282          */
1283         point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
1284
1285         qir_VPM_WRITE(c, point_size);
1286 }
1287
1288 /**
1289  * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1290  *
1291  * The simulator insists that there be at least one vertex attribute, so
1292  * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
1293  * insists that all vertex attributes loaded get read by the VS/CS, so we have
1294  * to consume it here.
1295  */
1296 static void
1297 emit_stub_vpm_read(struct vc4_compile *c)
1298 {
1299         if (c->num_inputs)
1300                 return;
1301
1302         c->vattr_sizes[0] = 4;
1303         (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
1304         c->num_inputs++;
1305 }
1306
1307 static void
1308 emit_vert_end(struct vc4_compile *c,
1309               struct vc4_varying_slot *fs_inputs,
1310               uint32_t num_fs_inputs)
1311 {
1312         struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
1313
1314         emit_stub_vpm_read(c);
1315
1316         emit_scaled_viewport_write(c, rcp_w);
1317         emit_zs_write(c, rcp_w);
1318         emit_rcp_wc_write(c, rcp_w);
1319         if (c->vs_key->per_vertex_point_size)
1320                 emit_point_size_write(c);
1321
1322         for (int i = 0; i < num_fs_inputs; i++) {
1323                 struct vc4_varying_slot *input = &fs_inputs[i];
1324                 int j;
1325
1326                 for (j = 0; j < c->num_outputs; j++) {
1327                         struct vc4_varying_slot *output =
1328                                 &c->output_slots[j];
1329
1330                         if (input->slot == output->slot &&
1331                             input->swizzle == output->swizzle) {
1332                                 qir_VPM_WRITE(c, c->outputs[j]);
1333                                 break;
1334                         }
1335                 }
1336                 /* Emit padding if we didn't find a declared VS output for
1337                  * this FS input.
1338                  */
1339                 if (j == c->num_outputs)
1340                         qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1341         }
1342 }
1343
1344 static void
1345 emit_coord_end(struct vc4_compile *c)
1346 {
1347         struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);
1348
1349         emit_stub_vpm_read(c);
1350
1351         for (int i = 0; i < 4; i++)
1352                 qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1353
1354         emit_scaled_viewport_write(c, rcp_w);
1355         emit_zs_write(c, rcp_w);
1356         emit_rcp_wc_write(c, rcp_w);
1357         if (c->vs_key->per_vertex_point_size)
1358                 emit_point_size_write(c);
1359 }
1360
1361 static void
1362 vc4_optimize_nir(struct nir_shader *s)
1363 {
1364         bool progress;
1365
1366         do {
1367                 progress = false;
1368
1369                 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1370                 NIR_PASS_V(s, nir_lower_alu_to_scalar);
1371
1372                 NIR_PASS(progress, s, nir_copy_prop);
1373                 NIR_PASS(progress, s, nir_opt_dce);
1374                 NIR_PASS(progress, s, nir_opt_cse);
1375                 NIR_PASS(progress, s, nir_opt_peephole_select);
1376                 NIR_PASS(progress, s, nir_opt_algebraic);
1377                 NIR_PASS(progress, s, nir_opt_constant_folding);
1378                 NIR_PASS(progress, s, nir_opt_undef);
1379         } while (progress);
1380 }
1381
1382 static int
1383 driver_location_compare(const void *in_a, const void *in_b)
1384 {
1385         const nir_variable *const *a = in_a;
1386         const nir_variable *const *b = in_b;
1387
1388         return (*a)->data.driver_location - (*b)->data.driver_location;
1389 }
1390
1391 static void
1392 ntq_setup_inputs(struct vc4_compile *c)
1393 {
1394         unsigned num_entries = 0;
1395         nir_foreach_variable(var, &c->s->inputs)
1396                 num_entries++;
1397
1398         nir_variable *vars[num_entries];
1399
1400         unsigned i = 0;
1401         nir_foreach_variable(var, &c->s->inputs)
1402                 vars[i++] = var;
1403
1404         /* Sort the variables so that we emit the input setup in
1405          * driver_location order.  This is required for VPM reads, whose data
1406          * is fetched into the VPM in driver_location (TGSI register index)
1407          * order.
1408          */
1409         qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1410
1411         for (unsigned i = 0; i < num_entries; i++) {
1412                 nir_variable *var = vars[i];
1413                 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1414                 unsigned loc = var->data.driver_location;
1415
1416                 assert(array_len == 1);
1417                 (void)array_len;
1418                 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1419                                   (loc + 1) * 4);
1420
1421                 if (c->stage == QSTAGE_FRAG) {
1422                         if (var->data.location == VARYING_SLOT_POS) {
1423                                 emit_fragcoord_input(c, loc);
1424                         } else if (var->data.location == VARYING_SLOT_FACE) {
1425                                 c->inputs[loc * 4 + 0] =
1426                                         qir_ITOF(c, qir_reg(QFILE_FRAG_REV_FLAG,
1427                                                             0));
1428                         } else if (var->data.location >= VARYING_SLOT_VAR0 &&
1429                                    (c->fs_key->point_sprite_mask &
1430                                     (1 << (var->data.location -
1431                                            VARYING_SLOT_VAR0)))) {
1432                                 c->inputs[loc * 4 + 0] = c->point_x;
1433                                 c->inputs[loc * 4 + 1] = c->point_y;
1434                         } else {
1435                                 emit_fragment_input(c, loc, var->data.location);
1436                         }
1437                 } else {
1438                         emit_vertex_input(c, loc);
1439                 }
1440         }
1441 }
1442
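/**
 * Sets up c->outputs, one slot per 32-bit component, and records the
 * driver locations of the outputs that need special handling at the
 * end of the stage (color/depth/sample mask for fragment shaders,
 * position/point size for vertex shaders).
 */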
1443 static void
1444 ntq_setup_outputs(struct vc4_compile *c)
1445 {
1446         nir_foreach_variable(var, &c->s->outputs) {
1447                 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1448                 unsigned loc = var->data.driver_location * 4;
1449
1450                 assert(array_len == 1);
1451                 (void)array_len;
1452
1453                 for (int i = 0; i < 4; i++)
1454                         add_output(c, loc + i, var->data.location, i);
1455
1456                 if (c->stage == QSTAGE_FRAG) {
1457                         switch (var->data.location) {
1458                         case FRAG_RESULT_COLOR:
1459                         case FRAG_RESULT_DATA0:
1460                                 c->output_color_index = loc;
1461                                 break;
1462                         case FRAG_RESULT_DEPTH:
1463                                 c->output_position_index = loc;
1464                                 break;
1465                         case FRAG_RESULT_SAMPLE_MASK:
1466                                 c->output_sample_mask_index = loc;
1467                                 break;
1468                         }
1469                 } else {
1470                         switch (var->data.location) {
1471                         case VARYING_SLOT_POS:
1472                                 c->output_position_index = loc;
1473                                 break;
1474                         case VARYING_SLOT_PSIZ:
1475                                 c->output_point_size_index = loc;
1476                                 break;
1477                         }
1478                 }
1479         }
1480 }
1481
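/**
 * Declares a potential UBO range for each uniform variable: the byte
 * offset and size are computed from driver_location and the array
 * length, in units of vec4s (4 * sizeof(float)).
 */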
1482 static void
1483 ntq_setup_uniforms(struct vc4_compile *c)
1484 {
1485         nir_foreach_variable(var, &c->s->uniforms) {
1486                 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1487                 unsigned array_elem_size = 4 * sizeof(float);
1488
1489                 declare_uniform_range(c, var->data.driver_location * array_elem_size,
1490                                       array_len * array_elem_size);
        }
1493 }
1494
1495 /**
1496  * Sets up the mapping from nir_register to struct qreg *.
1497  *
1498  * Each nir_register gets a struct qreg per 32-bit component being stored.
1499  */
1500 static void
1501 ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1502 {
1503         foreach_list_typed(nir_register, nir_reg, node, list) {
1504                 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1505                 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1506                                                   array_len *
1507                                                   nir_reg->num_components);
1508
1509                 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1510
1511                 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1512                         qregs[i] = qir_uniform_ui(c, 0);
1513         }
1514 }
1515
1516 static void
1517 ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1518 {
1519         struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1520         for (int i = 0; i < instr->def.num_components; i++)
1521                 qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);
}
1525
1526 static void
1527 ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
1528 {
1529         struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1530
        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
1534         for (int i = 0; i < instr->def.num_components; i++)
1535                 qregs[i] = qir_uniform_ui(c, 0);
1536 }
1537
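/**
 * Translates a NIR intrinsic to QIR.  Note the asserts below: vc4
 * only handles direct (constant-offset) inputs and outputs, and
 * indirect uniform loads are deferred to indirect_uniform_load().
 */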
1538 static void
1539 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
1540 {
1541         const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
1542         nir_const_value *const_offset;
1543         unsigned offset;
1544         struct qreg *dest = NULL;
1545
1546         if (info->has_dest) {
1547                 dest = ntq_get_dest(c, &instr->dest);
1548         }
1549
1550         switch (instr->intrinsic) {
1551         case nir_intrinsic_load_uniform:
1552                 assert(instr->num_components == 1);
1553                 const_offset = nir_src_as_const_value(instr->src[0]);
1554                 if (const_offset) {
1555                         offset = instr->const_index[0] + const_offset->u32[0];
1556                         assert(offset % 4 == 0);
                        /* We need the offset in dwords. */
1558                         offset = offset / 4;
1559                         if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) {
1560                                 *dest = qir_uniform(c, QUNIFORM_UNIFORM,
1561                                                     offset);
1562                         } else {
1563                                 *dest = qir_uniform(c, offset -
1564                                                     VC4_NIR_STATE_UNIFORM_OFFSET,
1565                                                     0);
1566                         }
1567                 } else {
1568                         *dest = indirect_uniform_load(c, instr);
1569                 }
1570                 break;
1571
1572         case nir_intrinsic_load_user_clip_plane:
1573                 for (int i = 0; i < instr->num_components; i++) {
1574                         dest[i] = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1575                                               instr->const_index[0] * 4 + i);
1576                 }
1577                 break;
1578
1579         case nir_intrinsic_load_sample_mask_in:
1580                 *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0);
1581                 break;
1582
1583         case nir_intrinsic_load_input:
1584                 assert(instr->num_components == 1);
1585                 const_offset = nir_src_as_const_value(instr->src[0]);
1586                 assert(const_offset && "vc4 doesn't support indirect inputs");
1587                 if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
1588                         assert(const_offset->u32[0] == 0);
1589                         /* Reads of the per-sample color need to be done in
1590                          * order.
1591                          */
1592                         int sample_index = (instr->const_index[0] -
1593                                            VC4_NIR_TLB_COLOR_READ_INPUT);
1594                         for (int i = 0; i <= sample_index; i++) {
1595                                 if (c->color_reads[i].file == QFILE_NULL) {
1596                                         c->color_reads[i] =
1597                                                 qir_TLB_COLOR_READ(c);
1598                                 }
1599                         }
1600                         *dest = c->color_reads[sample_index];
1601                 } else {
1602                         offset = instr->const_index[0] + const_offset->u32[0];
1603                         *dest = c->inputs[offset];
1604                 }
1605                 break;
1606
1607         case nir_intrinsic_store_output:
1608                 const_offset = nir_src_as_const_value(instr->src[1]);
1609                 assert(const_offset && "vc4 doesn't support indirect outputs");
1610                 offset = instr->const_index[0] + const_offset->u32[0];
1611
                /* MSAA color outputs are the only case where we have an
                 * output that's not lowered to being a store of a single
                 * 32-bit value.
                 */
1616                 if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
1617                         assert(offset == c->output_color_index);
1618                         for (int i = 0; i < 4; i++) {
1619                                 c->sample_colors[i] =
1620                                         qir_MOV(c, ntq_get_src(c, instr->src[0],
1621                                                                i));
1622                         }
1623                 } else {
1624                         assert(instr->num_components == 1);
1625                         c->outputs[offset] =
1626                                 qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
1627                         c->num_outputs = MAX2(c->num_outputs, offset + 1);
1628                 }
1629                 break;
1630
1631         case nir_intrinsic_discard:
1632                 c->discard = qir_uniform_ui(c, ~0);
1633                 break;
1634
1635         case nir_intrinsic_discard_if:
1636                 if (c->discard.file == QFILE_NULL)
1637                         c->discard = qir_uniform_ui(c, 0);
1638                 c->discard = qir_OR(c, c->discard,
1639                                     ntq_get_src(c, instr->src[0], 0));
1640                 break;
1641
1642         default:
1643                 fprintf(stderr, "Unknown intrinsic: ");
1644                 nir_print_instr(&instr->instr, stderr);
1645                 fprintf(stderr, "\n");
1646                 break;
1647         }
1648 }
1649
1650 static void
1651 ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
1652 {
1653         fprintf(stderr, "general IF statements not handled.\n");
1654 }
1655
1656 static void
1657 ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
1658 {
1659         switch (instr->type) {
1660         case nir_instr_type_alu:
1661                 ntq_emit_alu(c, nir_instr_as_alu(instr));
1662                 break;
1663
1664         case nir_instr_type_intrinsic:
1665                 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
1666                 break;
1667
1668         case nir_instr_type_load_const:
1669                 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
1670                 break;
1671
1672         case nir_instr_type_ssa_undef:
1673                 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
1674                 break;
1675
1676         case nir_instr_type_tex:
1677                 ntq_emit_tex(c, nir_instr_as_tex(instr));
1678                 break;
1679
1680         default:
1681                 fprintf(stderr, "Unknown NIR instr type: ");
1682                 nir_print_instr(instr, stderr);
1683                 fprintf(stderr, "\n");
1684                 abort();
1685         }
1686 }
1687
1688 static void
1689 ntq_emit_block(struct vc4_compile *c, nir_block *block)
1690 {
1691         nir_foreach_instr(instr, block) {
1692                 ntq_emit_instr(c, instr);
1693         }
1694 }
1695
1696 static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
1697
1698 static void
1699 ntq_emit_loop(struct vc4_compile *c, nir_loop *nloop)
1700 {
1701         fprintf(stderr, "LOOPS not fully handled. Rendering errors likely.\n");
1702         ntq_emit_cf_list(c, &nloop->body);
1703 }
1704
1705 static void
1706 ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
1707 {
1708         fprintf(stderr, "FUNCTIONS not handled.\n");
1709         abort();
1710 }
1711
1712 static void
1713 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
1714 {
1715         foreach_list_typed(nir_cf_node, node, node, list) {
1716                 switch (node->type) {
1717                 case nir_cf_node_block:
1718                         ntq_emit_block(c, nir_cf_node_as_block(node));
1719                         break;
1720
1721                 case nir_cf_node_if:
1722                         ntq_emit_if(c, nir_cf_node_as_if(node));
1723                         break;
1724
1725                 case nir_cf_node_loop:
1726                         ntq_emit_loop(c, nir_cf_node_as_loop(node));
1727                         break;
1728
1729                 case nir_cf_node_function:
1730                         ntq_emit_function(c, nir_cf_node_as_function(node));
1731                         break;
1732
1733                 default:
1734                         fprintf(stderr, "Unknown NIR node type\n");
1735                         abort();
1736                 }
1737         }
1738 }
1739
1740 static void
1741 ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
1742 {
1743         ntq_setup_registers(c, &impl->registers);
1744         ntq_emit_cf_list(c, &impl->body);
1745 }
1746
1747 static void
1748 nir_to_qir(struct vc4_compile *c)
1749 {
1750         ntq_setup_inputs(c);
1751         ntq_setup_outputs(c);
1752         ntq_setup_uniforms(c);
1753         ntq_setup_registers(c, &c->s->registers);
1754
1755         /* Find the main function and emit the body. */
1756         nir_foreach_function(function, c->s) {
1757                 assert(strcmp(function->name, "main") == 0);
1758                 assert(function->impl);
1759                 ntq_emit_impl(c, function->impl);
1760         }
1761 }
1762
1763 static const nir_shader_compiler_options nir_options = {
1764         .lower_extract_byte = true,
1765         .lower_extract_word = true,
1766         .lower_ffma = true,
1767         .lower_flrp32 = true,
1768         .lower_fpow = true,
1769         .lower_fsat = true,
1770         .lower_fsqrt = true,
1771         .lower_negate = true,
1772 };
1773
1774 static bool
1775 count_nir_instrs_in_block(nir_block *block, void *state)
1776 {
1777         int *count = (int *) state;
1778         nir_foreach_instr(instr, block) {
1779                 *count = *count + 1;
1780         }
1781         return true;
1782 }
1783
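/* Counts the NIR instructions across all function implementations,
 * for the shader-db statistics printed below.
 */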
1784 static int
1785 count_nir_instrs(nir_shader *nir)
1786 {
1787         int count = 0;
1788         nir_foreach_function(function, nir) {
1789                 if (!function->impl)
1790                         continue;
1791                 nir_foreach_block_call(function->impl, count_nir_instrs_in_block, &count);
1792         }
1793         return count;
1794 }
1795
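/**
 * Compiles a single shader variant: translates the stage's TGSI to
 * NIR, runs the NIR lowering and optimization passes, converts the
 * result to QIR, optimizes and schedules it, and finally generates
 * QPU instructions.
 */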
1796 static struct vc4_compile *
1797 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
1798                        struct vc4_key *key)
1799 {
1800         struct vc4_compile *c = qir_compile_init();
1801
1802         c->stage = stage;
1803         c->shader_state = &key->shader_state->base;
1804         c->program_id = key->shader_state->program_id;
1805         c->variant_id = key->shader_state->compiled_variant_count++;
1806
1807         c->key = key;
1808         switch (stage) {
1809         case QSTAGE_FRAG:
1810                 c->fs_key = (struct vc4_fs_key *)key;
1811                 if (c->fs_key->is_points) {
1812                         c->point_x = emit_fragment_varying(c, ~0, 0);
1813                         c->point_y = emit_fragment_varying(c, ~0, 0);
1814                 } else if (c->fs_key->is_lines) {
1815                         c->line_x = emit_fragment_varying(c, ~0, 0);
1816                 }
1817                 break;
1818         case QSTAGE_VERT:
1819                 c->vs_key = (struct vc4_vs_key *)key;
1820                 break;
1821         case QSTAGE_COORD:
1822                 c->vs_key = (struct vc4_vs_key *)key;
1823                 break;
1824         }
1825
1826         const struct tgsi_token *tokens = key->shader_state->base.tokens;
1827
1828         if (vc4_debug & VC4_DEBUG_TGSI) {
1829                 fprintf(stderr, "%s prog %d/%d TGSI:\n",
1830                         qir_get_stage_name(c->stage),
1831                         c->program_id, c->variant_id);
1832                 tgsi_dump(tokens, 0);
1833         }
1834
1835         c->s = tgsi_to_nir(tokens, &nir_options);
1836         NIR_PASS_V(c->s, nir_opt_global_to_local);
1837         NIR_PASS_V(c->s, nir_convert_to_ssa);
1838
1839         if (stage == QSTAGE_FRAG)
1840                 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
1841
1842         struct nir_lower_tex_options tex_options = {
                /* Lowering rect textures would require implementing txs to
                 * get the texture size, and we don't want the int/float
                 * conversions that come with it.
                 */
1846                 .lower_rect = false,
1847
                /* We would like to lower txp, but nir's lowering
                 * newton-raphsons the rcp, which we don't want.
                 */
1851                 .lower_txp = false,
1852
1853                 /* Apply swizzles to all samplers. */
1854                 .swizzle_result = ~0,
1855         };
1856
1857         /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
1858          * The format swizzling applies before sRGB decode, and
1859          * ARB_texture_swizzle is the last thing before returning the sample.
1860          */
1861         for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
1862                 enum pipe_format format = c->key->tex[i].format;
1863
1864                 if (!format)
1865                         continue;
1866
1867                 const uint8_t *format_swizzle = vc4_get_format_swizzle(format);
1868
1869                 for (int j = 0; j < 4; j++) {
1870                         uint8_t arb_swiz = c->key->tex[i].swizzle[j];
1871
1872                         if (arb_swiz <= 3) {
1873                                 tex_options.swizzles[i][j] =
1874                                         format_swizzle[arb_swiz];
1875                         } else {
1876                                 tex_options.swizzles[i][j] = arb_swiz;
1877                         }
1878
1879                         /* If ARB_texture_swizzle is reading from the R, G, or
1880                          * B channels of an sRGB texture, then we need to
1881                          * apply sRGB decode to this channel at sample time.
1882                          */
1883                         if (arb_swiz < 3 && util_format_is_srgb(format)) {
1884                                 c->tex_srgb_decode[i] |= (1 << j);
1885                         }
                }
1888         }
1889
1890         NIR_PASS_V(c->s, nir_normalize_cubemap_coords);
1891         NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
1892
1893         if (c->fs_key && c->fs_key->light_twoside)
1894                 NIR_PASS_V(c->s, nir_lower_two_sided_color);
1895
1896         if (stage == QSTAGE_FRAG)
1897                 NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
1898         else
1899                 NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
1900
1901         NIR_PASS_V(c->s, vc4_nir_lower_io, c);
1902         NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
1903         NIR_PASS_V(c->s, nir_lower_idiv);
1904         NIR_PASS_V(c->s, nir_lower_load_const_to_scalar);
1905
1906         vc4_optimize_nir(c->s);
1907
1908         NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_local);
1909         NIR_PASS_V(c->s, nir_convert_from_ssa, true);
1910
1911         if (vc4_debug & VC4_DEBUG_SHADERDB) {
1912                 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
1913                         qir_get_stage_name(c->stage),
1914                         c->program_id, c->variant_id,
1915                         count_nir_instrs(c->s));
1916         }
1917
1918         if (vc4_debug & VC4_DEBUG_NIR) {
1919                 fprintf(stderr, "%s prog %d/%d NIR:\n",
1920                         qir_get_stage_name(c->stage),
1921                         c->program_id, c->variant_id);
1922                 nir_print_shader(c->s, stderr);
1923         }
1924
1925         nir_to_qir(c);
1926
1927         switch (stage) {
1928         case QSTAGE_FRAG:
1929                 emit_frag_end(c);
1930                 break;
1931         case QSTAGE_VERT:
1932                 emit_vert_end(c,
1933                               vc4->prog.fs->input_slots,
1934                               vc4->prog.fs->num_inputs);
1935                 break;
1936         case QSTAGE_COORD:
1937                 emit_coord_end(c);
1938                 break;
1939         }
1940
1941         if (vc4_debug & VC4_DEBUG_QIR) {
1942                 fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
1943                         qir_get_stage_name(c->stage),
1944                         c->program_id, c->variant_id);
1945                 qir_dump(c);
1946                 fprintf(stderr, "\n");
1947         }
1948
1949         qir_optimize(c);
1950         qir_lower_uniforms(c);
1951
1952         qir_schedule_instructions(c);
1953
1954         if (vc4_debug & VC4_DEBUG_QIR) {
1955                 fprintf(stderr, "%s prog %d/%d QIR:\n",
1956                         qir_get_stage_name(c->stage),
1957                         c->program_id, c->variant_id);
1958                 qir_dump(c);
1959                 fprintf(stderr, "\n");
1960         }
1961
1962         qir_reorder_uniforms(c);
1963         vc4_generate_code(vc4, c);
1964
1965         if (vc4_debug & VC4_DEBUG_SHADERDB) {
1966                 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
1967                         qir_get_stage_name(c->stage),
1968                         c->program_id, c->variant_id,
1969                         c->qpu_inst_count);
1970                 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
1971                         qir_get_stage_name(c->stage),
1972                         c->program_id, c->variant_id,
1973                         c->num_uniforms);
1974         }
1975
1976         ralloc_free(c->s);
1977
1978         return c;
1979 }
1980
1981 static void *
1982 vc4_shader_state_create(struct pipe_context *pctx,
1983                         const struct pipe_shader_state *cso)
1984 {
1985         struct vc4_context *vc4 = vc4_context(pctx);
1986         struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
1987         if (!so)
1988                 return NULL;
1989
1990         so->base.tokens = tgsi_dup_tokens(cso->tokens);
1991         so->program_id = vc4->next_uncompiled_program_id++;
1992
1993         return so;
1994 }
1995
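/* Copies the uniform contents/data arrays built up during compilation
 * into the long-lived compiled shader, so the compile context can be
 * destroyed while draw-time uniform uploads still have what they
 * need.
 */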
1996 static void
1997 copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
1998                              struct vc4_compile *c)
1999 {
2000         int count = c->num_uniforms;
2001         struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
2002
2003         uinfo->count = count;
2004         uinfo->data = ralloc_array(shader, uint32_t, count);
2005         memcpy(uinfo->data, c->uniform_data,
2006                count * sizeof(*uinfo->data));
2007         uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
2008         memcpy(uinfo->contents, c->uniform_contents,
2009                count * sizeof(*uinfo->contents));
2010         uinfo->num_texture_samples = c->num_texture_samples;
2011
2012         vc4_set_shader_uniform_dirty_flags(shader);
2013 }
2014
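/**
 * Returns the compiled shader variant for the given key, compiling it
 * first on a cache miss.  Variants are cached in vc4->fs_cache or
 * vc4->vs_cache, keyed on the raw bytes of the key struct.
 */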
2015 static struct vc4_compiled_shader *
2016 vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
2017                         struct vc4_key *key)
2018 {
2019         struct hash_table *ht;
2020         uint32_t key_size;
2021         if (stage == QSTAGE_FRAG) {
2022                 ht = vc4->fs_cache;
2023                 key_size = sizeof(struct vc4_fs_key);
2024         } else {
2025                 ht = vc4->vs_cache;
2026                 key_size = sizeof(struct vc4_vs_key);
2027         }
2028
2029         struct vc4_compiled_shader *shader;
2030         struct hash_entry *entry = _mesa_hash_table_search(ht, key);
2031         if (entry)
2032                 return entry->data;
2033
2034         struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
2035         shader = rzalloc(NULL, struct vc4_compiled_shader);
2036
2037         shader->program_id = vc4->next_compiled_program_id++;
2038         if (stage == QSTAGE_FRAG) {
2039                 bool input_live[c->num_input_slots];
2040
2041                 memset(input_live, 0, sizeof(input_live));
2042                 list_for_each_entry(struct qinst, inst, &c->instructions, link) {
2043                         for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
2044                                 if (inst->src[i].file == QFILE_VARY)
2045                                         input_live[inst->src[i].index] = true;
2046                         }
2047                 }
2048
2049                 shader->input_slots = ralloc_array(shader,
2050                                                    struct vc4_varying_slot,
2051                                                    c->num_input_slots);
2052
2053                 for (int i = 0; i < c->num_input_slots; i++) {
2054                         struct vc4_varying_slot *slot = &c->input_slots[i];
2055
2056                         if (!input_live[i])
2057                                 continue;
2058
2059                         /* Skip non-VS-output inputs. */
2060                         if (slot->slot == (uint8_t)~0)
2061                                 continue;
2062
2063                         if (slot->slot == VARYING_SLOT_COL0 ||
2064                             slot->slot == VARYING_SLOT_COL1 ||
2065                             slot->slot == VARYING_SLOT_BFC0 ||
2066                             slot->slot == VARYING_SLOT_BFC1) {
2067                                 shader->color_inputs |= (1 << shader->num_inputs);
2068                         }
2069
2070                         shader->input_slots[shader->num_inputs] = *slot;
2071                         shader->num_inputs++;
2072                 }
2073         } else {
2074                 shader->num_inputs = c->num_inputs;
2075
2076                 shader->vattr_offsets[0] = 0;
2077                 for (int i = 0; i < 8; i++) {
2078                         shader->vattr_offsets[i + 1] =
2079                                 shader->vattr_offsets[i] + c->vattr_sizes[i];
2080
2081                         if (c->vattr_sizes[i])
2082                                 shader->vattrs_live |= (1 << i);
2083                 }
2084         }
2085
2086         copy_uniform_state_to_shader(shader, c);
2087         shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
2088                                          c->qpu_inst_count * sizeof(uint64_t));
2089
2090         /* Copy the compiler UBO range state to the compiled shader, dropping
2091          * out arrays that were never referenced by an indirect load.
2092          *
2093          * (Note that QIR dead code elimination of an array access still
2094          * leaves that array alive, though)
2095          */
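        /* As a hypothetical example: for declared ranges [0, 64) and
         * [64, 192) where only the second was ever indirectly
         * addressed, a single range of { src_offset = 64,
         * dst_offset = 0, size = 128 } would be copied out here.
         */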
2096         if (c->num_ubo_ranges) {
2097                 shader->num_ubo_ranges = c->num_ubo_ranges;
2098                 shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
2099                                                   c->num_ubo_ranges);
2100                 uint32_t j = 0;
2101                 for (int i = 0; i < c->num_uniform_ranges; i++) {
2102                         struct vc4_compiler_ubo_range *range =
2103                                 &c->ubo_ranges[i];
2104                         if (!range->used)
2105                                 continue;
2106
2107                         shader->ubo_ranges[j].dst_offset = range->dst_offset;
2108                         shader->ubo_ranges[j].src_offset = range->src_offset;
2109                         shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += range->size;
2111                         j++;
2112                 }
2113         }
        if (shader->ubo_size && (vc4_debug & VC4_DEBUG_SHADERDB)) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        shader->ubo_size / 4);
        }
2122
2123         qir_compile_destroy(c);
2124
2125         struct vc4_key *dup_key;
2126         dup_key = ralloc_size(shader, key_size);
2127         memcpy(dup_key, key, key_size);
2128         _mesa_hash_table_insert(ht, dup_key, shader);
2129
2130         return shader;
2131 }
2132
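/* Fills in the portion of the shader key that is shared between the
 * fragment and vertex stages: per-sampler format, swizzle, MSAA and
 * wrap/compare state, plus the enabled user clip planes.
 */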
2133 static void
2134 vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
2135                      struct vc4_texture_stateobj *texstate)
2136 {
2137         for (int i = 0; i < texstate->num_textures; i++) {
2138                 struct pipe_sampler_view *sampler = texstate->textures[i];
2139                 struct pipe_sampler_state *sampler_state =
2140                         texstate->samplers[i];
2141
2142                 if (!sampler)
2143                         continue;
2144
2145                 key->tex[i].format = sampler->format;
2146                 key->tex[i].swizzle[0] = sampler->swizzle_r;
2147                 key->tex[i].swizzle[1] = sampler->swizzle_g;
2148                 key->tex[i].swizzle[2] = sampler->swizzle_b;
2149                 key->tex[i].swizzle[3] = sampler->swizzle_a;
2150
2151                 if (sampler->texture->nr_samples > 1) {
2152                         key->tex[i].msaa_width = sampler->texture->width0;
2153                         key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler_state) {
2155                         key->tex[i].compare_mode = sampler_state->compare_mode;
2156                         key->tex[i].compare_func = sampler_state->compare_func;
2157                         key->tex[i].wrap_s = sampler_state->wrap_s;
2158                         key->tex[i].wrap_t = sampler_state->wrap_t;
2159                 }
2160         }
2161
2162         key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
2163 }
2164
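/* Rebuilds the fragment shader key from the current state and swaps
 * in a (possibly freshly compiled) variant, but only if some
 * key-relevant state has been dirtied since the last update.
 */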
2165 static void
2166 vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
2167 {
2168         struct vc4_fs_key local_key;
2169         struct vc4_fs_key *key = &local_key;
2170
2171         if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2172                             VC4_DIRTY_BLEND |
2173                             VC4_DIRTY_FRAMEBUFFER |
2174                             VC4_DIRTY_ZSA |
2175                             VC4_DIRTY_RASTERIZER |
2176                             VC4_DIRTY_SAMPLE_MASK |
2177                             VC4_DIRTY_FRAGTEX |
2178                             VC4_DIRTY_TEXSTATE |
2179                             VC4_DIRTY_UNCOMPILED_FS))) {
2180                 return;
2181         }
2182
2183         memset(key, 0, sizeof(*key));
2184         vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
2185         key->base.shader_state = vc4->prog.bind_fs;
2186         key->is_points = (prim_mode == PIPE_PRIM_POINTS);
2187         key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
2188                          prim_mode <= PIPE_PRIM_LINE_STRIP);
2189         key->blend = vc4->blend->rt[0];
2190         if (vc4->blend->logicop_enable) {
2191                 key->logicop_func = vc4->blend->logicop_func;
2192         } else {
2193                 key->logicop_func = PIPE_LOGICOP_COPY;
2194         }
2195         if (vc4->msaa) {
2196                 key->msaa = vc4->rasterizer->base.multisample;
2197                 key->sample_coverage = (vc4->rasterizer->base.multisample &&
2198                                         vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
2199                 key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
2200                 key->sample_alpha_to_one = vc4->blend->alpha_to_one;
2201         }
2202
2203         if (vc4->framebuffer.cbufs[0])
2204                 key->color_format = vc4->framebuffer.cbufs[0]->format;
2205
2206         key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
2207         key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
2208         key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
2209         key->depth_enabled = (vc4->zsa->base.depth.enabled ||
2210                               key->stencil_enabled);
2211         if (vc4->zsa->base.alpha.enabled) {
2212                 key->alpha_test = true;
2213                 key->alpha_test_func = vc4->zsa->base.alpha.func;
2214         }
2215
2216         if (key->is_points) {
2217                 key->point_sprite_mask =
2218                         vc4->rasterizer->base.sprite_coord_enable;
2219                 key->point_coord_upper_left =
2220                         (vc4->rasterizer->base.sprite_coord_mode ==
2221                          PIPE_SPRITE_COORD_UPPER_LEFT);
2222         }
2223
2224         key->light_twoside = vc4->rasterizer->base.light_twoside;
2225
2226         struct vc4_compiled_shader *old_fs = vc4->prog.fs;
2227         vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
2228         if (vc4->prog.fs == old_fs)
2229                 return;
2230
2231         vc4->dirty |= VC4_DIRTY_COMPILED_FS;
2232         if (vc4->rasterizer->base.flatshade &&
2233             old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
2234                 vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
2235         }
2236 }
2237
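/* Like vc4_update_compiled_fs(), but the same key compiles two
 * variants: the full vertex shader and, with key->is_coord set, the
 * coordinate shader used for binning.
 */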
2238 static void
2239 vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
2240 {
2241         struct vc4_vs_key local_key;
2242         struct vc4_vs_key *key = &local_key;
2243
2244         if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2245                             VC4_DIRTY_RASTERIZER |
2246                             VC4_DIRTY_VERTTEX |
2247                             VC4_DIRTY_TEXSTATE |
2248                             VC4_DIRTY_VTXSTATE |
2249                             VC4_DIRTY_UNCOMPILED_VS |
2250                             VC4_DIRTY_COMPILED_FS))) {
2251                 return;
2252         }
2253
2254         memset(key, 0, sizeof(*key));
2255         vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
2256         key->base.shader_state = vc4->prog.bind_vs;
2257         key->compiled_fs_id = vc4->prog.fs->program_id;
2258
2259         for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
2260                 key->attr_formats[i] = vc4->vtx->pipe[i].src_format;
2261
2262         key->per_vertex_point_size =
2263                 (prim_mode == PIPE_PRIM_POINTS &&
2264                  vc4->rasterizer->base.point_size_per_vertex);
2265
2266         struct vc4_compiled_shader *vs =
2267                 vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
2268         if (vs != vc4->prog.vs) {
2269                 vc4->prog.vs = vs;
2270                 vc4->dirty |= VC4_DIRTY_COMPILED_VS;
2271         }
2272
2273         key->is_coord = true;
2274         struct vc4_compiled_shader *cs =
2275                 vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
2276         if (cs != vc4->prog.cs) {
2277                 vc4->prog.cs = cs;
2278                 vc4->dirty |= VC4_DIRTY_COMPILED_CS;
2279         }
2280 }
2281
2282 void
2283 vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
2284 {
2285         vc4_update_compiled_fs(vc4, prim_mode);
2286         vc4_update_compiled_vs(vc4, prim_mode);
2287 }
2288
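/* The variant caches hash and compare the raw bytes of the key
 * structs.  This relies on the keys being fully memset to 0 before
 * being filled in, so that padding bytes compare equal.
 */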
2289 static uint32_t
2290 fs_cache_hash(const void *key)
2291 {
2292         return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
2293 }
2294
2295 static uint32_t
2296 vs_cache_hash(const void *key)
2297 {
2298         return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
2299 }
2300
2301 static bool
2302 fs_cache_compare(const void *key1, const void *key2)
2303 {
2304         return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
2305 }
2306
2307 static bool
2308 vs_cache_compare(const void *key1, const void *key2)
2309 {
2310         return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
2311 }
2312
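/* Evicts a cached variant if it was compiled from the uncompiled
 * shader state being deleted, dropping its BO reference and freeing
 * its storage.
 */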
2313 static void
2314 delete_from_cache_if_matches(struct hash_table *ht,
2315                              struct hash_entry *entry,
2316                              struct vc4_uncompiled_shader *so)
2317 {
2318         const struct vc4_key *key = entry->key;
2319
2320         if (key->shader_state == so) {
2321                 struct vc4_compiled_shader *shader = entry->data;
2322                 _mesa_hash_table_remove(ht, entry);
2323                 vc4_bo_unreference(&shader->bo);
2324                 ralloc_free(shader);
2325         }
2326 }
2327
2328 static void
2329 vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
2330 {
2331         struct vc4_context *vc4 = vc4_context(pctx);
2332         struct vc4_uncompiled_shader *so = hwcso;
2333
2334         struct hash_entry *entry;
2335         hash_table_foreach(vc4->fs_cache, entry)
2336                 delete_from_cache_if_matches(vc4->fs_cache, entry, so);
2337         hash_table_foreach(vc4->vs_cache, entry)
2338                 delete_from_cache_if_matches(vc4->vs_cache, entry, so);
2339
2340         free((void *)so->base.tokens);
2341         free(so);
2342 }
2343
2344 static void
2345 vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
2346 {
2347         struct vc4_context *vc4 = vc4_context(pctx);
2348         vc4->prog.bind_fs = hwcso;
2349         vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
2350 }
2351
2352 static void
2353 vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
2354 {
2355         struct vc4_context *vc4 = vc4_context(pctx);
2356         vc4->prog.bind_vs = hwcso;
2357         vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
2358 }
2359
2360 void
2361 vc4_program_init(struct pipe_context *pctx)
2362 {
2363         struct vc4_context *vc4 = vc4_context(pctx);
2364
2365         pctx->create_vs_state = vc4_shader_state_create;
2366         pctx->delete_vs_state = vc4_shader_state_delete;
2367
2368         pctx->create_fs_state = vc4_shader_state_create;
2369         pctx->delete_fs_state = vc4_shader_state_delete;
2370
2371         pctx->bind_fs_state = vc4_fp_state_bind;
2372         pctx->bind_vs_state = vc4_vp_state_bind;
2373
2374         vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
2375                                                 fs_cache_compare);
2376         vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
2377                                                 vs_cache_compare);
2378 }
2379
2380 void
2381 vc4_program_fini(struct pipe_context *pctx)
2382 {
2383         struct vc4_context *vc4 = vc4_context(pctx);
2384
2385         struct hash_entry *entry;
2386         hash_table_foreach(vc4->fs_cache, entry) {
2387                 struct vc4_compiled_shader *shader = entry->data;
2388                 vc4_bo_unreference(&shader->bo);
2389                 ralloc_free(shader);
2390                 _mesa_hash_table_remove(vc4->fs_cache, entry);
2391         }
2392
2393         hash_table_foreach(vc4->vs_cache, entry) {
2394                 struct vc4_compiled_shader *shader = entry->data;
2395                 vc4_bo_unreference(&shader->bo);
2396                 ralloc_free(shader);
2397                 _mesa_hash_table_remove(vc4->vs_cache, entry);
2398         }
2399 }