vc4: Track the last block we emitted at the top level.
android-x86/external-mesa.git: src/gallium/drivers/vc4/vc4_program.c
1 /*
2  * Copyright (c) 2014 Scott Mansell
3  * Copyright © 2014 Broadcom
4  *
5  * Permission is hereby granted, free of charge, to any person obtaining a
6  * copy of this software and associated documentation files (the "Software"),
7  * to deal in the Software without restriction, including without limitation
8  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9  * and/or sell copies of the Software, and to permit persons to whom the
10  * Software is furnished to do so, subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice (including the next
13  * paragraph) shall be included in all copies or substantial portions of the
14  * Software.
15  *
16  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
22  * IN THE SOFTWARE.
23  */
24
25 #include <inttypes.h>
26 #include "util/u_format.h"
27 #include "util/crc32.h"
28 #include "util/u_math.h"
29 #include "util/u_memory.h"
30 #include "util/ralloc.h"
31 #include "util/hash_table.h"
32 #include "tgsi/tgsi_dump.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "compiler/nir/nir.h"
35 #include "compiler/nir/nir_builder.h"
36 #include "nir/tgsi_to_nir.h"
37 #include "vc4_context.h"
38 #include "vc4_qpu.h"
39 #include "vc4_qir.h"
40 #include "mesa/state_tracker/st_glsl_types.h"
41
42 static struct qreg
43 ntq_get_src(struct vc4_compile *c, nir_src src, int i);
44 static void
45 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
46
47 static void
48 resize_qreg_array(struct vc4_compile *c,
49                   struct qreg **regs,
50                   uint32_t *size,
51                   uint32_t decl_size)
52 {
53         if (*size >= decl_size)
54                 return;
55
56         uint32_t old_size = *size;
57         *size = MAX2(*size * 2, decl_size);
58         *regs = reralloc(c, *regs, struct qreg, *size);
59         if (!*regs) {
60                 fprintf(stderr, "Malloc failure\n");
61                 abort();
62         }
63
64         for (uint32_t i = old_size; i < *size; i++)
65                 (*regs)[i] = c->undef;
66 }
67
68 static void
69 ntq_emit_thrsw(struct vc4_compile *c)
70 {
71         if (!c->fs_threaded)
72                 return;
73
74         /* Always thread switch after each texture operation for now.
75          *
76          * We could do better by batching a bunch of texture fetches up and
77          * then doing one thread switch and collecting all their results
78          * afterward.
79          */
80         qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
81                                     c->undef, c->undef));
82         c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
83 }
84
85 static struct qreg
86 indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
87 {
88         struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
89         uint32_t offset = nir_intrinsic_base(intr);
90         struct vc4_compiler_ubo_range *range = NULL;
91         unsigned i;
92         for (i = 0; i < c->num_uniform_ranges; i++) {
93                 range = &c->ubo_ranges[i];
94                 if (offset >= range->src_offset &&
95                     offset < range->src_offset + range->size) {
96                         break;
97                 }
98         }
99         /* The driver-location-based offset always has to be within a declared
100          * uniform range.
101          */
102         assert(range);
103         if (!range->used) {
104                 range->used = true;
105                 range->dst_offset = c->next_ubo_dst_offset;
106                 c->next_ubo_dst_offset += range->size;
107                 c->num_ubo_ranges++;
108         }
109
110         offset -= range->src_offset;
111
112         /* Adjust for where we stored the TGSI register base. */
113         indirect_offset = qir_ADD(c, indirect_offset,
114                                   qir_uniform_ui(c, (range->dst_offset +
115                                                      offset)));
116
117         /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
118         indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
119         indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
120                                         qir_uniform_ui(c, (range->dst_offset +
121                                                            range->size - 4)));
122
123         qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
124                      indirect_offset,
125                      qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
126
127         c->num_texture_samples++;
128
129         ntq_emit_thrsw(c);
130
131         return qir_TEX_RESULT(c);
132 }
133
134 nir_ssa_def *
135 vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
136 {
137         switch (swiz) {
138         default:
139         case PIPE_SWIZZLE_NONE:
140                 fprintf(stderr, "warning: unknown swizzle\n");
141                 /* FALLTHROUGH */
142         case PIPE_SWIZZLE_0:
143                 return nir_imm_float(b, 0.0);
144         case PIPE_SWIZZLE_1:
145                 return nir_imm_float(b, 1.0);
146         case PIPE_SWIZZLE_X:
147         case PIPE_SWIZZLE_Y:
148         case PIPE_SWIZZLE_Z:
149         case PIPE_SWIZZLE_W:
150                 return srcs[swiz];
151         }
152 }
153
154 static struct qreg *
155 ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
156 {
157         struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
158                                           def->num_components);
159         _mesa_hash_table_insert(c->def_ht, def, qregs);
160         return qregs;
161 }
162
163 /**
164  * This function is responsible for getting QIR results into the associated
165  * storage for a NIR instruction.
166  *
167  * If it's a NIR SSA def, then we just set the associated hash table entry to
168  * the new result.
169  *
170  * If it's a NIR reg, then we need to update the existing qreg assigned to the
171  * NIR destination with the incoming value.  To do that without introducing
172  * new MOVs, we require that the incoming qreg either be a uniform, or be
173  * SSA-defined by the previous QIR instruction in the block and rewritable by
174  * this function.  That lets us sneak ahead and insert the SF flag beforehand
175  * (knowing that the previous instruction doesn't depend on flags) and rewrite
176  * its destination to be the NIR reg's destination.
177  */
178 static void
179 ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
180                struct qreg result)
181 {
182         struct qinst *last_inst = NULL;
183         if (!list_empty(&c->cur_block->instructions))
184                 last_inst = (struct qinst *)c->cur_block->instructions.prev;
185
186         assert(result.file == QFILE_UNIF ||
187                (result.file == QFILE_TEMP &&
188                 last_inst && last_inst == c->defs[result.index]));
189
190         if (dest->is_ssa) {
191                 assert(chan < dest->ssa.num_components);
192
193                 struct qreg *qregs;
194                 struct hash_entry *entry =
195                         _mesa_hash_table_search(c->def_ht, &dest->ssa);
196
197                 if (entry)
198                         qregs = entry->data;
199                 else
200                         qregs = ntq_init_ssa_def(c, &dest->ssa);
201
202                 qregs[chan] = result;
203         } else {
204                 nir_register *reg = dest->reg.reg;
205                 assert(dest->reg.base_offset == 0);
206                 assert(reg->num_array_elems == 0);
207                 struct hash_entry *entry =
208                         _mesa_hash_table_search(c->def_ht, reg);
209                 struct qreg *qregs = entry->data;
210
211                 /* Insert a MOV if the source wasn't an SSA def in the
212                  * previous instruction.
213                  */
214                 if (result.file == QFILE_UNIF) {
215                         result = qir_MOV(c, result);
216                         last_inst = c->defs[result.index];
217                 }
218
219                 /* We know they're both temps, so just rewrite index. */
220                 c->defs[last_inst->dst.index] = NULL;
221                 last_inst->dst.index = qregs[chan].index;
222
223                 /* If we're in control flow, then make this update of the reg
224                  * conditional on the execution mask.
225                  */
226                 if (c->execute.file != QFILE_NULL) {
227                         last_inst->dst.index = qregs[chan].index;
228
229                         /* Set the flags to the current exec mask.  To insert
230                          * the SF, we temporarily remove our SSA instruction.
231                          */
232                         list_del(&last_inst->link);
233                         qir_SF(c, c->execute);
234                         list_addtail(&last_inst->link,
235                                      &c->cur_block->instructions);
236
237                         last_inst->cond = QPU_COND_ZS;
238                         last_inst->cond_is_exec_mask = true;
239                 }
240         }
241 }
242
243 static struct qreg *
244 ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
245 {
246         if (dest->is_ssa) {
247                 struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
248                 for (int i = 0; i < dest->ssa.num_components; i++)
249                         qregs[i] = c->undef;
250                 return qregs;
251         } else {
252                 nir_register *reg = dest->reg.reg;
253                 assert(dest->reg.base_offset == 0);
254                 assert(reg->num_array_elems == 0);
255                 struct hash_entry *entry =
256                         _mesa_hash_table_search(c->def_ht, reg);
257                 return entry->data;
258         }
259 }
260
261 static struct qreg
262 ntq_get_src(struct vc4_compile *c, nir_src src, int i)
263 {
264         struct hash_entry *entry;
265         if (src.is_ssa) {
266                 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
267                 assert(i < src.ssa->num_components);
268         } else {
269                 nir_register *reg = src.reg.reg;
270                 entry = _mesa_hash_table_search(c->def_ht, reg);
271                 assert(reg->num_array_elems == 0);
272                 assert(src.reg.base_offset == 0);
273                 assert(i < reg->num_components);
274         }
275
276         struct qreg *qregs = entry->data;
277         return qregs[i];
278 }
279
280 static struct qreg
281 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
282                 unsigned src)
283 {
284         assert(util_is_power_of_two(instr->dest.write_mask));
285         unsigned chan = ffs(instr->dest.write_mask) - 1;
286         struct qreg r = ntq_get_src(c, instr->src[src].src,
287                                     instr->src[src].swizzle[chan]);
288
289         assert(!instr->src[src].abs);
290         assert(!instr->src[src].negate);
291
292         return r;
293 }
294
295 static inline struct qreg
296 qir_SAT(struct vc4_compile *c, struct qreg val)
297 {
298         return qir_FMAX(c,
299                         qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
300                         qir_uniform_f(c, 0.0));
301 }
302
303 static struct qreg
304 ntq_rcp(struct vc4_compile *c, struct qreg x)
305 {
306         struct qreg r = qir_RCP(c, x);
307
308         /* Apply a Newton-Raphson step to improve the accuracy. */
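        /* (One iteration of r' = r * (2 - x * r); each step roughly doubles
         * the number of correct bits in the hardware's RCP estimate.)
         */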
309         r = qir_FMUL(c, r, qir_FSUB(c,
310                                     qir_uniform_f(c, 2.0),
311                                     qir_FMUL(c, x, r)));
312
313         return r;
314 }
315
316 static struct qreg
317 ntq_rsq(struct vc4_compile *c, struct qreg x)
318 {
319         struct qreg r = qir_RSQ(c, x);
320
321         /* Apply a Newton-Raphson step to improve the accuracy. */
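        /* (One iteration of r' = r * (1.5 - 0.5 * x * r * r), the standard
         * Newton-Raphson step for 1/sqrt(x).)
         */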
322         r = qir_FMUL(c, r, qir_FSUB(c,
323                                     qir_uniform_f(c, 1.5),
324                                     qir_FMUL(c,
325                                              qir_uniform_f(c, 0.5),
326                                              qir_FMUL(c, x,
327                                                       qir_FMUL(c, r, r)))));
328
329         return r;
330 }
331
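/* Builds a full 32-bit multiply out of the QPU's 24-bit multiplier.  A
 * sketch of the math, assuming MUL24 multiplies the low 24 bits of each
 * operand: writing src = src_hi * 2^24 + src_lo, then modulo 2^32
 *
 *     src0 * src1 = lolo + ((hilo + lohi) << 24)
 *
 * which is exactly the sum returned below.
 */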
332 static struct qreg
333 ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
334 {
335         struct qreg src0_hi = qir_SHR(c, src0,
336                                       qir_uniform_ui(c, 24));
337         struct qreg src1_hi = qir_SHR(c, src1,
338                                       qir_uniform_ui(c, 24));
339
340         struct qreg hilo = qir_MUL24(c, src0_hi, src1);
341         struct qreg lohi = qir_MUL24(c, src0, src1_hi);
342         struct qreg lolo = qir_MUL24(c, src0, src1);
343
344         return qir_ADD(c, lolo, qir_SHL(c,
345                                         qir_ADD(c, hilo, lohi),
346                                         qir_uniform_ui(c, 24)));
347 }
348
349 static struct qreg
350 ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
351 {
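        /* Assuming the 24-bit depth value sits in the high bits of the
         * sample, shift it down and rescale it to a float in [0.0, 1.0].
         */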
352         struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
353                                                  qir_uniform_ui(c, 8)));
354         return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
355 }
356
357 /**
358  * Emits a lowered TXF_MS from an MSAA texture.
359  *
360  * The addressing math has been lowered in NIR, and now we just need to read
361  * it like a UBO.
362  */
363 static void
364 ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
365 {
366         uint32_t tile_width = 32;
367         uint32_t tile_height = 32;
368         uint32_t tile_size = (tile_height * tile_width *
369                               VC4_MAX_SAMPLES * sizeof(uint32_t));
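        /* For example, with 32x32-pixel tiles, 4 bytes per sample, and
         * assuming VC4_MAX_SAMPLES == 4, each tile occupies
         * 32 * 32 * 4 * 4 = 16384 bytes.
         */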
370
371         unsigned unit = instr->texture_index;
372         uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
373         uint32_t w_tiles = w / tile_width;
374         uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
375         uint32_t h_tiles = h / tile_height;
376         uint32_t size = w_tiles * h_tiles * tile_size;
377
378         struct qreg addr;
379         assert(instr->num_srcs == 1);
380         assert(instr->src[0].src_type == nir_tex_src_coord);
381         addr = ntq_get_src(c, instr->src[0].src, 0);
382
383         /* Perform the clamping required by kernel validation. */
384         addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
385         addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));
386
387         qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
388                      addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
389
390         ntq_emit_thrsw(c);
391
392         struct qreg tex = qir_TEX_RESULT(c);
393         c->num_texture_samples++;
394
395         enum pipe_format format = c->key->tex[unit].format;
396         if (util_format_is_depth_or_stencil(format)) {
397                 struct qreg scaled = ntq_scale_depth_texture(c, tex);
398                 for (int i = 0; i < 4; i++)
399                         ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
400         } else {
401                 for (int i = 0; i < 4; i++)
402                         ntq_store_dest(c, &instr->dest, i,
403                                        qir_UNPACK_8_F(c, tex, i));
404         }
405 }
406
407 static void
408 ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
409 {
410         struct qreg s, t, r, lod, compare;
411         bool is_txb = false, is_txl = false;
412         unsigned unit = instr->texture_index;
413
414         if (instr->op == nir_texop_txf) {
415                 ntq_emit_txf(c, instr);
416                 return;
417         }
418
419         for (unsigned i = 0; i < instr->num_srcs; i++) {
420                 switch (instr->src[i].src_type) {
421                 case nir_tex_src_coord:
422                         s = ntq_get_src(c, instr->src[i].src, 0);
423                         if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
424                                 t = qir_uniform_f(c, 0.5);
425                         else
426                                 t = ntq_get_src(c, instr->src[i].src, 1);
427                         if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
428                                 r = ntq_get_src(c, instr->src[i].src, 2);
429                         break;
430                 case nir_tex_src_bias:
431                         lod = ntq_get_src(c, instr->src[i].src, 0);
432                         is_txb = true;
433                         break;
434                 case nir_tex_src_lod:
435                         lod = ntq_get_src(c, instr->src[i].src, 0);
436                         is_txl = true;
437                         break;
438                 case nir_tex_src_comparator:
439                         compare = ntq_get_src(c, instr->src[i].src, 0);
440                         break;
441                 default:
442                         unreachable("unknown texture source");
443                 }
444         }
445
446         if (c->stage != QSTAGE_FRAG && !is_txl) {
447                 /* From the GLSL 1.20 spec:
448                  *
449                  *     "If it is mip-mapped and running on the vertex shader,
450                  *      then the base texture is used."
451                  */
452                 is_txl = true;
453                 lod = qir_uniform_ui(c, 0);
454         }
455
456         if (c->key->tex[unit].force_first_level) {
457                 lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
458                 is_txl = true;
459                 is_txb = false;
460         }
461
462         struct qreg texture_u[] = {
463                 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
464                 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
465                 qir_uniform(c, QUNIFORM_CONSTANT, 0),
466                 qir_uniform(c, QUNIFORM_CONSTANT, 0),
467         };
468         uint32_t next_texture_u = 0;
469
470         /* There is no native support for GL texture rectangle coordinates, so
471          * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
472          * 1]).
473          */
474         if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
475                 s = qir_FMUL(c, s,
476                              qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
477                 t = qir_FMUL(c, t,
478                              qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
479         }
480
481         if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
482                 texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
483                                            unit | (is_txl << 16));
484         }
485
486         struct qinst *tmu;
487         if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
488                 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
489                 tmu->src[qir_get_tex_uniform_src(tmu)] =
490                         texture_u[next_texture_u++];
491         } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
492                    c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
493                    c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
494                    c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
495                 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
496                                    qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
497                                                unit));
498                 tmu->src[qir_get_tex_uniform_src(tmu)] =
499                         texture_u[next_texture_u++];
500         }
501
502         if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
503                 s = qir_SAT(c, s);
504         }
505
506         if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
507                 t = qir_SAT(c, t);
508         }
509
510         tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
511         tmu->src[qir_get_tex_uniform_src(tmu)] =
512                 texture_u[next_texture_u++];
513
514         if (is_txl || is_txb) {
515                 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
516                 tmu->src[qir_get_tex_uniform_src(tmu)] =
517                         texture_u[next_texture_u++];
518         }
519
520         tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
521         tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];
522
523         c->num_texture_samples++;
524
525         ntq_emit_thrsw(c);
526
527         struct qreg tex = qir_TEX_RESULT(c);
528
529         enum pipe_format format = c->key->tex[unit].format;
530
531         struct qreg *dest = ntq_get_dest(c, &instr->dest);
532         if (util_format_is_depth_or_stencil(format)) {
533                 struct qreg normalized = ntq_scale_depth_texture(c, tex);
534                 struct qreg depth_output;
535
536                 struct qreg u0 = qir_uniform_f(c, 0.0f);
537                 struct qreg u1 = qir_uniform_f(c, 1.0f);
538                 if (c->key->tex[unit].compare_mode) {
539                         /* From the GL_ARB_shadow spec:
540                          *
541                          *     "Let Dt (D subscript t) be the depth texture
542                          *      value, in the range [0, 1].  Let R be the
543                          *      interpolated texture coordinate clamped to the
544                          *      range [0, 1]."
545                          */
546                         compare = qir_SAT(c, compare);
547
548                         switch (c->key->tex[unit].compare_func) {
549                         case PIPE_FUNC_NEVER:
550                                 depth_output = qir_uniform_f(c, 0.0f);
551                                 break;
552                         case PIPE_FUNC_ALWAYS:
553                                 depth_output = u1;
554                                 break;
555                         case PIPE_FUNC_EQUAL:
556                                 qir_SF(c, qir_FSUB(c, compare, normalized));
557                                 depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
558                                 break;
559                         case PIPE_FUNC_NOTEQUAL:
560                                 qir_SF(c, qir_FSUB(c, compare, normalized));
561                                 depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
562                                 break;
563                         case PIPE_FUNC_GREATER:
564                                 qir_SF(c, qir_FSUB(c, compare, normalized));
565                                 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
566                                 break;
567                         case PIPE_FUNC_GEQUAL:
568                                 qir_SF(c, qir_FSUB(c, normalized, compare));
569                                 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
570                                 break;
571                         case PIPE_FUNC_LESS:
572                                 qir_SF(c, qir_FSUB(c, compare, normalized));
573                                 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
574                                 break;
575                         case PIPE_FUNC_LEQUAL:
576                                 qir_SF(c, qir_FSUB(c, normalized, compare));
577                                 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
578                                 break;
579                         }
580                 } else {
581                         depth_output = normalized;
582                 }
583
584                 for (int i = 0; i < 4; i++)
585                         dest[i] = depth_output;
586         } else {
587                 for (int i = 0; i < 4; i++)
588                         dest[i] = qir_UNPACK_8_F(c, tex, i);
589         }
590 }
591
592 /**
593  * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
594  * to zero).
595  */
596 static struct qreg
597 ntq_ffract(struct vc4_compile *c, struct qreg src)
598 {
599         struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
600         struct qreg diff = qir_FSUB(c, src, trunc);
601         qir_SF(c, diff);
602
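        /* If we truncated a negative value up toward zero, diff is negative;
         * the conditional add below wraps it back into [0, 1).  e.g. src =
         * -1.25: trunc = -1.0, diff = -0.25, and adding 1.0 gives 0.75 =
         * -1.25 - floor(-1.25).
         */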
603         qir_FADD_dest(c, diff,
604                       diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
605
606         return qir_MOV(c, diff);
607 }
608
609 /**
610  * Computes floor(x), which is tricky because our FTOI truncates (rounds to
611  * zero).
612  */
613 static struct qreg
614 ntq_ffloor(struct vc4_compile *c, struct qreg src)
615 {
616         struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
617
618         /* This will be < 0 if we truncated and the truncation was of a value
619          * that was < 0 in the first place.
620          */
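        /* e.g. src = -1.25: result starts at -1.0 and src - result = -0.25,
         * so the conditional subtract below yields -2.0 = floor(-1.25).
         */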
621         qir_SF(c, qir_FSUB(c, src, result));
622
623         struct qinst *sub = qir_FSUB_dest(c, result,
624                                           result, qir_uniform_f(c, 1.0));
625         sub->cond = QPU_COND_NS;
626
627         return qir_MOV(c, result);
628 }
629
630 /**
631  * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
632  * zero).
633  */
634 static struct qreg
635 ntq_fceil(struct vc4_compile *c, struct qreg src)
636 {
637         struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
638
639         /* This will be < 0 if we truncated and the truncation was of a value
640          * that was > 0 in the first place.
641          */
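        /* e.g. src = 1.25: result starts at 1.0 and result - src = -0.25,
         * so the conditional add below yields 2.0 = ceil(1.25).
         */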
642         qir_SF(c, qir_FSUB(c, result, src));
643
644         qir_FADD_dest(c, result,
645                       result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
646
647         return qir_MOV(c, result);
648 }
649
650 static struct qreg
651 ntq_fsin(struct vc4_compile *c, struct qreg src)
652 {
653         float coeff[] = {
654                 -2.0 * M_PI,
655                 pow(2.0 * M_PI, 3) / (3 * 2 * 1),
656                 -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
657                 pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
658                 -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
659         };
660
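        /* The coefficients above are the odd-order Taylor coefficients of
         * -sin(2*pi*x) about 0 (through x^9).  Below, x is the source angle
         * divided by 2*pi and wrapped to [-0.5, 0.5) via ffract, so by
         * periodicity -sin(2*pi*x) equals sin(src).
         */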
661         struct qreg scaled_x =
662                 qir_FMUL(c,
663                          src,
664                          qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
665
666         struct qreg x = qir_FADD(c,
667                                  ntq_ffract(c, scaled_x),
668                                  qir_uniform_f(c, -0.5));
669         struct qreg x2 = qir_FMUL(c, x, x);
670         struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
671         for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
672                 x = qir_FMUL(c, x, x2);
673                 sum = qir_FADD(c,
674                                sum,
675                                qir_FMUL(c,
676                                         x,
677                                         qir_uniform_f(c, coeff[i])));
678         }
679         return sum;
680 }
681
682 static struct qreg
683 ntq_fcos(struct vc4_compile *c, struct qreg src)
684 {
685         float coeff[] = {
686                 -1.0f,
687                 pow(2.0 * M_PI, 2) / (2 * 1),
688                 -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
689                 pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
690                 -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
691                 pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
692         };
693
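        /* Same trick as ntq_fsin(): the polynomial is the Taylor series of
         * -cos(2*pi*x) (through x^10), and evaluating it at
         * x = fract(src / (2*pi)) - 0.5 yields cos(src).
         */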
694         struct qreg scaled_x =
695                 qir_FMUL(c, src,
696                          qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
697         struct qreg x_frac = qir_FADD(c,
698                                       ntq_ffract(c, scaled_x),
699                                       qir_uniform_f(c, -0.5));
700
701         struct qreg sum = qir_uniform_f(c, coeff[0]);
702         struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
703         struct qreg x = x2; /* Current x^2, x^4, or x^6 */
704         for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
705                 if (i != 1)
706                         x = qir_FMUL(c, x, x2);
707
708                 struct qreg mul = qir_FMUL(c,
709                                            x,
710                                            qir_uniform_f(c, coeff[i]));
711                 if (i == 0)
712                         sum = mul;
713                 else
714                         sum = qir_FADD(c, sum, mul);
715         }
716         return sum;
717 }
718
719 static struct qreg
720 ntq_fsign(struct vc4_compile *c, struct qreg src)
721 {
722         struct qreg t = qir_get_temp(c);
723
724         qir_SF(c, src);
725         qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
726         qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
727         qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
728         return qir_MOV(c, t);
729 }
730
731 static void
732 emit_vertex_input(struct vc4_compile *c, int attr)
733 {
734         enum pipe_format format = c->vs_key->attr_formats[attr];
735         uint32_t attr_size = util_format_get_blocksize(format);
736
737         c->vattr_sizes[attr] = align(attr_size, 4);
738         for (int i = 0; i < align(attr_size, 4) / 4; i++) {
739                 c->inputs[attr * 4 + i] =
740                         qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
741                 c->num_inputs++;
742         }
743 }
744
745 static void
746 emit_fragcoord_input(struct vc4_compile *c, int attr)
747 {
748         c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
749         c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
750         c->inputs[attr * 4 + 2] =
751                 qir_FMUL(c,
752                          qir_ITOF(c, qir_FRAG_Z(c)),
753                          qir_uniform_f(c, 1.0 / 0xffffff));
754         c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
755 }
756
757 static struct qreg
758 emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
759                       uint8_t swizzle)
760 {
761         uint32_t i = c->num_input_slots++;
762         struct qreg vary = {
763                 QFILE_VARY,
764                 i
765         };
766
767         if (c->num_input_slots >= c->input_slots_array_size) {
768                 c->input_slots_array_size =
769                         MAX2(4, c->input_slots_array_size * 2);
770
771                 c->input_slots = reralloc(c, c->input_slots,
772                                           struct vc4_varying_slot,
773                                           c->input_slots_array_size);
774         }
775
776         c->input_slots[i].slot = slot;
777         c->input_slots[i].swizzle = swizzle;
778
779         return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
780 }
781
782 static void
783 emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
784 {
785         for (int i = 0; i < 4; i++) {
786                 c->inputs[attr * 4 + i] =
787                         emit_fragment_varying(c, slot, i);
788                 c->num_inputs++;
789         }
790 }
791
792 static void
793 add_output(struct vc4_compile *c,
794            uint32_t decl_offset,
795            uint8_t slot,
796            uint8_t swizzle)
797 {
798         uint32_t old_array_size = c->outputs_array_size;
799         resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
800                           decl_offset + 1);
801
802         if (old_array_size != c->outputs_array_size) {
803                 c->output_slots = reralloc(c,
804                                            c->output_slots,
805                                            struct vc4_varying_slot,
806                                            c->outputs_array_size);
807         }
808
809         c->output_slots[decl_offset].slot = slot;
810         c->output_slots[decl_offset].swizzle = swizzle;
811 }
812
813 static void
814 declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
815 {
816         unsigned array_id = c->num_uniform_ranges++;
817         if (array_id >= c->ubo_ranges_array_size) {
818                 c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
819                                                 array_id + 1);
820                 c->ubo_ranges = reralloc(c, c->ubo_ranges,
821                                          struct vc4_compiler_ubo_range,
822                                          c->ubo_ranges_array_size);
823         }
824
825         c->ubo_ranges[array_id].dst_offset = 0;
826         c->ubo_ranges[array_id].src_offset = start;
827         c->ubo_ranges[array_id].size = size;
828         c->ubo_ranges[array_id].used = false;
829 }
830
831 static bool
832 ntq_src_is_only_ssa_def_user(nir_src *src)
833 {
834         if (!src->is_ssa)
835                 return false;
836
837         if (!list_empty(&src->ssa->if_uses))
838                 return false;
839
840         return (src->ssa->uses.next == &src->use_link &&
841                 src->ssa->uses.next->next == &src->ssa->uses);
842 }
843
844 /**
845  * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
846  * bit set.
847  *
848  * However, as an optimization, it tries to find the instructions generating
849  * the sources to be packed and just emit the pack flag there, if possible.
850  */
851 static void
852 ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
853 {
854         struct qreg result = qir_get_temp(c);
855         struct nir_alu_instr *vec4 = NULL;
856
857         /* If packing from a vec4 op (as expected), identify it so that we can
858          * peek back at what generated its sources.
859          */
860         if (instr->src[0].src.is_ssa &&
861             instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
862             nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
863             nir_op_vec4) {
864                 vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
865         }
866
867         /* If the pack is replicating the same channel 4 times, use the 8888
868          * pack flag.  This is common for blending using the alpha
869          * channel.
870          */
871         if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
872             instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
873             instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
874                 struct qreg rep = ntq_get_src(c,
875                                               instr->src[0].src,
876                                               instr->src[0].swizzle[0]);
877                 ntq_store_dest(c, &instr->dest.dest, 0, qir_PACK_8888_F(c, rep));
878                 return;
879         }
880
881         for (int i = 0; i < 4; i++) {
882                 int swiz = instr->src[0].swizzle[i];
883                 struct qreg src;
884                 if (vec4) {
885                         src = ntq_get_src(c, vec4->src[swiz].src,
886                                           vec4->src[swiz].swizzle[0]);
887                 } else {
888                         src = ntq_get_src(c, instr->src[0].src, swiz);
889                 }
890
891                 if (vec4 &&
892                     ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
893                     src.file == QFILE_TEMP &&
894                     c->defs[src.index] &&
895                     qir_is_mul(c->defs[src.index]) &&
896                     !c->defs[src.index]->dst.pack) {
897                         struct qinst *rewrite = c->defs[src.index];
898                         c->defs[src.index] = NULL;
899                         rewrite->dst = result;
900                         rewrite->dst.pack = QPU_PACK_MUL_8A + i;
901                         continue;
902                 }
903
904                 qir_PACK_8_F(c, result, src, i);
905         }
906
907         ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
908 }
909
910 /** Handles sign-extended bitfield extracts for 16 bits. */
911 static struct qreg
912 ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
913               struct qreg bits)
914 {
915         assert(bits.file == QFILE_UNIF &&
916                c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
917                c->uniform_data[bits.index] == 16);
918
919         assert(offset.file == QFILE_UNIF &&
920                c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
921         int offset_bit = c->uniform_data[offset.index];
922         assert(offset_bit % 16 == 0);
923
924         return qir_UNPACK_16_I(c, base, offset_bit / 16);
925 }
926
927 /** Handles unsigned bitfield extracts for 8 bits. */
928 static struct qreg
929 ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
930               struct qreg bits)
931 {
932         assert(bits.file == QFILE_UNIF &&
933                c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
934                c->uniform_data[bits.index] == 8);
935
936         assert(offset.file == QFILE_UNIF &&
937                c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
938         int offset_bit = c->uniform_data[offset.index];
939         assert(offset_bit % 8 == 0);
940
941         return qir_UNPACK_8_I(c, base, offset_bit / 8);
942 }
943
944 /**
945  * If compare_instr is a valid comparison instruction, emits the
946  * compare_instr's comparison and returns the sel_instr's return value based
947  * on the compare_instr's result.
948  */
949 static bool
950 ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
951                     nir_alu_instr *compare_instr,
952                     nir_alu_instr *sel_instr)
953 {
954         enum qpu_cond cond;
955
956         switch (compare_instr->op) {
957         case nir_op_feq:
958         case nir_op_ieq:
959         case nir_op_seq:
960                 cond = QPU_COND_ZS;
961                 break;
962         case nir_op_fne:
963         case nir_op_ine:
964         case nir_op_sne:
965                 cond = QPU_COND_ZC;
966                 break;
967         case nir_op_fge:
968         case nir_op_ige:
969         case nir_op_uge:
970         case nir_op_sge:
971                 cond = QPU_COND_NC;
972                 break;
973         case nir_op_flt:
974         case nir_op_ilt:
975         case nir_op_slt:
976                 cond = QPU_COND_NS;
977                 break;
978         default:
979                 return false;
980         }
981
982         struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
983         struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
984
985         unsigned unsized_type =
986                 nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
987         if (unsized_type == nir_type_float)
988                 qir_SF(c, qir_FSUB(c, src0, src1));
989         else
990                 qir_SF(c, qir_SUB(c, src0, src1));
991
992         switch (sel_instr->op) {
993         case nir_op_seq:
994         case nir_op_sne:
995         case nir_op_sge:
996         case nir_op_slt:
997                 *dest = qir_SEL(c, cond,
998                                 qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
999                 break;
1000
1001         case nir_op_bcsel:
1002                 *dest = qir_SEL(c, cond,
1003                                 ntq_get_alu_src(c, sel_instr, 1),
1004                                 ntq_get_alu_src(c, sel_instr, 2));
1005                 break;
1006
1007         default:
1008                 *dest = qir_SEL(c, cond,
1009                                 qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
1010                 break;
1011         }
1012
1013         /* Make the temporary for ntq_store_dest(). */
1014         *dest = qir_MOV(c, *dest);
1015
1016         return true;
1017 }
1018
1019 /**
1020  * Attempts to fold a comparison generating a boolean result into the
1021  * condition code for selecting between two values, instead of comparing the
1022  * boolean result against 0 to generate the condition code.
1023  */
1024 static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
1025                                   struct qreg *src)
1026 {
1027         if (!instr->src[0].src.is_ssa)
1028                 goto out;
1029         if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
1030                 goto out;
1031         nir_alu_instr *compare =
1032                 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
1033         if (!compare)
1034                 goto out;
1035
1036         struct qreg dest;
1037         if (ntq_emit_comparison(c, &dest, compare, instr))
1038                 return dest;
1039
1040 out:
1041         qir_SF(c, src[0]);
1042         return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
1043 }
1044
1045 static struct qreg
1046 ntq_fddx(struct vc4_compile *c, struct qreg src)
1047 {
1048         /* Make sure that we have a bare temp to use for MUL rotation, so it
1049          * can be allocated to an accumulator.
1050          */
1051         if (src.pack || src.file != QFILE_TEMP)
1052                 src = qir_MOV(c, src);
1053
1054         struct qreg from_left = qir_ROT_MUL(c, src, 1);
1055         struct qreg from_right = qir_ROT_MUL(c, src, 15);
1056
1057         /* Distinguish left/right pixels of the quad. */
1058         qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
1059                           qir_uniform_ui(c, 1)));
1060
1061         return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1062                                   qir_FSUB(c, from_right, src),
1063                                   qir_FSUB(c, src, from_left)));
1064 }
1065
1066 static struct qreg
1067 ntq_fddy(struct vc4_compile *c, struct qreg src)
1068 {
1069         if (src.pack || src.file != QFILE_TEMP)
1070                 src = qir_MOV(c, src);
1071
1072         struct qreg from_bottom = qir_ROT_MUL(c, src, 2);
1073         struct qreg from_top = qir_ROT_MUL(c, src, 14);
1074
1075         /* Distinguish top/bottom pixels of the quad. */
1076         qir_SF(c, qir_AND(c,
1077                           qir_reg(QFILE_QPU_ELEMENT, 0),
1078                           qir_uniform_ui(c, 2)));
1079
1080         return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1081                                   qir_FSUB(c, from_top, src),
1082                                   qir_FSUB(c, src, from_bottom)));
1083 }
1084
1085 static void
1086 ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
1087 {
1088         /* This should always be lowered to ALU operations for VC4. */
1089         assert(!instr->dest.saturate);
1090
1091         /* Vectors are special in that they have non-scalarized writemasks,
1092          * and just take the first swizzle channel for each argument in order
1093          * into each writemask channel.
1094          */
1095         if (instr->op == nir_op_vec2 ||
1096             instr->op == nir_op_vec3 ||
1097             instr->op == nir_op_vec4) {
1098                 struct qreg srcs[4];
1099                 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1100                         srcs[i] = ntq_get_src(c, instr->src[i].src,
1101                                               instr->src[i].swizzle[0]);
1102                 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1103                         ntq_store_dest(c, &instr->dest.dest, i,
1104                                        qir_MOV(c, srcs[i]));
1105                 return;
1106         }
1107
1108         if (instr->op == nir_op_pack_unorm_4x8) {
1109                 ntq_emit_pack_unorm_4x8(c, instr);
1110                 return;
1111         }
1112
1113         if (instr->op == nir_op_unpack_unorm_4x8) {
1114                 struct qreg src = ntq_get_src(c, instr->src[0].src,
1115                                               instr->src[0].swizzle[0]);
1116                 for (int i = 0; i < 4; i++) {
1117                         if (instr->dest.write_mask & (1 << i))
1118                                 ntq_store_dest(c, &instr->dest.dest, i,
1119                                                qir_UNPACK_8_F(c, src, i));
1120                 }
1121                 return;
1122         }
1123
1124         /* General case: We can just grab the one used channel per src. */
1125         struct qreg src[nir_op_infos[instr->op].num_inputs];
1126         for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
1127                 src[i] = ntq_get_alu_src(c, instr, i);
1128         }
1129
1130         struct qreg result;
1131
1132         switch (instr->op) {
1133         case nir_op_fmov:
1134         case nir_op_imov:
1135                 result = qir_MOV(c, src[0]);
1136                 break;
1137         case nir_op_fmul:
1138                 result = qir_FMUL(c, src[0], src[1]);
1139                 break;
1140         case nir_op_fadd:
1141                 result = qir_FADD(c, src[0], src[1]);
1142                 break;
1143         case nir_op_fsub:
1144                 result = qir_FSUB(c, src[0], src[1]);
1145                 break;
1146         case nir_op_fmin:
1147                 result = qir_FMIN(c, src[0], src[1]);
1148                 break;
1149         case nir_op_fmax:
1150                 result = qir_FMAX(c, src[0], src[1]);
1151                 break;
1152
1153         case nir_op_f2i:
1154         case nir_op_f2u:
1155                 result = qir_FTOI(c, src[0]);
1156                 break;
1157         case nir_op_i2f:
1158         case nir_op_u2f:
1159                 result = qir_ITOF(c, src[0]);
1160                 break;
1161         case nir_op_b2f:
1162                 result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
1163                 break;
1164         case nir_op_b2i:
1165                 result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
1166                 break;
1167         case nir_op_i2b:
1168         case nir_op_f2b:
1169                 qir_SF(c, src[0]);
1170                 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
1171                                             qir_uniform_ui(c, ~0),
1172                                             qir_uniform_ui(c, 0)));
1173                 break;
1174
1175         case nir_op_iadd:
1176                 result = qir_ADD(c, src[0], src[1]);
1177                 break;
1178         case nir_op_ushr:
1179                 result = qir_SHR(c, src[0], src[1]);
1180                 break;
1181         case nir_op_isub:
1182                 result = qir_SUB(c, src[0], src[1]);
1183                 break;
1184         case nir_op_ishr:
1185                 result = qir_ASR(c, src[0], src[1]);
1186                 break;
1187         case nir_op_ishl:
1188                 result = qir_SHL(c, src[0], src[1]);
1189                 break;
1190         case nir_op_imin:
1191                 result = qir_MIN(c, src[0], src[1]);
1192                 break;
1193         case nir_op_imax:
1194                 result = qir_MAX(c, src[0], src[1]);
1195                 break;
1196         case nir_op_iand:
1197                 result = qir_AND(c, src[0], src[1]);
1198                 break;
1199         case nir_op_ior:
1200                 result = qir_OR(c, src[0], src[1]);
1201                 break;
1202         case nir_op_ixor:
1203                 result = qir_XOR(c, src[0], src[1]);
1204                 break;
1205         case nir_op_inot:
1206                 result = qir_NOT(c, src[0]);
1207                 break;
1208
1209         case nir_op_imul:
1210                 result = ntq_umul(c, src[0], src[1]);
1211                 break;
1212
1213         case nir_op_seq:
1214         case nir_op_sne:
1215         case nir_op_sge:
1216         case nir_op_slt:
1217         case nir_op_feq:
1218         case nir_op_fne:
1219         case nir_op_fge:
1220         case nir_op_flt:
1221         case nir_op_ieq:
1222         case nir_op_ine:
1223         case nir_op_ige:
1224         case nir_op_uge:
1225         case nir_op_ilt:
1226                 if (!ntq_emit_comparison(c, &result, instr, instr)) {
1227                         fprintf(stderr, "Bad comparison instruction\n");
1228                 }
1229                 break;
1230
1231         case nir_op_bcsel:
1232                 result = ntq_emit_bcsel(c, instr, src);
1233                 break;
1234         case nir_op_fcsel:
1235                 qir_SF(c, src[0]);
1236                 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
1237                 break;
1238
1239         case nir_op_frcp:
1240                 result = ntq_rcp(c, src[0]);
1241                 break;
1242         case nir_op_frsq:
1243                 result = ntq_rsq(c, src[0]);
1244                 break;
1245         case nir_op_fexp2:
1246                 result = qir_EXP2(c, src[0]);
1247                 break;
1248         case nir_op_flog2:
1249                 result = qir_LOG2(c, src[0]);
1250                 break;
1251
1252         case nir_op_ftrunc:
1253                 result = qir_ITOF(c, qir_FTOI(c, src[0]));
1254                 break;
1255         case nir_op_fceil:
1256                 result = ntq_fceil(c, src[0]);
1257                 break;
1258         case nir_op_ffract:
1259                 result = ntq_ffract(c, src[0]);
1260                 break;
1261         case nir_op_ffloor:
1262                 result = ntq_ffloor(c, src[0]);
1263                 break;
1264
1265         case nir_op_fsin:
1266                 result = ntq_fsin(c, src[0]);
1267                 break;
1268         case nir_op_fcos:
1269                 result = ntq_fcos(c, src[0]);
1270                 break;
1271
1272         case nir_op_fsign:
1273                 result = ntq_fsign(c, src[0]);
1274                 break;
1275
1276         case nir_op_fabs:
1277                 result = qir_FMAXABS(c, src[0], src[0]);
1278                 break;
1279         case nir_op_iabs:
1280                 result = qir_MAX(c, src[0],
1281                                 qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
1282                 break;
1283
1284         case nir_op_ibitfield_extract:
1285                 result = ntq_emit_ibfe(c, src[0], src[1], src[2]);
1286                 break;
1287
1288         case nir_op_ubitfield_extract:
1289                 result = ntq_emit_ubfe(c, src[0], src[1], src[2]);
1290                 break;
1291
1292         case nir_op_usadd_4x8:
1293                 result = qir_V8ADDS(c, src[0], src[1]);
1294                 break;
1295
1296         case nir_op_ussub_4x8:
1297                 result = qir_V8SUBS(c, src[0], src[1]);
1298                 break;
1299
1300         case nir_op_umin_4x8:
1301                 result = qir_V8MIN(c, src[0], src[1]);
1302                 break;
1303
1304         case nir_op_umax_4x8:
1305                 result = qir_V8MAX(c, src[0], src[1]);
1306                 break;
1307
1308         case nir_op_umul_unorm_4x8:
1309                 result = qir_V8MULD(c, src[0], src[1]);
1310                 break;
1311
1312         case nir_op_fddx:
1313         case nir_op_fddx_coarse:
1314         case nir_op_fddx_fine:
1315                 result = ntq_fddx(c, src[0]);
1316                 break;
1317
1318         case nir_op_fddy:
1319         case nir_op_fddy_coarse:
1320         case nir_op_fddy_fine:
1321                 result = ntq_fddy(c, src[0]);
1322                 break;
1323
1324         default:
1325                 fprintf(stderr, "unknown NIR ALU inst: ");
1326                 nir_print_instr(&instr->instr, stderr);
1327                 fprintf(stderr, "\n");
1328                 abort();
1329         }
1330
1331         /* We have a scalar result, so the instruction should only have a
1332          * single channel written to.
1333          */
1334         assert(util_is_power_of_two(instr->dest.write_mask));
1335         ntq_store_dest(c, &instr->dest.dest,
1336                        ffs(instr->dest.write_mask) - 1, result);
1337 }
1338
1339 static void
1340 emit_frag_end(struct vc4_compile *c)
1341 {
1342         struct qreg color;
1343         if (c->output_color_index != -1) {
1344                 color = c->outputs[c->output_color_index];
1345         } else {
1346                 color = qir_uniform_ui(c, 0);
1347         }
1348
1349         uint32_t discard_cond = QPU_COND_ALWAYS;
1350         if (c->s->info->fs.uses_discard) {
1351                 qir_SF(c, c->discard);
1352                 discard_cond = QPU_COND_ZS;
1353         }
1354
1355         if (c->fs_key->stencil_enabled) {
1356                 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1357                              qir_uniform(c, QUNIFORM_STENCIL, 0));
1358                 if (c->fs_key->stencil_twoside) {
1359                         qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1360                                      qir_uniform(c, QUNIFORM_STENCIL, 1));
1361                 }
1362                 if (c->fs_key->stencil_full_writemasks) {
1363                         qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1364                                      qir_uniform(c, QUNIFORM_STENCIL, 2));
1365                 }
1366         }
1367
1368         if (c->output_sample_mask_index != -1) {
1369                 qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1370         }
1371
1372         if (c->fs_key->depth_enabled) {
1373                 if (c->output_position_index != -1) {
1374                         qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1375                                       qir_FMUL(c,
1376                                                c->outputs[c->output_position_index],
1377                                                qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
1378                 } else {
1379                         qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1380                                      qir_FRAG_Z(c))->cond = discard_cond;
1381                 }
1382         }
1383
1384         if (!c->msaa_per_sample_output) {
1385                 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
1386                              color)->cond = discard_cond;
1387         } else {
1388                 for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
1389                         qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
1390                                      c->sample_colors[i])->cond = discard_cond;
1391                 }
1392         }
1393 }
1394
1395 static void
1396 emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1397 {
1398         struct qreg packed = qir_get_temp(c);
1399
1400         for (int i = 0; i < 2; i++) {
1401                 struct qreg scale =
1402                         qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1403
1404                 struct qreg packed_chan = packed;
1405                 packed_chan.pack = QPU_PACK_A_16A + i;
1406
1407                 qir_FTOI_dest(c, packed_chan,
1408                               qir_FMUL(c,
1409                                        qir_FMUL(c,
1410                                                 c->outputs[c->output_position_index + i],
1411                                                 scale),
1412                                        rcp_w));
1413         }
1414
1415         qir_VPM_WRITE(c, packed);
1416 }
1417
1418 static void
1419 emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1420 {
1421         struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1422         struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1423
1424         qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1425                                                           c->outputs[c->output_position_index + 2],
1426                                                           zscale),
1427                                               rcp_w),
1428                                   zoffset));
1429 }
1430
1431 static void
1432 emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1433 {
1434         qir_VPM_WRITE(c, rcp_w);
1435 }
1436
1437 static void
1438 emit_point_size_write(struct vc4_compile *c)
1439 {
1440         struct qreg point_size;
1441
1442         if (c->output_point_size_index != -1)
1443                 point_size = c->outputs[c->output_point_size_index];
1444         else
1445                 point_size = qir_uniform_f(c, 1.0);
1446
1447         /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
1448          * BCM21553).
1449          */
1450         point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
1451
1452         qir_VPM_WRITE(c, point_size);
1453 }
1454
1455 /**
1456  * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1457  *
1458  * The simulator insists that there be at least one vertex attribute, so
1459  * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
1460  * insists that all vertex attributes loaded get read by the VS/CS, so we have
1461  * to consume it here.
1462  */
1463 static void
1464 emit_stub_vpm_read(struct vc4_compile *c)
1465 {
1466         if (c->num_inputs)
1467                 return;
1468
1469         c->vattr_sizes[0] = 4;
1470         (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
1471         c->num_inputs++;
1472 }
1473
1474 static void
1475 emit_vert_end(struct vc4_compile *c,
1476               struct vc4_varying_slot *fs_inputs,
1477               uint32_t num_fs_inputs)
1478 {
1479         struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1480
1481         emit_stub_vpm_read(c);
1482
1483         emit_scaled_viewport_write(c, rcp_w);
1484         emit_zs_write(c, rcp_w);
1485         emit_rcp_wc_write(c, rcp_w);
1486         if (c->vs_key->per_vertex_point_size)
1487                 emit_point_size_write(c);
1488
1489         for (int i = 0; i < num_fs_inputs; i++) {
1490                 struct vc4_varying_slot *input = &fs_inputs[i];
1491                 int j;
1492
1493                 for (j = 0; j < c->num_outputs; j++) {
1494                         struct vc4_varying_slot *output =
1495                                 &c->output_slots[j];
1496
1497                         if (input->slot == output->slot &&
1498                             input->swizzle == output->swizzle) {
1499                                 qir_VPM_WRITE(c, c->outputs[j]);
1500                                 break;
1501                         }
1502                 }
1503                 /* Emit padding if we didn't find a declared VS output for
1504                  * this FS input.
1505                  */
1506                 if (j == c->num_outputs)
1507                         qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1508         }
1509 }
1510
1511 static void
1512 emit_coord_end(struct vc4_compile *c)
1513 {
1514         struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1515
1516         emit_stub_vpm_read(c);
1517
1518         for (int i = 0; i < 4; i++)
1519                 qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1520
1521         emit_scaled_viewport_write(c, rcp_w);
1522         emit_zs_write(c, rcp_w);
1523         emit_rcp_wc_write(c, rcp_w);
1524         if (c->vs_key->per_vertex_point_size)
1525                 emit_point_size_write(c);
1526 }
1527
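/**
 * Runs the NIR lowering and optimization passes in a loop until none of
 * them reports further progress.
 */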
1528 static void
1529 vc4_optimize_nir(struct nir_shader *s)
1530 {
1531         bool progress;
1532
1533         do {
1534                 progress = false;
1535
1536                 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1537                 NIR_PASS(progress, s, nir_lower_alu_to_scalar);
1538                 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
1539                 NIR_PASS(progress, s, nir_copy_prop);
1540                 NIR_PASS(progress, s, nir_opt_remove_phis);
1541                 NIR_PASS(progress, s, nir_opt_dce);
1542                 NIR_PASS(progress, s, nir_opt_dead_cf);
1543                 NIR_PASS(progress, s, nir_opt_cse);
1544                 NIR_PASS(progress, s, nir_opt_peephole_select, 8);
1545                 NIR_PASS(progress, s, nir_opt_algebraic);
1546                 NIR_PASS(progress, s, nir_opt_constant_folding);
1547                 NIR_PASS(progress, s, nir_opt_undef);
1548                 NIR_PASS(progress, s, nir_opt_loop_unroll,
1549                          nir_var_shader_in |
1550                          nir_var_shader_out |
1551                          nir_var_local);
1552         } while (progress);
1553 }
1554
1555 static int
1556 driver_location_compare(const void *in_a, const void *in_b)
1557 {
1558         const nir_variable *const *a = in_a;
1559         const nir_variable *const *b = in_b;
1560
1561         return (*a)->data.driver_location - (*b)->data.driver_location;
1562 }
1563
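/**
 * Walks the shader's input variables in driver_location order and emits the
 * QIR that loads each one into c->inputs[]: VPM reads for the VS/CS, and
 * fragcoord/point-coord/varying setup for the FS.
 */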
1564 static void
1565 ntq_setup_inputs(struct vc4_compile *c)
1566 {
1567         unsigned num_entries = 0;
1568         nir_foreach_variable(var, &c->s->inputs)
1569                 num_entries++;
1570
1571         nir_variable *vars[num_entries];
1572
1573         unsigned i = 0;
1574         nir_foreach_variable(var, &c->s->inputs)
1575                 vars[i++] = var;
1576
1577         /* Sort the variables so that we emit the input setup in
1578          * driver_location order.  This is required for VPM reads, whose data
1579          * is fetched into the VPM in driver_location (TGSI register index)
1580          * order.
1581          */
1582         qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1583
1584         for (unsigned i = 0; i < num_entries; i++) {
1585                 nir_variable *var = vars[i];
1586                 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1587                 unsigned loc = var->data.driver_location;
1588
1589                 assert(array_len == 1);
1590                 (void)array_len;
1591                 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1592                                   (loc + 1) * 4);
1593
1594                 if (c->stage == QSTAGE_FRAG) {
1595                         if (var->data.location == VARYING_SLOT_POS) {
1596                                 emit_fragcoord_input(c, loc);
1597                         } else if (var->data.location == VARYING_SLOT_PNTC ||
1598                                    (var->data.location >= VARYING_SLOT_VAR0 &&
1599                                     (c->fs_key->point_sprite_mask &
1600                                      (1 << (var->data.location -
1601                                             VARYING_SLOT_VAR0))))) {
1602                                 c->inputs[loc * 4 + 0] = c->point_x;
1603                                 c->inputs[loc * 4 + 1] = c->point_y;
1604                         } else {
1605                                 emit_fragment_input(c, loc, var->data.location);
1606                         }
1607                 } else {
1608                         emit_vertex_input(c, loc);
1609                 }
1610         }
1611 }
1612
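/**
 * Registers the shader's output variables with add_output() and records the
 * indices of the specially handled outputs (color, depth/position, sample
 * mask, point size).
 */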
1613 static void
1614 ntq_setup_outputs(struct vc4_compile *c)
1615 {
1616         nir_foreach_variable(var, &c->s->outputs) {
1617                 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1618                 unsigned loc = var->data.driver_location * 4;
1619
1620                 assert(array_len == 1);
1621                 (void)array_len;
1622
1623                 for (int i = 0; i < 4; i++)
1624                         add_output(c, loc + i, var->data.location, i);
1625
1626                 if (c->stage == QSTAGE_FRAG) {
1627                         switch (var->data.location) {
1628                         case FRAG_RESULT_COLOR:
1629                         case FRAG_RESULT_DATA0:
1630                                 c->output_color_index = loc;
1631                                 break;
1632                         case FRAG_RESULT_DEPTH:
1633                                 c->output_position_index = loc;
1634                                 break;
1635                         case FRAG_RESULT_SAMPLE_MASK:
1636                                 c->output_sample_mask_index = loc;
1637                                 break;
1638                         }
1639                 } else {
1640                         switch (var->data.location) {
1641                         case VARYING_SLOT_POS:
1642                                 c->output_position_index = loc;
1643                                 break;
1644                         case VARYING_SLOT_PSIZ:
1645                                 c->output_point_size_index = loc;
1646                                 break;
1647                         }
1648                 }
1649         }
1650 }
1651
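/**
 * Declares a uniform range for each uniform variable, which indirect
 * uniform loads are later matched against.
 */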
1652 static void
1653 ntq_setup_uniforms(struct vc4_compile *c)
1654 {
1655         nir_foreach_variable(var, &c->s->uniforms) {
1656                 uint32_t vec4_count = st_glsl_type_size(var->type);
1657                 unsigned vec4_size = 4 * sizeof(float);
1658
1659                 declare_uniform_range(c, var->data.driver_location * vec4_size,
1660                                       vec4_count * vec4_size);
1661
1662         }
1663 }
1664
1665 /**
1666  * Sets up the mapping from nir_register to struct qreg *.
1667  *
1668  * Each nir_register gets a struct qreg per 32-bit component being stored.
1669  */
1670 static void
1671 ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1672 {
1673         foreach_list_typed(nir_register, nir_reg, node, list) {
1674                 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1675                 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1676                                                   array_len *
1677                                                   nir_reg->num_components);
1678
1679                 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1680
1681                 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1682                         qregs[i] = qir_get_temp(c);
1683         }
1684 }
1685
1686 static void
1687 ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1688 {
1689         struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1690         for (int i = 0; i < instr->def.num_components; i++)
1691                 qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);
1692
1693         _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1694 }
1695
1696 static void
1697 ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
1698 {
1699         struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1700
1701         /* QIR needs there to be *some* value, so pick 0 (same as for
1702          * ntq_setup_registers()).
1703          */
1704         for (int i = 0; i < instr->def.num_components; i++)
1705                 qregs[i] = qir_uniform_ui(c, 0);
1706 }
1707
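/**
 * Emits the QIR for a NIR intrinsic: uniform, input, and system value
 * loads, output stores, and discards.
 */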
1708 static void
1709 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
1710 {
1711         nir_const_value *const_offset;
1712         unsigned offset;
1713
1714         switch (instr->intrinsic) {
1715         case nir_intrinsic_load_uniform:
1716                 assert(instr->num_components == 1);
1717                 const_offset = nir_src_as_const_value(instr->src[0]);
1718                 if (const_offset) {
1719                         offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1720                         assert(offset % 4 == 0);
1721                         /* We need dwords */
1722                         offset = offset / 4;
1723                         ntq_store_dest(c, &instr->dest, 0,
1724                                        qir_uniform(c, QUNIFORM_UNIFORM,
1725                                                    offset));
1726                 } else {
1727                         ntq_store_dest(c, &instr->dest, 0,
1728                                        indirect_uniform_load(c, instr));
1729                 }
1730                 break;
1731
1732         case nir_intrinsic_load_user_clip_plane:
1733                 for (int i = 0; i < instr->num_components; i++) {
1734                         ntq_store_dest(c, &instr->dest, i,
1735                                        qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1736                                                    nir_intrinsic_ucp_id(instr) *
1737                                                    4 + i));
1738                 }
1739                 break;
1740
1741         case nir_intrinsic_load_blend_const_color_r_float:
1742         case nir_intrinsic_load_blend_const_color_g_float:
1743         case nir_intrinsic_load_blend_const_color_b_float:
1744         case nir_intrinsic_load_blend_const_color_a_float:
1745                 ntq_store_dest(c, &instr->dest, 0,
1746                                qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
1747                                            (instr->intrinsic -
1748                                             nir_intrinsic_load_blend_const_color_r_float),
1749                                            0));
1750                 break;
1751
1752         case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
1753                 ntq_store_dest(c, &instr->dest, 0,
1754                                qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
1755                                            0));
1756                 break;
1757
1758         case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
1759                 ntq_store_dest(c, &instr->dest, 0,
1760                                qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
1761                                            0));
1762                 break;
1763
1764         case nir_intrinsic_load_alpha_ref_float:
1765                 ntq_store_dest(c, &instr->dest, 0,
1766                                qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
1767                 break;
1768
1769         case nir_intrinsic_load_sample_mask_in:
1770                 ntq_store_dest(c, &instr->dest, 0,
1771                                qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
1772                 break;
1773
1774         case nir_intrinsic_load_front_face:
1775                 /* The register contains 0 (front) or 1 (back), and we need to
1776                  * turn it into a NIR bool where true means front.
1777                  */
1778                 ntq_store_dest(c, &instr->dest, 0,
1779                                qir_ADD(c,
1780                                        qir_uniform_ui(c, -1),
1781                                        qir_reg(QFILE_FRAG_REV_FLAG, 0)));
1782                 break;
1783
1784         case nir_intrinsic_load_input:
1785                 assert(instr->num_components == 1);
1786                 const_offset = nir_src_as_const_value(instr->src[0]);
1787                 assert(const_offset && "vc4 doesn't support indirect inputs");
1788                 if (c->stage == QSTAGE_FRAG &&
1789                     nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
1790                         assert(const_offset->u32[0] == 0);
1791                         /* Reads of the per-sample color need to be done in
1792                          * order.
1793                          */
1794                         int sample_index = (nir_intrinsic_base(instr) -
1795                                            VC4_NIR_TLB_COLOR_READ_INPUT);
1796                         for (int i = 0; i <= sample_index; i++) {
1797                                 if (c->color_reads[i].file == QFILE_NULL) {
1798                                         c->color_reads[i] =
1799                                                 qir_TLB_COLOR_READ(c);
1800                                 }
1801                         }
1802                         ntq_store_dest(c, &instr->dest, 0,
1803                                        qir_MOV(c, c->color_reads[sample_index]));
1804                 } else {
1805                         offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1806                         int comp = nir_intrinsic_component(instr);
1807                         ntq_store_dest(c, &instr->dest, 0,
1808                                        qir_MOV(c, c->inputs[offset * 4 + comp]));
1809                 }
1810                 break;
1811
1812         case nir_intrinsic_store_output:
1813                 const_offset = nir_src_as_const_value(instr->src[1]);
1814                 assert(const_offset && "vc4 doesn't support indirect outputs");
1815                 offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1816
1817                 /* MSAA color outputs are the only case where we have an
1818                  * output that's not lowered to being a store of a single 32
1819                  * bit value.
1820                  */
1821                 if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
1822                         assert(offset == c->output_color_index);
1823                         for (int i = 0; i < 4; i++) {
1824                                 c->sample_colors[i] =
1825                                         qir_MOV(c, ntq_get_src(c, instr->src[0],
1826                                                                i));
1827                         }
1828                 } else {
1829                         offset = offset * 4 + nir_intrinsic_component(instr);
1830                         assert(instr->num_components == 1);
1831                         c->outputs[offset] =
1832                                 qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
1833                         c->num_outputs = MAX2(c->num_outputs, offset + 1);
1834                 }
1835                 break;
1836
1837         case nir_intrinsic_discard:
1838                 if (c->execute.file != QFILE_NULL) {
1839                         qir_SF(c, c->execute);
1840                         qir_MOV_cond(c, QPU_COND_ZS, c->discard,
1841                                      qir_uniform_ui(c, ~0));
1842                 } else {
1843                         qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
1844                 }
1845                 break;
1846
1847         case nir_intrinsic_discard_if: {
1848                 /* true (~0) if we're discarding */
1849                 struct qreg cond = ntq_get_src(c, instr->src[0], 0);
1850
1851                 if (c->execute.file != QFILE_NULL) {
1852                         /* execute == 0 means the channel is active.  Invert
1853                          * the condition so that we can use zero as "executing
1854                          * and discarding."
1855                          */
1856                         qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
1857                         qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
1858                 } else {
1859                         qir_OR_dest(c, c->discard, c->discard,
1860                                     ntq_get_src(c, instr->src[0], 0));
1861                 }
1862
1863                 break;
1864         }
1865
1866         default:
1867                 fprintf(stderr, "Unknown intrinsic: ");
1868                 nir_print_instr(&instr->instr, stderr);
1869                 fprintf(stderr, "\n");
1870                 break;
1871         }
1872 }
1873
1874 /* Clears (activates) the execute flags for any channels whose jump target
1875  * matches this block.
1876  */
1877 static void
1878 ntq_activate_execute_for_block(struct vc4_compile *c)
1879 {
1880         qir_SF(c, qir_SUB(c,
1881                           c->execute,
1882                           qir_uniform_ui(c, c->cur_block->index)));
1883         qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0));
1884 }
1885
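/**
 * Emits a NIR if statement using the execute-mask scheme: c->execute holds,
 * per channel, the index of the block that channel is waiting to run (0
 * meaning "active"), and branches are only taken when all channels want to
 * skip ahead.
 */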
1886 static void
1887 ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
1888 {
1889         if (!c->vc4->screen->has_control_flow) {
1890                 fprintf(stderr,
1891                         "IF statement support requires updated kernel.\n");
1892                 return;
1893         }
1894
1895         nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1896         bool empty_else_block =
1897                 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1898                  exec_list_is_empty(&nir_else_block->instr_list));
1899
1900         struct qblock *then_block = qir_new_block(c);
1901         struct qblock *after_block = qir_new_block(c);
1902         struct qblock *else_block;
1903         if (empty_else_block)
1904                 else_block = after_block;
1905         else
1906                 else_block = qir_new_block(c);
1907
1908         bool was_top_level = false;
1909         if (c->execute.file == QFILE_NULL) {
1910                 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
1911                 was_top_level = true;
1912         }
1913
1914         /* Set ZS for executing (execute == 0) and jumping (if->condition ==
1915          * 0) channels, and then update execute flags for those to point to
1916          * the ELSE block.
1917          */
1918         qir_SF(c, qir_OR(c,
1919                          c->execute,
1920                          ntq_get_src(c, if_stmt->condition, 0)));
1921         qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1922                      qir_uniform_ui(c, else_block->index));
1923
1924         /* Jump to ELSE if nothing is active for THEN, otherwise fall
1925          * through.
1926          */
1927         qir_SF(c, c->execute);
1928         qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC);
1929         qir_link_blocks(c->cur_block, else_block);
1930         qir_link_blocks(c->cur_block, then_block);
1931
1932         /* Process the THEN block. */
1933         qir_set_emit_block(c, then_block);
1934         ntq_emit_cf_list(c, &if_stmt->then_list);
1935
1936         if (!empty_else_block) {
1937                 /* Handle the end of the THEN block.  First, all currently
1938                  * active channels update their execute flags to point to
1939                  * ENDIF
1940                  * ENDIF.
1941                 qir_SF(c, c->execute);
1942                 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1943                              qir_uniform_ui(c, after_block->index));
1944
1945                 /* If everything points at ENDIF, then jump there immediately. */
1946                 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index)));
1947                 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1948                 qir_link_blocks(c->cur_block, after_block);
1949                 qir_link_blocks(c->cur_block, else_block);
1950
1951                 qir_set_emit_block(c, else_block);
1952                 ntq_activate_execute_for_block(c);
1953                 ntq_emit_cf_list(c, &if_stmt->else_list);
1954         }
1955
1956         qir_link_blocks(c->cur_block, after_block);
1957
1958         qir_set_emit_block(c, after_block);
1959         if (was_top_level) {
1960                 c->execute = c->undef;
1961                 c->last_top_block = c->cur_block;
1962         } else {
1963                 ntq_activate_execute_for_block(c);
1964         }
1965 }
1966
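/**
 * Emits a NIR break/continue by pointing the jumping channels' execute
 * value at the target block, branching there directly if every channel has
 * taken the jump.
 */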
1967 static void
1968 ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
1969 {
1970         struct qblock *jump_block;
1971         switch (jump->type) {
1972         case nir_jump_break:
1973                 jump_block = c->loop_break_block;
1974                 break;
1975         case nir_jump_continue:
1976                 jump_block = c->loop_cont_block;
1977                 break;
1978         default:
1979                 unreachable("Unsupported jump type\n");
1980         }
1981
1982         qir_SF(c, c->execute);
1983         qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1984                      qir_uniform_ui(c, jump_block->index));
1985
1986         /* Jump to the destination block if everyone has taken the jump. */
1987         qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
1988         qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1989         struct qblock *new_block = qir_new_block(c);
1990         qir_link_blocks(c->cur_block, jump_block);
1991         qir_link_blocks(c->cur_block, new_block);
1992         qir_set_emit_block(c, new_block);
1993 }
1994
1995 static void
1996 ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
1997 {
1998         switch (instr->type) {
1999         case nir_instr_type_alu:
2000                 ntq_emit_alu(c, nir_instr_as_alu(instr));
2001                 break;
2002
2003         case nir_instr_type_intrinsic:
2004                 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
2005                 break;
2006
2007         case nir_instr_type_load_const:
2008                 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
2009                 break;
2010
2011         case nir_instr_type_ssa_undef:
2012                 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
2013                 break;
2014
2015         case nir_instr_type_tex:
2016                 ntq_emit_tex(c, nir_instr_as_tex(instr));
2017                 break;
2018
2019         case nir_instr_type_jump:
2020                 ntq_emit_jump(c, nir_instr_as_jump(instr));
2021                 break;
2022
2023         default:
2024                 fprintf(stderr, "Unknown NIR instr type: ");
2025                 nir_print_instr(instr, stderr);
2026                 fprintf(stderr, "\n");
2027                 abort();
2028         }
2029 }
2030
2031 static void
2032 ntq_emit_block(struct vc4_compile *c, nir_block *block)
2033 {
2034         nir_foreach_instr(instr, block) {
2035                 ntq_emit_instr(c, instr);
2036         }
2037 }
2038
2039 static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
2040
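/**
 * Emits a NIR loop: the body is emitted starting at the continue block, and
 * we branch back to it as long as any channel has explicitly continued or
 * is still executing at the bottom of the loop.
 */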
2041 static void
2042 ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
2043 {
2044         if (!c->vc4->screen->has_control_flow) {
2045                 fprintf(stderr,
2046                         "loop support requires updated kernel.\n");
2047                 ntq_emit_cf_list(c, &loop->body);
2048                 return;
2049         }
2050
2051         bool was_top_level = false;
2052         if (c->execute.file == QFILE_NULL) {
2053                 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
2054                 was_top_level = true;
2055         }
2056
2057         struct qblock *save_loop_cont_block = c->loop_cont_block;
2058         struct qblock *save_loop_break_block = c->loop_break_block;
2059
2060         c->loop_cont_block = qir_new_block(c);
2061         c->loop_break_block = qir_new_block(c);
2062
2063         qir_link_blocks(c->cur_block, c->loop_cont_block);
2064         qir_set_emit_block(c, c->loop_cont_block);
2065         ntq_activate_execute_for_block(c);
2066
2067         ntq_emit_cf_list(c, &loop->body);
2068
2069         /* If anything had explicitly continued, or is here at the end of the
2070          * loop, then we need to loop again.  SF updates are masked by the
2071          * instruction's condition, so we can do the OR of the two conditions
2072          * within SF.
2073          */
2074         qir_SF(c, c->execute);
2075         struct qinst *cont_check =
2076                 qir_SUB_dest(c,
2077                              c->undef,
2078                              c->execute,
2079                              qir_uniform_ui(c, c->loop_cont_block->index));
2080         cont_check->cond = QPU_COND_ZC;
2081         cont_check->sf = true;
2082
2083         qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS);
2084         qir_link_blocks(c->cur_block, c->loop_cont_block);
2085         qir_link_blocks(c->cur_block, c->loop_break_block);
2086
2087         qir_set_emit_block(c, c->loop_break_block);
2088         if (was_top_level) {
2089                 c->execute = c->undef;
2090                 c->last_top_block = c->cur_block;
2091         } else {
2092                 ntq_activate_execute_for_block(c);
2093         }
2094
2095         c->loop_break_block = save_loop_break_block;
2096         c->loop_cont_block = save_loop_cont_block;
2097 }
2098
2099 static void
2100 ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
2101 {
2102         fprintf(stderr, "FUNCTIONS not handled.\n");
2103         abort();
2104 }
2105
2106 static void
2107 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
2108 {
2109         foreach_list_typed(nir_cf_node, node, node, list) {
2110                 switch (node->type) {
2111                 case nir_cf_node_block:
2112                         ntq_emit_block(c, nir_cf_node_as_block(node));
2113                         break;
2114
2115                 case nir_cf_node_if:
2116                         ntq_emit_if(c, nir_cf_node_as_if(node));
2117                         break;
2118
2119                 case nir_cf_node_loop:
2120                         ntq_emit_loop(c, nir_cf_node_as_loop(node));
2121                         break;
2122
2123                 case nir_cf_node_function:
2124                         ntq_emit_function(c, nir_cf_node_as_function(node));
2125                         break;
2126
2127                 default:
2128                         fprintf(stderr, "Unknown NIR node type\n");
2129                         abort();
2130                 }
2131         }
2132 }
2133
2134 static void
2135 ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
2136 {
2137         ntq_setup_registers(c, &impl->registers);
2138         ntq_emit_cf_list(c, &impl->body);
2139 }
2140
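/**
 * Translates the NIR shader to QIR: sets up inputs, outputs, uniforms, and
 * registers, then emits the body of the main function.
 */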
2141 static void
2142 nir_to_qir(struct vc4_compile *c)
2143 {
2144         if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
2145                 c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
2146
2147         ntq_setup_inputs(c);
2148         ntq_setup_outputs(c);
2149         ntq_setup_uniforms(c);
2150         ntq_setup_registers(c, &c->s->registers);
2151
2152         /* Find the main function and emit the body. */
2153         nir_foreach_function(function, c->s) {
2154                 assert(strcmp(function->name, "main") == 0);
2155                 assert(function->impl);
2156                 ntq_emit_impl(c, function->impl);
2157         }
2158 }
2159
2160 static const nir_shader_compiler_options nir_options = {
2161         .lower_extract_byte = true,
2162         .lower_extract_word = true,
2163         .lower_ffma = true,
2164         .lower_flrp32 = true,
2165         .lower_fpow = true,
2166         .lower_fsat = true,
2167         .lower_fsqrt = true,
2168         .lower_negate = true,
2169         .native_integers = true,
2170         .max_unroll_iterations = 32,
2171 };
2172
2173 const void *
2174 vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
2175                                 enum pipe_shader_ir ir, unsigned shader)
2176 {
2177         return &nir_options;
2178 }
2179
2180 static int
2181 count_nir_instrs(nir_shader *nir)
2182 {
2183         int count = 0;
2184         nir_foreach_function(function, nir) {
2185                 if (!function->impl)
2186                         continue;
2187                 nir_foreach_block(block, function->impl) {
2188                         nir_foreach_instr(instr, block)
2189                                 count++;
2190                 }
2191         }
2192         return count;
2193 }
2194
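/**
 * Compiles one shader variant: clones the bound NIR shader, applies the
 * key-dependent lowering passes, optimizes, translates to QIR, and
 * generates QPU code.
 */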
2195 static struct vc4_compile *
2196 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
2197                struct vc4_key *key, bool fs_threaded)
2198 {
2199         struct vc4_compile *c = qir_compile_init();
2200
2201         c->vc4 = vc4;
2202         c->stage = stage;
2203         c->shader_state = &key->shader_state->base;
2204         c->program_id = key->shader_state->program_id;
2205         c->variant_id =
2206                 p_atomic_inc_return(&key->shader_state->compiled_variant_count);
2207         c->fs_threaded = fs_threaded;
2208
2209         c->key = key;
2210         switch (stage) {
2211         case QSTAGE_FRAG:
2212                 c->fs_key = (struct vc4_fs_key *)key;
2213                 if (c->fs_key->is_points) {
2214                         c->point_x = emit_fragment_varying(c, ~0, 0);
2215                         c->point_y = emit_fragment_varying(c, ~0, 0);
2216                 } else if (c->fs_key->is_lines) {
2217                         c->line_x = emit_fragment_varying(c, ~0, 0);
2218                 }
2219                 break;
2220         case QSTAGE_VERT:
2221                 c->vs_key = (struct vc4_vs_key *)key;
2222                 break;
2223         case QSTAGE_COORD:
2224                 c->vs_key = (struct vc4_vs_key *)key;
2225                 break;
2226         }
2227
2228         c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
2229
2230         if (stage == QSTAGE_FRAG)
2231                 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
2232
2233         struct nir_lower_tex_options tex_options = {
2234                 /* We would need to implement txs, but we don't want the
2235                  * int/float conversions
2236                  */
2237                 .lower_rect = false,
2238
2239                 .lower_txp = ~0,
2240
2241                 /* Apply swizzles to all samplers. */
2242                 .swizzle_result = ~0,
2243         };
2244
2245         /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
2246          * The format swizzling applies before sRGB decode, and
2247          * ARB_texture_swizzle is the last thing before returning the sample.
2248          */
2249         for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
2250                 enum pipe_format format = c->key->tex[i].format;
2251
2252                 if (!format)
2253                         continue;
2254
2255                 const uint8_t *format_swizzle = vc4_get_format_swizzle(format);
2256
2257                 for (int j = 0; j < 4; j++) {
2258                         uint8_t arb_swiz = c->key->tex[i].swizzle[j];
2259
2260                         if (arb_swiz <= 3) {
2261                                 tex_options.swizzles[i][j] =
2262                                         format_swizzle[arb_swiz];
2263                         } else {
2264                                 tex_options.swizzles[i][j] = arb_swiz;
2265                         }
2266                 }
2267
2268                 if (util_format_is_srgb(format))
2269                         tex_options.lower_srgb |= (1 << i);
2270         }
2271
2272         NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
2273
2274         if (c->fs_key && c->fs_key->light_twoside)
2275                 NIR_PASS_V(c->s, nir_lower_two_sided_color);
2276
2277         if (c->vs_key && c->vs_key->clamp_color)
2278                 NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
2279
2280         if (c->key->ucp_enables) {
2281                 if (stage == QSTAGE_FRAG) {
2282                         NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
2283                 } else {
2284                         NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
2285                         NIR_PASS_V(c->s, nir_lower_io_to_scalar,
2286                                    nir_var_shader_out);
2287                 }
2288         }
2289
2290         /* FS input scalarizing must happen after nir_lower_two_sided_color,
2291          * which only handles a vec4 at a time.  Similarly, VS output
2292          * scalarizing must happen after nir_lower_clip_vs.
2293          */
2294         if (c->stage == QSTAGE_FRAG)
2295                 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
2296         else
2297                 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
2298
2299         NIR_PASS_V(c->s, vc4_nir_lower_io, c);
2300         NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
2301         NIR_PASS_V(c->s, nir_lower_idiv);
2302
2303         vc4_optimize_nir(c->s);
2304
2305         NIR_PASS_V(c->s, nir_convert_from_ssa, true);
2306
2307         if (vc4_debug & VC4_DEBUG_SHADERDB) {
2308                 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2309                         qir_get_stage_name(c->stage),
2310                         c->program_id, c->variant_id,
2311                         count_nir_instrs(c->s));
2312         }
2313
2314         if (vc4_debug & VC4_DEBUG_NIR) {
2315                 fprintf(stderr, "%s prog %d/%d NIR:\n",
2316                         qir_get_stage_name(c->stage),
2317                         c->program_id, c->variant_id);
2318                 nir_print_shader(c->s, stderr);
2319         }
2320
2321         nir_to_qir(c);
2322
2323         switch (stage) {
2324         case QSTAGE_FRAG:
2325                 /* FS threading requires that the thread execute
2326                  * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
2327                  * (with no other THRSW afterwards, obviously).  If we didn't
2328                  * fetch a texture at a top level block, this wouldn't be
2329                  * true.
2330                  */
2331                 if (c->fs_threaded && !c->last_thrsw_at_top_level) {
2332                         c->failed = true;
2333                         return c;
2334                 }
2335
2336                 emit_frag_end(c);
2337                 break;
2338         case QSTAGE_VERT:
2339                 emit_vert_end(c,
2340                               c->vs_key->fs_inputs->input_slots,
2341                               c->vs_key->fs_inputs->num_inputs);
2342                 break;
2343         case QSTAGE_COORD:
2344                 emit_coord_end(c);
2345                 break;
2346         }
2347
2348         if (vc4_debug & VC4_DEBUG_QIR) {
2349                 fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
2350                         qir_get_stage_name(c->stage),
2351                         c->program_id, c->variant_id);
2352                 qir_dump(c);
2353                 fprintf(stderr, "\n");
2354         }
2355
2356         qir_optimize(c);
2357         qir_lower_uniforms(c);
2358
2359         qir_schedule_instructions(c);
2360         qir_emit_uniform_stream_resets(c);
2361
2362         if (vc4_debug & VC4_DEBUG_QIR) {
2363                 fprintf(stderr, "%s prog %d/%d QIR:\n",
2364                         qir_get_stage_name(c->stage),
2365                         c->program_id, c->variant_id);
2366                 qir_dump(c);
2367                 fprintf(stderr, "\n");
2368         }
2369
2370         qir_reorder_uniforms(c);
2371         vc4_generate_code(vc4, c);
2372
2373         if (vc4_debug & VC4_DEBUG_SHADERDB) {
2374                 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2375                         qir_get_stage_name(c->stage),
2376                         c->program_id, c->variant_id,
2377                         c->qpu_inst_count);
2378                 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2379                         qir_get_stage_name(c->stage),
2380                         c->program_id, c->variant_id,
2381                         c->num_uniforms);
2382         }
2383
2384         ralloc_free(c->s);
2385
2386         return c;
2387 }
2388
2389 static void *
2390 vc4_shader_state_create(struct pipe_context *pctx,
2391                         const struct pipe_shader_state *cso)
2392 {
2393         struct vc4_context *vc4 = vc4_context(pctx);
2394         struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
2395         if (!so)
2396                 return NULL;
2397
2398         so->program_id = vc4->next_uncompiled_program_id++;
2399
2400         nir_shader *s;
2401
2402         if (cso->type == PIPE_SHADER_IR_NIR) {
2403                 /* The backend takes ownership of the NIR shader on state
2404                  * creation.
2405                  */
2406                 s = cso->ir.nir;
2407         } else {
2408                 assert(cso->type == PIPE_SHADER_IR_TGSI);
2409
2410                 if (vc4_debug & VC4_DEBUG_TGSI) {
2411                         fprintf(stderr, "prog %d TGSI:\n",
2412                                 so->program_id);
2413                         tgsi_dump(cso->tokens, 0);
2414                         fprintf(stderr, "\n");
2415                 }
2416                 s = tgsi_to_nir(cso->tokens, &nir_options);
2417         }
2418
2419         NIR_PASS_V(s, nir_opt_global_to_local);
2420         NIR_PASS_V(s, nir_lower_regs_to_ssa);
2421         NIR_PASS_V(s, nir_normalize_cubemap_coords);
2422
2423         NIR_PASS_V(s, nir_lower_load_const_to_scalar);
2424
2425         vc4_optimize_nir(s);
2426
2427         NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);
2428
2429         /* Garbage collect dead instructions */
2430         nir_sweep(s);
2431
2432         so->base.type = PIPE_SHADER_IR_NIR;
2433         so->base.ir.nir = s;
2434
2435         if (vc4_debug & VC4_DEBUG_NIR) {
2436                 fprintf(stderr, "%s prog %d NIR:\n",
2437                         gl_shader_stage_name(s->stage),
2438                         so->program_id);
2439                 nir_print_shader(s, stderr);
2440                 fprintf(stderr, "\n");
2441         }
2442
2443         return so;
2444 }
2445
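/**
 * Copies the compiled uniform stream (contents and data) out of the compile
 * context into the compiled shader, for use at draw time.
 */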
2446 static void
2447 copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
2448                              struct vc4_compile *c)
2449 {
2450         int count = c->num_uniforms;
2451         struct vc4_shader_uniform_info *uinfo = &shader->uniforms;
2452
2453         uinfo->count = count;
2454         uinfo->data = ralloc_array(shader, uint32_t, count);
2455         memcpy(uinfo->data, c->uniform_data,
2456                count * sizeof(*uinfo->data));
2457         uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
2458         memcpy(uinfo->contents, c->uniform_contents,
2459                count * sizeof(*uinfo->contents));
2460         uinfo->num_texture_samples = c->num_texture_samples;
2461
2462         vc4_set_shader_uniform_dirty_flags(shader);
2463 }
2464
2465 static void
2466 vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
2467                              struct vc4_compiled_shader *shader)
2468 {
2469         struct vc4_fs_inputs inputs;
2470
2471         memset(&inputs, 0, sizeof(inputs));
2472         inputs.input_slots = ralloc_array(shader,
2473                                           struct vc4_varying_slot,
2474                                           c->num_input_slots);
2475
2476         bool input_live[c->num_input_slots];
2477
2478         memset(input_live, 0, sizeof(input_live));
2479         qir_for_each_inst_inorder(inst, c) {
2480                 for (int i = 0; i < qir_get_nsrc(inst); i++) {
2481                         if (inst->src[i].file == QFILE_VARY)
2482                                 input_live[inst->src[i].index] = true;
2483                 }
2484         }
2485
2486         for (int i = 0; i < c->num_input_slots; i++) {
2487                 struct vc4_varying_slot *slot = &c->input_slots[i];
2488
2489                 if (!input_live[i])
2490                         continue;
2491
2492                 /* Skip non-VS-output inputs. */
2493                 if (slot->slot == (uint8_t)~0)
2494                         continue;
2495
2496                 if (slot->slot == VARYING_SLOT_COL0 ||
2497                     slot->slot == VARYING_SLOT_COL1 ||
2498                     slot->slot == VARYING_SLOT_BFC0 ||
2499                     slot->slot == VARYING_SLOT_BFC1) {
2500                         shader->color_inputs |= (1 << inputs.num_inputs);
2501                 }
2502
2503                 inputs.input_slots[inputs.num_inputs] = *slot;
2504                 inputs.num_inputs++;
2505         }
2506         shader->num_inputs = inputs.num_inputs;
2507
2508         /* Add our set of inputs to the set of all inputs seen.  This way, we
2509          * can have a single pointer that identifies an FS inputs set,
2510          * allowing VS to avoid recompiling when the FS is recompiled (or a
2511          * new one is bound using separate shader objects) but the inputs
2512          * don't change.
2513          */
2514         struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
2515         if (entry) {
2516                 shader->fs_inputs = entry->key;
2517                 ralloc_free(inputs.input_slots);
2518         } else {
2519                 struct vc4_fs_inputs *alloc_inputs;
2520
2521                 alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
2522                 memcpy(alloc_inputs, &inputs, sizeof(inputs));
2523                 ralloc_steal(alloc_inputs, inputs.input_slots);
2524                 _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);
2525
2526                 shader->fs_inputs = alloc_inputs;
2527         }
2528 }
2529
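/**
 * Returns the compiled shader variant for the given key, compiling and
 * caching a new one if it isn't already in the per-stage cache.
 */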
2530 static struct vc4_compiled_shader *
2531 vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
2532                         struct vc4_key *key)
2533 {
2534         struct hash_table *ht;
2535         uint32_t key_size;
2536         bool try_threading;
2537
2538         if (stage == QSTAGE_FRAG) {
2539                 ht = vc4->fs_cache;
2540                 key_size = sizeof(struct vc4_fs_key);
2541                 try_threading = vc4->screen->has_threaded_fs;
2542         } else {
2543                 ht = vc4->vs_cache;
2544                 key_size = sizeof(struct vc4_vs_key);
2545                 try_threading = false;
2546         }
2547
2548         struct vc4_compiled_shader *shader;
2549         struct hash_entry *entry = _mesa_hash_table_search(ht, key);
2550         if (entry)
2551                 return entry->data;
2552
2553         struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
2554         /* If the FS failed to compile threaded, fall back to single threaded. */
2555         if (try_threading && c->failed) {
2556                 qir_compile_destroy(c);
2557                 c = vc4_shader_ntq(vc4, stage, key, false);
2558         }
2559
2560         shader = rzalloc(NULL, struct vc4_compiled_shader);
2561
2562         shader->program_id = vc4->next_compiled_program_id++;
2563         if (stage == QSTAGE_FRAG) {
2564                 vc4_setup_compiled_fs_inputs(vc4, c, shader);
2565
2566                 /* Note: the temporary clone in c->s has been freed. */
2567                 nir_shader *orig_shader = key->shader_state->base.ir.nir;
2568                 if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
2569                         shader->disable_early_z = true;
2570         } else {
2571                 shader->num_inputs = c->num_inputs;
2572
2573                 shader->vattr_offsets[0] = 0;
2574                 for (int i = 0; i < 8; i++) {
2575                         shader->vattr_offsets[i + 1] =
2576                                 shader->vattr_offsets[i] + c->vattr_sizes[i];
2577
2578                         if (c->vattr_sizes[i])
2579                                 shader->vattrs_live |= (1 << i);
2580                 }
2581         }
2582
2583         shader->failed = c->failed;
2584         if (c->failed) {
2585                 shader->failed = true;
2586         } else {
2587                 copy_uniform_state_to_shader(shader, c);
2588                 shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
2589                                                  c->qpu_inst_count *
2590                                                  sizeof(uint64_t));
2591         }
2592
2593         shader->fs_threaded = c->fs_threaded;
2594
2595         /* Copy the compiler UBO range state to the compiled shader, dropping
2596          * out arrays that were never referenced by an indirect load.
2597          *
2598          * (Note that QIR dead code elimination of an array access still
2599          * leaves that array alive, though)
2600          */
2601         if (c->num_ubo_ranges) {
2602                 shader->num_ubo_ranges = c->num_ubo_ranges;
2603                 shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
2604                                                   c->num_ubo_ranges);
2605                 uint32_t j = 0;
2606                 for (int i = 0; i < c->num_uniform_ranges; i++) {
2607                         struct vc4_compiler_ubo_range *range =
2608                                 &c->ubo_ranges[i];
2609                         if (!range->used)
2610                                 continue;
2611
2612                         shader->ubo_ranges[j].dst_offset = range->dst_offset;
2613                         shader->ubo_ranges[j].src_offset = range->src_offset;
2614                         shader->ubo_ranges[j].size = range->size;
2615                         shader->ubo_size += c->ubo_ranges[i].size;
2616                         j++;
2617                 }
2618         }
2619         if (shader->ubo_size) {
2620                 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2621                         fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
2622                                 qir_get_stage_name(c->stage),
2623                                 c->program_id, c->variant_id,
2624                                 shader->ubo_size / 4);
2625                 }
2626         }
2627
2628         qir_compile_destroy(c);
2629
2630         struct vc4_key *dup_key;
2631         dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
2632         memcpy(dup_key, key, key_size);
2633         _mesa_hash_table_insert(ht, dup_key, shader);
2634
2635         return shader;
2636 }
2637
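/**
 * Fills in the parts of the compile key shared by the FS and VS: per-unit
 * texture format, swizzle, MSAA size, and sampler wrap/compare state, plus
 * the enabled user clip planes.
 */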
2638 static void
2639 vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
2640                      struct vc4_texture_stateobj *texstate)
2641 {
2642         for (int i = 0; i < texstate->num_textures; i++) {
2643                 struct pipe_sampler_view *sampler = texstate->textures[i];
2644                 struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
2645                 struct pipe_sampler_state *sampler_state =
2646                         texstate->samplers[i];
2647
2648                 if (!sampler)
2649                         continue;
2650
2651                 key->tex[i].format = sampler->format;
2652                 key->tex[i].swizzle[0] = sampler->swizzle_r;
2653                 key->tex[i].swizzle[1] = sampler->swizzle_g;
2654                 key->tex[i].swizzle[2] = sampler->swizzle_b;
2655                 key->tex[i].swizzle[3] = sampler->swizzle_a;
2656
2657                 if (sampler->texture->nr_samples > 1) {
2658                         key->tex[i].msaa_width = sampler->texture->width0;
2659                         key->tex[i].msaa_height = sampler->texture->height0;
2660                 } else if (sampler_state) {
2661                         key->tex[i].compare_mode = sampler_state->compare_mode;
2662                         key->tex[i].compare_func = sampler_state->compare_func;
2663                         key->tex[i].wrap_s = sampler_state->wrap_s;
2664                         key->tex[i].wrap_t = sampler_state->wrap_t;
2665                         key->tex[i].force_first_level =
2666                                 vc4_sampler->force_first_level;
2667                 }
2668         }
2669
2670         key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
2671 }
2672
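/**
 * Updates vc4->prog.fs for the current state, compiling a new FS variant if
 * any state contributing to the FS key has changed, and flags the derived
 * state that depends on the result.
 */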
2673 static void
2674 vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
2675 {
2676         struct vc4_job *job = vc4->job;
2677         struct vc4_fs_key local_key;
2678         struct vc4_fs_key *key = &local_key;
2679
2680         if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2681                             VC4_DIRTY_BLEND |
2682                             VC4_DIRTY_FRAMEBUFFER |
2683                             VC4_DIRTY_ZSA |
2684                             VC4_DIRTY_RASTERIZER |
2685                             VC4_DIRTY_SAMPLE_MASK |
2686                             VC4_DIRTY_FRAGTEX |
2687                             VC4_DIRTY_UNCOMPILED_FS))) {
2688                 return;
2689         }
2690
2691         memset(key, 0, sizeof(*key));
2692         vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
2693         key->base.shader_state = vc4->prog.bind_fs;
2694         key->is_points = (prim_mode == PIPE_PRIM_POINTS);
2695         key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
2696                          prim_mode <= PIPE_PRIM_LINE_STRIP);
2697         key->blend = vc4->blend->rt[0];
2698         if (vc4->blend->logicop_enable) {
2699                 key->logicop_func = vc4->blend->logicop_func;
2700         } else {
2701                 key->logicop_func = PIPE_LOGICOP_COPY;
2702         }
2703         if (job->msaa) {
2704                 key->msaa = vc4->rasterizer->base.multisample;
2705                 key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
2706                 key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
2707                 key->sample_alpha_to_one = vc4->blend->alpha_to_one;
2708         }
2709
2710         if (vc4->framebuffer.cbufs[0])
2711                 key->color_format = vc4->framebuffer.cbufs[0]->format;
2712
2713         key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
2714         key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
2715         key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
2716         key->depth_enabled = (vc4->zsa->base.depth.enabled ||
2717                               key->stencil_enabled);
2718         if (vc4->zsa->base.alpha.enabled) {
2719                 key->alpha_test = true;
2720                 key->alpha_test_func = vc4->zsa->base.alpha.func;
2721         }
2722
2723         if (key->is_points) {
2724                 key->point_sprite_mask =
2725                         vc4->rasterizer->base.sprite_coord_enable;
2726                 key->point_coord_upper_left =
2727                         (vc4->rasterizer->base.sprite_coord_mode ==
2728                          PIPE_SPRITE_COORD_UPPER_LEFT);
2729         }
2730
2731         key->light_twoside = vc4->rasterizer->base.light_twoside;
2732
2733         struct vc4_compiled_shader *old_fs = vc4->prog.fs;
2734         vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
2735         if (vc4->prog.fs == old_fs)
2736                 return;
2737
2738         vc4->dirty |= VC4_DIRTY_COMPILED_FS;
2739
2740         if (vc4->rasterizer->base.flatshade &&
2741             old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
2742                 vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
2743         }
2744
2745         if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
2746                 vc4->dirty |= VC4_DIRTY_FS_INPUTS;
2747 }
2748
2749 static void
2750 vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
2751 {
2752         struct vc4_vs_key local_key;
2753         struct vc4_vs_key *key = &local_key;
2754
2755         if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
2756                             VC4_DIRTY_RASTERIZER |
2757                             VC4_DIRTY_VERTTEX |
2758                             VC4_DIRTY_VTXSTATE |
2759                             VC4_DIRTY_UNCOMPILED_VS |
2760                             VC4_DIRTY_FS_INPUTS))) {
2761                 return;
2762         }
2763
2764         memset(key, 0, sizeof(*key));
2765         vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
2766         key->base.shader_state = vc4->prog.bind_vs;
2767         key->fs_inputs = vc4->prog.fs->fs_inputs;
2768         key->clamp_color = vc4->rasterizer->base.clamp_vertex_color;
2769
2770         for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
2771                 key->attr_formats[i] = vc4->vtx->pipe[i].src_format;
2772
2773         key->per_vertex_point_size =
2774                 (prim_mode == PIPE_PRIM_POINTS &&
2775                  vc4->rasterizer->base.point_size_per_vertex);
2776
2777         struct vc4_compiled_shader *vs =
2778                 vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
2779         if (vs != vc4->prog.vs) {
2780                 vc4->prog.vs = vs;
2781                 vc4->dirty |= VC4_DIRTY_COMPILED_VS;
2782         }
2783
2784         key->is_coord = true;
2785         /* Coord shaders don't care what the FS inputs are. */
2786         key->fs_inputs = NULL;
2787         struct vc4_compiled_shader *cs =
2788                 vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
2789         if (cs != vc4->prog.cs) {
2790                 vc4->prog.cs = cs;
2791                 vc4->dirty |= VC4_DIRTY_COMPILED_CS;
2792         }
2793 }
2794
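/* Updates the compiled FS, VS, and coordinate shaders for the current draw
 * state.  Returns false if any of them failed to compile.
 */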
2795 bool
2796 vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
2797 {
2798         vc4_update_compiled_fs(vc4, prim_mode);
2799         vc4_update_compiled_vs(vc4, prim_mode);
2800
2801         return !(vc4->prog.cs->failed ||
2802                  vc4->prog.vs->failed ||
2803                  vc4->prog.fs->failed);
2804 }
2805
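/* Hash table callbacks for the per-context FS and VS variant caches, keyed
 * on the full key structs.
 */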
2806 static uint32_t
2807 fs_cache_hash(const void *key)
2808 {
2809         return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
2810 }
2811
2812 static uint32_t
2813 vs_cache_hash(const void *key)
2814 {
2815         return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
2816 }
2817
2818 static bool
2819 fs_cache_compare(const void *key1, const void *key2)
2820 {
2821         return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
2822 }
2823
2824 static bool
2825 vs_cache_compare(const void *key1, const void *key2)
2826 {
2827         return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
2828 }
2829
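/* Set callbacks for deduplicating FS input layouts, so that VS keys and
 * compiled shaders can compare them by pointer.
 */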
2830 static uint32_t
2831 fs_inputs_hash(const void *key)
2832 {
2833         const struct vc4_fs_inputs *inputs = key;
2834
2835         return _mesa_hash_data(inputs->input_slots,
2836                                sizeof(*inputs->input_slots) *
2837                                inputs->num_inputs);
2838 }
2839
2840 static bool
2841 fs_inputs_compare(const void *key1, const void *key2)
2842 {
2843         const struct vc4_fs_inputs *inputs1 = key1;
2844         const struct vc4_fs_inputs *inputs2 = key2;
2845
2846         return (inputs1->num_inputs == inputs2->num_inputs &&
2847                 memcmp(inputs1->input_slots,
2848                        inputs2->input_slots,
2849                        sizeof(*inputs1->input_slots) *
2850                        inputs1->num_inputs) == 0);
2851 }
2852
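/* Removes a cache entry (and frees its compiled shader) if it was compiled
 * from the shader state being deleted.
 */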
2853 static void
2854 delete_from_cache_if_matches(struct hash_table *ht,
2855                              struct hash_entry *entry,
2856                              struct vc4_uncompiled_shader *so)
2857 {
2858         const struct vc4_key *key = entry->key;
2859
2860         if (key->shader_state == so) {
2861                 struct vc4_compiled_shader *shader = entry->data;
2862                 _mesa_hash_table_remove(ht, entry);
2863                 vc4_bo_unreference(&shader->bo);
2864                 ralloc_free(shader);
2865         }
2866 }
2867
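/* Frees an uncompiled shader, evicting any compiled variants of it from the
 * caches first.
 */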
2868 static void
2869 vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
2870 {
2871         struct vc4_context *vc4 = vc4_context(pctx);
2872         struct vc4_uncompiled_shader *so = hwcso;
2873
2874         struct hash_entry *entry;
2875         hash_table_foreach(vc4->fs_cache, entry)
2876                 delete_from_cache_if_matches(vc4->fs_cache, entry, so);
2877         hash_table_foreach(vc4->vs_cache, entry)
2878                 delete_from_cache_if_matches(vc4->vs_cache, entry, so);
2879
2880         ralloc_free(so->base.ir.nir);
2881         free(so);
2882 }
2883
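/* CSO bind hooks: just record the uncompiled shader and mark it dirty so the
 * next draw picks up (and if necessary compiles) the right variant.
 */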
2884 static void
2885 vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
2886 {
2887         struct vc4_context *vc4 = vc4_context(pctx);
2888         vc4->prog.bind_fs = hwcso;
2889         vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
2890 }
2891
2892 static void
2893 vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
2894 {
2895         struct vc4_context *vc4 = vc4_context(pctx);
2896         vc4->prog.bind_vs = hwcso;
2897         vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
2898 }
2899
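/* Registers the shader CSO hooks and allocates the variant caches, which are
 * ralloc'ed off the pipe_context.
 */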
2900 void
2901 vc4_program_init(struct pipe_context *pctx)
2902 {
2903         struct vc4_context *vc4 = vc4_context(pctx);
2904
2905         pctx->create_vs_state = vc4_shader_state_create;
2906         pctx->delete_vs_state = vc4_shader_state_delete;
2907
2908         pctx->create_fs_state = vc4_shader_state_create;
2909         pctx->delete_fs_state = vc4_shader_state_delete;
2910
2911         pctx->bind_fs_state = vc4_fp_state_bind;
2912         pctx->bind_vs_state = vc4_vp_state_bind;
2913
2914         vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
2915                                                 fs_cache_compare);
2916         vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
2917                                                 vs_cache_compare);
2918         vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
2919                                               fs_inputs_compare);
2920 }
2921
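/* Frees all remaining compiled shader variants at context teardown. */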
2922 void
2923 vc4_program_fini(struct pipe_context *pctx)
2924 {
2925         struct vc4_context *vc4 = vc4_context(pctx);
2926
2927         struct hash_entry *entry;
2928         hash_table_foreach(vc4->fs_cache, entry) {
2929                 struct vc4_compiled_shader *shader = entry->data;
2930                 vc4_bo_unreference(&shader->bo);
2931                 ralloc_free(shader);
2932                 _mesa_hash_table_remove(vc4->fs_cache, entry);
2933         }
2934
2935         hash_table_foreach(vc4->vs_cache, entry) {
2936                 struct vc4_compiled_shader *shader = entry->data;
2937                 vc4_bo_unreference(&shader->bo);
2938                 ralloc_free(shader);
2939                 _mesa_hash_table_remove(vc4->vs_cache, entry);
2940         }
2941 }