2 * Copyright (c) 2014 Scott Mansell
3 * Copyright © 2014 Broadcom
5 * Permission is hereby granted, free of charge, to any person obtaining a
6 * copy of this software and associated documentation files (the "Software"),
7 * to deal in the Software without restriction, including without limitation
8 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
9 * and/or sell copies of the Software, and to permit persons to whom the
10 * Software is furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice (including the next
13 * paragraph) shall be included in all copies or substantial portions of the
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 #include "util/u_format.h"
27 #include "util/crc32.h"
28 #include "util/u_math.h"
29 #include "util/u_memory.h"
30 #include "util/ralloc.h"
31 #include "util/hash_table.h"
32 #include "tgsi/tgsi_dump.h"
33 #include "tgsi/tgsi_parse.h"
34 #include "compiler/nir/nir.h"
35 #include "compiler/nir/nir_builder.h"
36 #include "nir/tgsi_to_nir.h"
37 #include "vc4_context.h"
40 #include "mesa/state_tracker/st_glsl_types.h"
43 ntq_get_src(struct vc4_compile *c, nir_src src, int i);
45 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
48 resize_qreg_array(struct vc4_compile *c,
53 if (*size >= decl_size)
56 uint32_t old_size = *size;
57 *size = MAX2(*size * 2, decl_size);
58 *regs = reralloc(c, *regs, struct qreg, *size);
60 fprintf(stderr, "Malloc failure\n");
64 for (uint32_t i = old_size; i < *size; i++)
65 (*regs)[i] = c->undef;
69 ntq_emit_thrsw(struct vc4_compile *c)
74 /* Always thread switch after each texture operation for now.
76 * We could do better by batching a bunch of texture fetches up and
77 * then doing one thread switch and collecting all their results afterward. */
80 qir_emit_nondef(c, qir_inst(QOP_THRSW, c->undef,
82 c->last_thrsw_at_top_level = (c->execute.file == QFILE_NULL);
86 indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
88 struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
89 uint32_t offset = nir_intrinsic_base(intr);
90 struct vc4_compiler_ubo_range *range = NULL;
92 for (i = 0; i < c->num_uniform_ranges; i++) {
93 range = &c->ubo_ranges[i];
94 if (offset >= range->src_offset &&
95 offset < range->src_offset + range->size) {
99 /* The driver-location-based offset always has to be within a declared uniform range. */
105 range->dst_offset = c->next_ubo_dst_offset;
106 c->next_ubo_dst_offset += range->size;
110 offset -= range->src_offset;
112 /* Adjust for where we stored the TGSI register base. */
113 indirect_offset = qir_ADD(c, indirect_offset,
114 qir_uniform_ui(c, (range->dst_offset +
117 /* Clamp to [0, array size). Note that MIN/MAX are signed. */
118 indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
119 indirect_offset = qir_MIN_NOIMM(c, indirect_offset,
120 qir_uniform_ui(c, (range->dst_offset +
123 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
125 qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
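/* The write to TEX_S_DIRECT above (clamped offset plus the UBO base address)
 * issues a general, non-texture memory lookup through the TMU; the 32-bit
 * result is collected by qir_TEX_RESULT() below.
 */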
127 c->num_texture_samples++;
131 return qir_TEX_RESULT(c);
135 vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
139 case PIPE_SWIZZLE_NONE:
140 fprintf(stderr, "warning: unknown swizzle\n");
143 return nir_imm_float(b, 0.0);
145 return nir_imm_float(b, 1.0);
155 ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
157 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
158 def->num_components);
159 _mesa_hash_table_insert(c->def_ht, def, qregs);
164 * This function is responsible for getting QIR results into the associated
165 * storage for a NIR instruction.
167 * If it's a NIR SSA def, then we just set the associated hash table entry to the new result.
170 * If it's a NIR reg, then we need to update the existing qreg assigned to the
171 * NIR destination with the incoming value. To do that without introducing
172 * new MOVs, we require that the incoming qreg either be a uniform, or be
173 * SSA-defined by the previous QIR instruction in the block and rewritable by
174 * this function. That lets us sneak ahead and insert the SF flag beforehand
175 * (knowing that the previous instruction doesn't depend on flags) and rewrite
176 * its destination to be the NIR reg's destination. */
179 ntq_store_dest(struct vc4_compile *c, nir_dest *dest, int chan,
182 struct qinst *last_inst = NULL;
183 if (!list_empty(&c->cur_block->instructions))
184 last_inst = (struct qinst *)c->cur_block->instructions.prev;
186 assert(result.file == QFILE_UNIF ||
187 (result.file == QFILE_TEMP &&
188 last_inst && last_inst == c->defs[result.index]));
191 assert(chan < dest->ssa.num_components);
194 struct hash_entry *entry =
195 _mesa_hash_table_search(c->def_ht, &dest->ssa);
200 qregs = ntq_init_ssa_def(c, &dest->ssa);
202 qregs[chan] = result;
204 nir_register *reg = dest->reg.reg;
205 assert(dest->reg.base_offset == 0);
206 assert(reg->num_array_elems == 0);
207 struct hash_entry *entry =
208 _mesa_hash_table_search(c->def_ht, reg);
209 struct qreg *qregs = entry->data;
211 /* Insert a MOV if the source wasn't an SSA def in the
212 * previous instruction. */
214 if (result.file == QFILE_UNIF) {
215 result = qir_MOV(c, result);
216 last_inst = c->defs[result.index];
219 /* We know they're both temps, so just rewrite index. */
220 c->defs[last_inst->dst.index] = NULL;
221 last_inst->dst.index = qregs[chan].index;
223 /* If we're in control flow, then make this update of the reg
224 * conditional on the execution mask. */
226 if (c->execute.file != QFILE_NULL) {
227 last_inst->dst.index = qregs[chan].index;
229 /* Set the flags to the current exec mask. To insert
230 * the SF, we temporarily remove our SSA instruction. */
232 list_del(&last_inst->link);
233 qir_SF(c, c->execute);
234 list_addtail(&last_inst->link,
235 &c->cur_block->instructions);
237 last_inst->cond = QPU_COND_ZS;
238 last_inst->cond_is_exec_mask = true;
244 ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
247 struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
248 for (int i = 0; i < dest->ssa.num_components; i++)
252 nir_register *reg = dest->reg.reg;
253 assert(dest->reg.base_offset == 0);
254 assert(reg->num_array_elems == 0);
255 struct hash_entry *entry =
256 _mesa_hash_table_search(c->def_ht, reg);
262 ntq_get_src(struct vc4_compile *c, nir_src src, int i)
264 struct hash_entry *entry;
266 entry = _mesa_hash_table_search(c->def_ht, src.ssa);
267 assert(i < src.ssa->num_components);
269 nir_register *reg = src.reg.reg;
270 entry = _mesa_hash_table_search(c->def_ht, reg);
271 assert(reg->num_array_elems == 0);
272 assert(src.reg.base_offset == 0);
273 assert(i < reg->num_components);
276 struct qreg *qregs = entry->data;
281 ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
284 assert(util_is_power_of_two(instr->dest.write_mask));
285 unsigned chan = ffs(instr->dest.write_mask) - 1;
286 struct qreg r = ntq_get_src(c, instr->src[src].src,
287 instr->src[src].swizzle[chan]);
289 assert(!instr->src[src].abs);
290 assert(!instr->src[src].negate);
295 static inline struct qreg
296 qir_SAT(struct vc4_compile *c, struct qreg val)
299 qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
300 qir_uniform_f(c, 0.0));
304 ntq_rcp(struct vc4_compile *c, struct qreg x)
306 struct qreg r = qir_RCP(c, x);
308 /* Apply a Newton-Raphson step to improve the accuracy. */
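/* With r = RCP(x) as the hardware estimate of 1/x, one iteration is
 * r' = r * (2 - x*r), which roughly doubles the number of correct bits.
 */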
309 r = qir_FMUL(c, r, qir_FSUB(c,
310 qir_uniform_f(c, 2.0),
317 ntq_rsq(struct vc4_compile *c, struct qreg x)
319 struct qreg r = qir_RSQ(c, x);
321 /* Apply a Newton-Raphson step to improve the accuracy. */
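/* With r = RSQ(x) as the hardware estimate of 1/sqrt(x), one iteration is
 * r' = r * (1.5 - 0.5 * x * r * r).
 */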
322 r = qir_FMUL(c, r, qir_FSUB(c,
323 qir_uniform_f(c, 1.5),
325 qir_uniform_f(c, 0.5),
327 qir_FMUL(c, r, r)))));
333 ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
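/* MUL24 only multiplies the low 24 bits of each operand, so the low 32 bits
 * of the full product are assembled from partial products:
 *
 *   a * b = lo24(a)*lo24(b) + ((hi8(a)*lo24(b) + lo24(a)*hi8(b)) << 24)  (mod 2^32)
 *
 * where hi8(x) = x >> 24.  The hi8(a)*hi8(b) term only affects bits 48 and
 * up, so it can be dropped.
 */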
335 struct qreg src0_hi = qir_SHR(c, src0,
336 qir_uniform_ui(c, 24));
337 struct qreg src1_hi = qir_SHR(c, src1,
338 qir_uniform_ui(c, 24));
340 struct qreg hilo = qir_MUL24(c, src0_hi, src1);
341 struct qreg lohi = qir_MUL24(c, src0, src1_hi);
342 struct qreg lolo = qir_MUL24(c, src0, src1);
344 return qir_ADD(c, lolo, qir_SHL(c,
345 qir_ADD(c, hilo, lohi),
346 qir_uniform_ui(c, 24)));
350 ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
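/* The TMU returns depth as a 24-bit value in the top bits of the 32-bit
 * word, so shift it down and scale by 1/0xffffff to get a normalized
 * [0, 1] float.
 */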
352 struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
353 qir_uniform_ui(c, 8)));
354 return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
358 * Emits a lowered TXF_MS from an MSAA texture.
360 * The addressing math has been lowered in NIR, and now we just need to read it like a UBO. */
364 ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
366 uint32_t tile_width = 32;
367 uint32_t tile_height = 32;
368 uint32_t tile_size = (tile_height * tile_width *
369 VC4_MAX_SAMPLES * sizeof(uint32_t));
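/* The raw MSAA surface is laid out as 32x32-pixel tiles with VC4_MAX_SAMPLES
 * 32-bit samples per pixel; its total size bounds the address clamp below.
 */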
371 unsigned unit = instr->texture_index;
372 uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
373 uint32_t w_tiles = w / tile_width;
374 uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
375 uint32_t h_tiles = h / tile_height;
376 uint32_t size = w_tiles * h_tiles * tile_size;
379 assert(instr->num_srcs == 1);
380 assert(instr->src[0].src_type == nir_tex_src_coord);
381 addr = ntq_get_src(c, instr->src[0].src, 0);
383 /* Perform the clamping required by kernel validation. */
384 addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
385 addr = qir_MIN_NOIMM(c, addr, qir_uniform_ui(c, size - 4));
387 qir_ADD_dest(c, qir_reg(QFILE_TEX_S_DIRECT, 0),
388 addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));
392 struct qreg tex = qir_TEX_RESULT(c);
393 c->num_texture_samples++;
395 enum pipe_format format = c->key->tex[unit].format;
396 if (util_format_is_depth_or_stencil(format)) {
397 struct qreg scaled = ntq_scale_depth_texture(c, tex);
398 for (int i = 0; i < 4; i++)
399 ntq_store_dest(c, &instr->dest, i, qir_MOV(c, scaled));
401 for (int i = 0; i < 4; i++)
402 ntq_store_dest(c, &instr->dest, i,
403 qir_UNPACK_8_F(c, tex, i));
408 ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
410 struct qreg s, t, r, lod, compare;
411 bool is_txb = false, is_txl = false;
412 unsigned unit = instr->texture_index;
414 if (instr->op == nir_texop_txf) {
415 ntq_emit_txf(c, instr);
419 for (unsigned i = 0; i < instr->num_srcs; i++) {
420 switch (instr->src[i].src_type) {
421 case nir_tex_src_coord:
422 s = ntq_get_src(c, instr->src[i].src, 0);
423 if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
424 t = qir_uniform_f(c, 0.5);
426 t = ntq_get_src(c, instr->src[i].src, 1);
427 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
428 r = ntq_get_src(c, instr->src[i].src, 2);
430 case nir_tex_src_bias:
431 lod = ntq_get_src(c, instr->src[i].src, 0);
434 case nir_tex_src_lod:
435 lod = ntq_get_src(c, instr->src[i].src, 0);
438 case nir_tex_src_comparator:
439 compare = ntq_get_src(c, instr->src[i].src, 0);
442 unreachable("unknown texture source");
446 if (c->stage != QSTAGE_FRAG && !is_txl) {
447 /* From the GLSL 1.20 spec:
449 * "If it is mip-mapped and running on the vertex shader,
450 * then the base texture is used." */
453 lod = qir_uniform_ui(c, 0);
456 if (c->key->tex[unit].force_first_level) {
457 lod = qir_uniform(c, QUNIFORM_TEXTURE_FIRST_LEVEL, unit);
462 struct qreg texture_u[] = {
463 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
464 qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
465 qir_uniform(c, QUNIFORM_CONSTANT, 0),
466 qir_uniform(c, QUNIFORM_CONSTANT, 0),
468 uint32_t next_texture_u = 0;
470 /* There is no native support for GL texture rectangle coordinates, so
471 * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0, 1]). */
474 if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
476 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
478 qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
481 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
482 texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
483 unit | (is_txl << 16));
487 if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
488 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0), r);
489 tmu->src[qir_get_tex_uniform_src(tmu)] =
490 texture_u[next_texture_u++];
491 } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
492 c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
493 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
494 c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
495 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_R, 0),
496 qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR,
498 tmu->src[qir_get_tex_uniform_src(tmu)] =
499 texture_u[next_texture_u++];
502 if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
506 if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
510 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_T, 0), t);
511 tmu->src[qir_get_tex_uniform_src(tmu)] =
512 texture_u[next_texture_u++];
514 if (is_txl || is_txb) {
515 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_B, 0), lod);
516 tmu->src[qir_get_tex_uniform_src(tmu)] =
517 texture_u[next_texture_u++];
520 tmu = qir_MOV_dest(c, qir_reg(QFILE_TEX_S, 0), s);
521 tmu->src[qir_get_tex_uniform_src(tmu)] = texture_u[next_texture_u++];
523 c->num_texture_samples++;
527 struct qreg tex = qir_TEX_RESULT(c);
529 enum pipe_format format = c->key->tex[unit].format;
531 struct qreg *dest = ntq_get_dest(c, &instr->dest);
532 if (util_format_is_depth_or_stencil(format)) {
533 struct qreg normalized = ntq_scale_depth_texture(c, tex);
534 struct qreg depth_output;
536 struct qreg u0 = qir_uniform_f(c, 0.0f);
537 struct qreg u1 = qir_uniform_f(c, 1.0f);
538 if (c->key->tex[unit].compare_mode) {
539 /* From the GL_ARB_shadow spec:
541 * "Let Dt (D subscript t) be the depth texture
542 * value, in the range [0, 1]. Let R be the
543 * interpolated texture coordinate clamped to the range [0, 1]." */
546 compare = qir_SAT(c, compare);
548 switch (c->key->tex[unit].compare_func) {
549 case PIPE_FUNC_NEVER:
550 depth_output = qir_uniform_f(c, 0.0f);
552 case PIPE_FUNC_ALWAYS:
555 case PIPE_FUNC_EQUAL:
556 qir_SF(c, qir_FSUB(c, compare, normalized));
557 depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
559 case PIPE_FUNC_NOTEQUAL:
560 qir_SF(c, qir_FSUB(c, compare, normalized));
561 depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
563 case PIPE_FUNC_GREATER:
564 qir_SF(c, qir_FSUB(c, compare, normalized));
565 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
567 case PIPE_FUNC_GEQUAL:
568 qir_SF(c, qir_FSUB(c, normalized, compare));
569 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
572 qir_SF(c, qir_FSUB(c, compare, normalized));
573 depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
575 case PIPE_FUNC_LEQUAL:
576 qir_SF(c, qir_FSUB(c, normalized, compare));
577 depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
581 depth_output = normalized;
584 for (int i = 0; i < 4; i++)
585 dest[i] = depth_output;
587 for (int i = 0; i < 4; i++)
588 dest[i] = qir_UNPACK_8_F(c, tex, i);
593 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds to zero). */
597 ntq_ffract(struct vc4_compile *c, struct qreg src)
599 struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
600 struct qreg diff = qir_FSUB(c, src, trunc);
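/* If src was negative and had a fractional part, FTOI rounded toward zero
 * rather than down, leaving diff negative; the conditional add of 1.0 below
 * brings the result back into [0, 1).
 */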
603 qir_FADD_dest(c, diff,
604 diff, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
606 return qir_MOV(c, diff);
610 * Computes floor(x), which is tricky because our FTOI truncates (rounds to zero). */
614 ntq_ffloor(struct vc4_compile *c, struct qreg src)
616 struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
618 /* This will be < 0 if we truncated and the truncation was of a value
619 * that was < 0 in the first place. */
621 qir_SF(c, qir_FSUB(c, src, result));
623 struct qinst *sub = qir_FSUB_dest(c, result,
624 result, qir_uniform_f(c, 1.0));
625 sub->cond = QPU_COND_NS;
627 return qir_MOV(c, result);
631 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to zero). */
635 ntq_fceil(struct vc4_compile *c, struct qreg src)
637 struct qreg result = qir_ITOF(c, qir_FTOI(c, src));
639 /* This will be < 0 if we truncated and the truncation was of a value
640 * that was > 0 in the first place. */
642 qir_SF(c, qir_FSUB(c, result, src));
644 qir_FADD_dest(c, result,
645 result, qir_uniform_f(c, 1.0))->cond = QPU_COND_NS;
647 return qir_MOV(c, result);
651 ntq_fsin(struct vc4_compile *c, struct qreg src)
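/* sin(src) is evaluated as a polynomial in x = fract(src / (2*pi)) - 0.5
 * over [-0.5, 0.5), using odd-power Taylor coefficients of sin(2*pi*x); the
 * coefficient signs absorb the half-period shift introduced by the -0.5.
 */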
655 pow(2.0 * M_PI, 3) / (3 * 2 * 1),
656 -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
657 pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
658 -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
661 struct qreg scaled_x =
664 qir_uniform_f(c, 1.0 / (M_PI * 2.0)));
666 struct qreg x = qir_FADD(c,
667 ntq_ffract(c, scaled_x),
668 qir_uniform_f(c, -0.5));
669 struct qreg x2 = qir_FMUL(c, x, x);
670 struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
671 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
672 x = qir_FMUL(c, x, x2);
677 qir_uniform_f(c, coeff[i])));
683 ntq_fcos(struct vc4_compile *c, struct qreg src)
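/* Same range reduction as ntq_fsin: cos(src) becomes a polynomial in even
 * powers of x = fract(src / (2*pi)) - 0.5, with the coefficient signs again
 * accounting for the half-period shift.
 */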
687 pow(2.0 * M_PI, 2) / (2 * 1),
688 -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
689 pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
690 -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
691 pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
694 struct qreg scaled_x =
696 qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
697 struct qreg x_frac = qir_FADD(c,
698 ntq_ffract(c, scaled_x),
699 qir_uniform_f(c, -0.5));
701 struct qreg sum = qir_uniform_f(c, coeff[0]);
702 struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
703 struct qreg x = x2; /* Current x^2, x^4, or x^6 */
704 for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
706 x = qir_FMUL(c, x, x2);
708 struct qreg mul = qir_FMUL(c,
710 qir_uniform_f(c, coeff[i]));
714 sum = qir_FADD(c, sum, mul);
720 ntq_fsign(struct vc4_compile *c, struct qreg src)
722 struct qreg t = qir_get_temp(c);
qir_SF(c, src);
725 qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
726 qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
727 qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
728 return qir_MOV(c, t);
732 emit_vertex_input(struct vc4_compile *c, int attr)
734 enum pipe_format format = c->vs_key->attr_formats[attr];
735 uint32_t attr_size = util_format_get_blocksize(format);
737 c->vattr_sizes[attr] = align(attr_size, 4);
738 for (int i = 0; i < align(attr_size, 4) / 4; i++) {
739 c->inputs[attr * 4 + i] =
740 qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
746 emit_fragcoord_input(struct vc4_compile *c, int attr)
748 c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
749 c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
750 c->inputs[attr * 4 + 2] =
752 qir_ITOF(c, qir_FRAG_Z(c)),
753 qir_uniform_f(c, 1.0 / 0xffffff));
754 c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
758 emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
761 uint32_t i = c->num_input_slots++;
767 if (c->num_input_slots >= c->input_slots_array_size) {
768 c->input_slots_array_size =
769 MAX2(4, c->input_slots_array_size * 2);
771 c->input_slots = reralloc(c, c->input_slots,
772 struct vc4_varying_slot,
773 c->input_slots_array_size);
776 c->input_slots[i].slot = slot;
777 c->input_slots[i].swizzle = swizzle;
779 return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
783 emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
785 for (int i = 0; i < 4; i++) {
786 c->inputs[attr * 4 + i] =
787 emit_fragment_varying(c, slot, i);
793 add_output(struct vc4_compile *c,
794 uint32_t decl_offset,
798 uint32_t old_array_size = c->outputs_array_size;
799 resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
802 if (old_array_size != c->outputs_array_size) {
803 c->output_slots = reralloc(c,
805 struct vc4_varying_slot,
806 c->outputs_array_size);
809 c->output_slots[decl_offset].slot = slot;
810 c->output_slots[decl_offset].swizzle = swizzle;
814 declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
816 unsigned array_id = c->num_uniform_ranges++;
817 if (array_id >= c->ubo_ranges_array_size) {
818 c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
820 c->ubo_ranges = reralloc(c, c->ubo_ranges,
821 struct vc4_compiler_ubo_range,
822 c->ubo_ranges_array_size);
825 c->ubo_ranges[array_id].dst_offset = 0;
826 c->ubo_ranges[array_id].src_offset = start;
827 c->ubo_ranges[array_id].size = size;
828 c->ubo_ranges[array_id].used = false;
832 ntq_src_is_only_ssa_def_user(nir_src *src)
837 if (!list_empty(&src->ssa->if_uses))
840 return (src->ssa->uses.next == &src->use_link &&
841 src->ssa->uses.next->next == &src->ssa->uses);
845 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack flag set.
848 * However, as an optimization, it tries to find the instructions generating
849 * the sources to be packed and just emit the pack flag there, if possible. */
852 ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
854 struct qreg result = qir_get_temp(c);
855 struct nir_alu_instr *vec4 = NULL;
857 /* If packing from a vec4 op (as expected), identify it so that we can
858 * peek back at what generated its sources. */
860 if (instr->src[0].src.is_ssa &&
861 instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
862 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
864 vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
867 /* If the pack is replicating the same channel 4 times, use the 8888
868 * pack flag. This is common for blending using the alpha channel. */
871 if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
872 instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
873 instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
874 struct qreg rep = ntq_get_src(c,
876 instr->src[0].swizzle[0]);
877 ntq_store_dest(c, &instr->dest.dest, 0, qir_PACK_8888_F(c, rep));
881 for (int i = 0; i < 4; i++) {
882 int swiz = instr->src[0].swizzle[i];
885 src = ntq_get_src(c, vec4->src[swiz].src,
886 vec4->src[swiz].swizzle[0]);
888 src = ntq_get_src(c, instr->src[0].src, swiz);
892 ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
893 src.file == QFILE_TEMP &&
894 c->defs[src.index] &&
895 qir_is_mul(c->defs[src.index]) &&
896 !c->defs[src.index]->dst.pack) {
897 struct qinst *rewrite = c->defs[src.index];
898 c->defs[src.index] = NULL;
899 rewrite->dst = result;
900 rewrite->dst.pack = QPU_PACK_MUL_8A + i;
904 qir_PACK_8_F(c, result, src, i);
907 ntq_store_dest(c, &instr->dest.dest, 0, qir_MOV(c, result));
910 /** Handles sign-extended bitfield extracts for 16 bits. */
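/* Only the case the QPU can do directly is supported: the extract must be
 * exactly 16 bits wide at a 16-bit-aligned offset, which maps onto the
 * sign-extending 16-bit unpack (qir_UNPACK_16_I).
 */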
912 ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
915 assert(bits.file == QFILE_UNIF &&
916 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
917 c->uniform_data[bits.index] == 16);
919 assert(offset.file == QFILE_UNIF &&
920 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
921 int offset_bit = c->uniform_data[offset.index];
922 assert(offset_bit % 16 == 0);
924 return qir_UNPACK_16_I(c, base, offset_bit / 16);
927 /** Handles unsigned bitfield extracts for 8 bits. */
929 ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
932 assert(bits.file == QFILE_UNIF &&
933 c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
934 c->uniform_data[bits.index] == 8);
936 assert(offset.file == QFILE_UNIF &&
937 c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
938 int offset_bit = c->uniform_data[offset.index];
939 assert(offset_bit % 8 == 0);
941 return qir_UNPACK_8_I(c, base, offset_bit / 8);
945 * If compare_instr is a valid comparison instruction, emits the
946 * compare_instr's comparison and returns the sel_instr's return value based
947 * on the compare_instr's result. */
950 ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
951 nir_alu_instr *compare_instr,
952 nir_alu_instr *sel_instr)
956 switch (compare_instr->op) {
982 struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
983 struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);
985 unsigned unsized_type =
986 nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
987 if (unsized_type == nir_type_float)
988 qir_SF(c, qir_FSUB(c, src0, src1));
990 qir_SF(c, qir_SUB(c, src0, src1));
992 switch (sel_instr->op) {
997 *dest = qir_SEL(c, cond,
998 qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
1002 *dest = qir_SEL(c, cond,
1003 ntq_get_alu_src(c, sel_instr, 1),
1004 ntq_get_alu_src(c, sel_instr, 2));
1008 *dest = qir_SEL(c, cond,
1009 qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
1013 /* Make the temporary for ntq_store_dest(). */
1014 *dest = qir_MOV(c, *dest);
1020 * Attempts to fold a comparison generating a boolean result into the
1021 * condition code for selecting between two values, instead of comparing the
1022 * boolean result against 0 to generate the condition code. */
1024 static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
1027 if (!instr->src[0].src.is_ssa)
1029 if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
1031 nir_alu_instr *compare =
1032 nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
1037 if (ntq_emit_comparison(c, &dest, compare, instr))
1042 return qir_MOV(c, qir_SEL(c, QPU_COND_NS, src[1], src[2]));
1046 ntq_fddx(struct vc4_compile *c, struct qreg src)
1048 /* Make sure that we have a bare temp to use for MUL rotation, so it
1049 * can be allocated to an accumulator. */
1051 if (src.pack || src.file != QFILE_TEMP)
1052 src = qir_MOV(c, src);
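/* A MUL-pipe rotation by +1/-1 lanes reads the value from the horizontally
 * adjacent pixel of the 2x2 quad; ddx is then the signed difference, with
 * the direction selected per pixel below.
 */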
1054 struct qreg from_left = qir_ROT_MUL(c, src, 1);
1055 struct qreg from_right = qir_ROT_MUL(c, src, 15);
1057 /* Distinguish left/right pixels of the quad. */
1058 qir_SF(c, qir_AND(c, qir_reg(QFILE_QPU_ELEMENT, 0),
1059 qir_uniform_ui(c, 1)));
1061 return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1062 qir_FSUB(c, from_right, src),
1063 qir_FSUB(c, src, from_left)));
1067 ntq_fddy(struct vc4_compile *c, struct qreg src)
1069 if (src.pack || src.file != QFILE_TEMP)
1070 src = qir_MOV(c, src);
1072 struct qreg from_bottom = qir_ROT_MUL(c, src, 2);
1073 struct qreg from_top = qir_ROT_MUL(c, src, 14);
1075 /* Distinguish top/bottom pixels of the quad. */
1076 qir_SF(c, qir_AND(c,
1077 qir_reg(QFILE_QPU_ELEMENT, 0),
1078 qir_uniform_ui(c, 2)));
1080 return qir_MOV(c, qir_SEL(c, QPU_COND_ZS,
1081 qir_FSUB(c, from_top, src),
1082 qir_FSUB(c, src, from_bottom)));
1086 ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
1088 /* This should always be lowered to ALU operations for VC4. */
1089 assert(!instr->dest.saturate);
1091 /* Vectors are special in that they have non-scalarized writemasks,
1092 * and just take the first swizzle channel for each argument in order
1093 * into each writemask channel. */
1095 if (instr->op == nir_op_vec2 ||
1096 instr->op == nir_op_vec3 ||
1097 instr->op == nir_op_vec4) {
1098 struct qreg srcs[4];
1099 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1100 srcs[i] = ntq_get_src(c, instr->src[i].src,
1101 instr->src[i].swizzle[0]);
1102 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
1103 ntq_store_dest(c, &instr->dest.dest, i,
1104 qir_MOV(c, srcs[i]));
1108 if (instr->op == nir_op_pack_unorm_4x8) {
1109 ntq_emit_pack_unorm_4x8(c, instr);
1113 if (instr->op == nir_op_unpack_unorm_4x8) {
1114 struct qreg src = ntq_get_src(c, instr->src[0].src,
1115 instr->src[0].swizzle[0]);
1116 for (int i = 0; i < 4; i++) {
1117 if (instr->dest.write_mask & (1 << i))
1118 ntq_store_dest(c, &instr->dest.dest, i,
1119 qir_UNPACK_8_F(c, src, i));
1124 /* General case: We can just grab the one used channel per src. */
1125 struct qreg src[nir_op_infos[instr->op].num_inputs];
1126 for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
1127 src[i] = ntq_get_alu_src(c, instr, i);
1132 switch (instr->op) {
1135 result = qir_MOV(c, src[0]);
1138 result = qir_FMUL(c, src[0], src[1]);
1141 result = qir_FADD(c, src[0], src[1]);
1144 result = qir_FSUB(c, src[0], src[1]);
1147 result = qir_FMIN(c, src[0], src[1]);
1150 result = qir_FMAX(c, src[0], src[1]);
1155 result = qir_FTOI(c, src[0]);
1159 result = qir_ITOF(c, src[0]);
1162 result = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
1165 result = qir_AND(c, src[0], qir_uniform_ui(c, 1));
1170 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC,
1171 qir_uniform_ui(c, ~0),
1172 qir_uniform_ui(c, 0)));
1176 result = qir_ADD(c, src[0], src[1]);
1179 result = qir_SHR(c, src[0], src[1]);
1182 result = qir_SUB(c, src[0], src[1]);
1185 result = qir_ASR(c, src[0], src[1]);
1188 result = qir_SHL(c, src[0], src[1]);
1191 result = qir_MIN(c, src[0], src[1]);
1194 result = qir_MAX(c, src[0], src[1]);
1197 result = qir_AND(c, src[0], src[1]);
1200 result = qir_OR(c, src[0], src[1]);
1203 result = qir_XOR(c, src[0], src[1]);
1206 result = qir_NOT(c, src[0]);
1210 result = ntq_umul(c, src[0], src[1]);
1226 if (!ntq_emit_comparison(c, &result, instr, instr)) {
1227 fprintf(stderr, "Bad comparison instruction\n");
1232 result = ntq_emit_bcsel(c, instr, src);
1236 result = qir_MOV(c, qir_SEL(c, QPU_COND_ZC, src[1], src[2]));
1240 result = ntq_rcp(c, src[0]);
1243 result = ntq_rsq(c, src[0]);
1246 result = qir_EXP2(c, src[0]);
1249 result = qir_LOG2(c, src[0]);
1253 result = qir_ITOF(c, qir_FTOI(c, src[0]));
1256 result = ntq_fceil(c, src[0]);
1259 result = ntq_ffract(c, src[0]);
1262 result = ntq_ffloor(c, src[0]);
1266 result = ntq_fsin(c, src[0]);
1269 result = ntq_fcos(c, src[0]);
1273 result = ntq_fsign(c, src[0]);
1277 result = qir_FMAXABS(c, src[0], src[0]);
1280 result = qir_MAX(c, src[0],
1281 qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
1284 case nir_op_ibitfield_extract:
1285 result = ntq_emit_ibfe(c, src[0], src[1], src[2]);
1288 case nir_op_ubitfield_extract:
1289 result = ntq_emit_ubfe(c, src[0], src[1], src[2]);
1292 case nir_op_usadd_4x8:
1293 result = qir_V8ADDS(c, src[0], src[1]);
1296 case nir_op_ussub_4x8:
1297 result = qir_V8SUBS(c, src[0], src[1]);
1300 case nir_op_umin_4x8:
1301 result = qir_V8MIN(c, src[0], src[1]);
1304 case nir_op_umax_4x8:
1305 result = qir_V8MAX(c, src[0], src[1]);
1308 case nir_op_umul_unorm_4x8:
1309 result = qir_V8MULD(c, src[0], src[1]);
1313 case nir_op_fddx_coarse:
1314 case nir_op_fddx_fine:
1315 result = ntq_fddx(c, src[0]);
1319 case nir_op_fddy_coarse:
1320 case nir_op_fddy_fine:
1321 result = ntq_fddy(c, src[0]);
1325 fprintf(stderr, "unknown NIR ALU inst: ");
1326 nir_print_instr(&instr->instr, stderr);
1327 fprintf(stderr, "\n");
1331 /* We have a scalar result, so the instruction should only have a
1332 * single channel written to. */
1334 assert(util_is_power_of_two(instr->dest.write_mask));
1335 ntq_store_dest(c, &instr->dest.dest,
1336 ffs(instr->dest.write_mask) - 1, result);
1340 emit_frag_end(struct vc4_compile *c)
1343 if (c->output_color_index != -1) {
1344 color = c->outputs[c->output_color_index];
1346 color = qir_uniform_ui(c, 0);
1349 uint32_t discard_cond = QPU_COND_ALWAYS;
1350 if (c->s->info->fs.uses_discard) {
1351 qir_SF(c, c->discard);
1352 discard_cond = QPU_COND_ZS;
1355 if (c->fs_key->stencil_enabled) {
1356 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1357 qir_uniform(c, QUNIFORM_STENCIL, 0));
1358 if (c->fs_key->stencil_twoside) {
1359 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1360 qir_uniform(c, QUNIFORM_STENCIL, 1));
1362 if (c->fs_key->stencil_full_writemasks) {
1363 qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
1364 qir_uniform(c, QUNIFORM_STENCIL, 2));
1368 if (c->output_sample_mask_index != -1) {
1369 qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
1372 if (c->fs_key->depth_enabled) {
1373 if (c->output_position_index != -1) {
1374 qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1376 c->outputs[c->output_position_index],
1377 qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
1379 qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
1380 qir_FRAG_Z(c))->cond = discard_cond;
1384 if (!c->msaa_per_sample_output) {
1385 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
1386 color)->cond = discard_cond;
1388 for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
1389 qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
1390 c->sample_colors[i])->cond = discard_cond;
1396 emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
1398 struct qreg packed = qir_get_temp(c);
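/* X and Y are converted to integers and packed into the two 16-bit halves
 * of a single VPM word (QPU_PACK_A_16A/16B); the viewport scale uniforms
 * are assumed to already include the fixed-point factor the hardware
 * expects for screen coordinates.
 */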
1400 for (int i = 0; i < 2; i++) {
1402 qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);
1404 struct qreg packed_chan = packed;
1405 packed_chan.pack = QPU_PACK_A_16A + i;
1407 qir_FTOI_dest(c, packed_chan,
1410 c->outputs[c->output_position_index + i],
1415 qir_VPM_WRITE(c, packed);
1419 emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
1421 struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
1422 struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);
1424 qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
1425 c->outputs[c->output_position_index + 2],
1432 emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
1434 qir_VPM_WRITE(c, rcp_w);
1438 emit_point_size_write(struct vc4_compile *c)
1440 struct qreg point_size;
1442 if (c->output_point_size_index != -1)
1443 point_size = c->outputs[c->output_point_size_index];
1445 point_size = qir_uniform_f(c, 1.0);
1447 /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835, BCM21553). */
1450 point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));
1452 qir_VPM_WRITE(c, point_size);
1456 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
1458 * The simulator insists that there be at least one vertex attribute, so
1459 * vc4_draw.c will emit one if it wouldn't have otherwise. The simulator also
1460 * insists that all vertex attributes loaded get read by the VS/CS, so we have
1461 * to consume it here. */
1464 emit_stub_vpm_read(struct vc4_compile *c)
1469 c->vattr_sizes[0] = 4;
1470 (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
1475 emit_vert_end(struct vc4_compile *c,
1476 struct vc4_varying_slot *fs_inputs,
1477 uint32_t num_fs_inputs)
1479 struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1481 emit_stub_vpm_read(c);
1483 emit_scaled_viewport_write(c, rcp_w);
1484 emit_zs_write(c, rcp_w);
1485 emit_rcp_wc_write(c, rcp_w);
1486 if (c->vs_key->per_vertex_point_size)
1487 emit_point_size_write(c);
1489 for (int i = 0; i < num_fs_inputs; i++) {
1490 struct vc4_varying_slot *input = &fs_inputs[i];
1493 for (j = 0; j < c->num_outputs; j++) {
1494 struct vc4_varying_slot *output =
1495 &c->output_slots[j];
1497 if (input->slot == output->slot &&
1498 input->swizzle == output->swizzle) {
1499 qir_VPM_WRITE(c, c->outputs[j]);
1503 /* Emit padding if we didn't find a declared VS output for this FS input. */
1506 if (j == c->num_outputs)
1507 qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
1512 emit_coord_end(struct vc4_compile *c)
1514 struct qreg rcp_w = ntq_rcp(c, c->outputs[c->output_position_index + 3]);
1516 emit_stub_vpm_read(c);
1518 for (int i = 0; i < 4; i++)
1519 qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);
1521 emit_scaled_viewport_write(c, rcp_w);
1522 emit_zs_write(c, rcp_w);
1523 emit_rcp_wc_write(c, rcp_w);
1524 if (c->vs_key->per_vertex_point_size)
1525 emit_point_size_write(c);
1529 vc4_optimize_nir(struct nir_shader *s)
1536 NIR_PASS_V(s, nir_lower_vars_to_ssa);
1537 NIR_PASS(progress, s, nir_lower_alu_to_scalar);
1538 NIR_PASS(progress, s, nir_lower_phis_to_scalar);
1539 NIR_PASS(progress, s, nir_copy_prop);
1540 NIR_PASS(progress, s, nir_opt_remove_phis);
1541 NIR_PASS(progress, s, nir_opt_dce);
1542 NIR_PASS(progress, s, nir_opt_dead_cf);
1543 NIR_PASS(progress, s, nir_opt_cse);
1544 NIR_PASS(progress, s, nir_opt_peephole_select, 8);
1545 NIR_PASS(progress, s, nir_opt_algebraic);
1546 NIR_PASS(progress, s, nir_opt_constant_folding);
1547 NIR_PASS(progress, s, nir_opt_undef);
1548 NIR_PASS(progress, s, nir_opt_loop_unroll,
1550 nir_var_shader_out |
1556 driver_location_compare(const void *in_a, const void *in_b)
1558 const nir_variable *const *a = in_a;
1559 const nir_variable *const *b = in_b;
1561 return (*a)->data.driver_location - (*b)->data.driver_location;
1565 ntq_setup_inputs(struct vc4_compile *c)
1567 unsigned num_entries = 0;
1568 nir_foreach_variable(var, &c->s->inputs)
1571 nir_variable *vars[num_entries];
1574 nir_foreach_variable(var, &c->s->inputs)
1577 /* Sort the variables so that we emit the input setup in
1578 * driver_location order. This is required for VPM reads, whose data
1579 * is fetched into the VPM in driver_location (TGSI register index) order. */
1582 qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);
1584 for (unsigned i = 0; i < num_entries; i++) {
1585 nir_variable *var = vars[i];
1586 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1587 unsigned loc = var->data.driver_location;
1589 assert(array_len == 1);
1591 resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
1594 if (c->stage == QSTAGE_FRAG) {
1595 if (var->data.location == VARYING_SLOT_POS) {
1596 emit_fragcoord_input(c, loc);
1597 } else if (var->data.location == VARYING_SLOT_PNTC ||
1598 (var->data.location >= VARYING_SLOT_VAR0 &&
1599 (c->fs_key->point_sprite_mask &
1600 (1 << (var->data.location -
1601 VARYING_SLOT_VAR0))))) {
1602 c->inputs[loc * 4 + 0] = c->point_x;
1603 c->inputs[loc * 4 + 1] = c->point_y;
1605 emit_fragment_input(c, loc, var->data.location);
1608 emit_vertex_input(c, loc);
1614 ntq_setup_outputs(struct vc4_compile *c)
1616 nir_foreach_variable(var, &c->s->outputs) {
1617 unsigned array_len = MAX2(glsl_get_length(var->type), 1);
1618 unsigned loc = var->data.driver_location * 4;
1620 assert(array_len == 1);
1623 for (int i = 0; i < 4; i++)
1624 add_output(c, loc + i, var->data.location, i);
1626 if (c->stage == QSTAGE_FRAG) {
1627 switch (var->data.location) {
1628 case FRAG_RESULT_COLOR:
1629 case FRAG_RESULT_DATA0:
1630 c->output_color_index = loc;
1632 case FRAG_RESULT_DEPTH:
1633 c->output_position_index = loc;
1635 case FRAG_RESULT_SAMPLE_MASK:
1636 c->output_sample_mask_index = loc;
1640 switch (var->data.location) {
1641 case VARYING_SLOT_POS:
1642 c->output_position_index = loc;
1644 case VARYING_SLOT_PSIZ:
1645 c->output_point_size_index = loc;
1653 ntq_setup_uniforms(struct vc4_compile *c)
1655 nir_foreach_variable(var, &c->s->uniforms) {
1656 uint32_t vec4_count = st_glsl_type_size(var->type);
1657 unsigned vec4_size = 4 * sizeof(float);
1659 declare_uniform_range(c, var->data.driver_location * vec4_size,
1660 vec4_count * vec4_size);
1666 * Sets up the mapping from nir_register to struct qreg *.
1668 * Each nir_register gets a struct qreg per 32-bit component being stored. */
1671 ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
1673 foreach_list_typed(nir_register, nir_reg, node, list) {
1674 unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
1675 struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
1677 nir_reg->num_components);
1679 _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);
1681 for (int i = 0; i < array_len * nir_reg->num_components; i++)
1682 qregs[i] = qir_get_temp(c);
1687 ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
1689 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1690 for (int i = 0; i < instr->def.num_components; i++)
1691 qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);
1693 _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
1697 ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
1699 struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
1701 /* QIR needs there to be *some* value, so pick 0 (same as for
1702 * ntq_setup_registers()). */
1704 for (int i = 0; i < instr->def.num_components; i++)
1705 qregs[i] = qir_uniform_ui(c, 0);
1709 ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
1711 nir_const_value *const_offset;
1714 switch (instr->intrinsic) {
1715 case nir_intrinsic_load_uniform:
1716 assert(instr->num_components == 1);
1717 const_offset = nir_src_as_const_value(instr->src[0]);
1719 offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1720 assert(offset % 4 == 0);
1721 /* We need dwords */
1722 offset = offset / 4;
1723 ntq_store_dest(c, &instr->dest, 0,
1724 qir_uniform(c, QUNIFORM_UNIFORM,
1727 ntq_store_dest(c, &instr->dest, 0,
1728 indirect_uniform_load(c, instr));
1732 case nir_intrinsic_load_user_clip_plane:
1733 for (int i = 0; i < instr->num_components; i++) {
1734 ntq_store_dest(c, &instr->dest, i,
1735 qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
1736 nir_intrinsic_ucp_id(instr) *
1741 case nir_intrinsic_load_blend_const_color_r_float:
1742 case nir_intrinsic_load_blend_const_color_g_float:
1743 case nir_intrinsic_load_blend_const_color_b_float:
1744 case nir_intrinsic_load_blend_const_color_a_float:
1745 ntq_store_dest(c, &instr->dest, 0,
1746 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_X +
1748 nir_intrinsic_load_blend_const_color_r_float),
1752 case nir_intrinsic_load_blend_const_color_rgba8888_unorm:
1753 ntq_store_dest(c, &instr->dest, 0,
1754 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_RGBA,
1758 case nir_intrinsic_load_blend_const_color_aaaa8888_unorm:
1759 ntq_store_dest(c, &instr->dest, 0,
1760 qir_uniform(c, QUNIFORM_BLEND_CONST_COLOR_AAAA,
1764 case nir_intrinsic_load_alpha_ref_float:
1765 ntq_store_dest(c, &instr->dest, 0,
1766 qir_uniform(c, QUNIFORM_ALPHA_REF, 0));
1769 case nir_intrinsic_load_sample_mask_in:
1770 ntq_store_dest(c, &instr->dest, 0,
1771 qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0));
1774 case nir_intrinsic_load_front_face:
1775 /* The register contains 0 (front) or 1 (back), and we need to
1776 * turn it into a NIR bool where true means front. */
1778 ntq_store_dest(c, &instr->dest, 0,
1780 qir_uniform_ui(c, -1),
1781 qir_reg(QFILE_FRAG_REV_FLAG, 0)));
1784 case nir_intrinsic_load_input:
1785 assert(instr->num_components == 1);
1786 const_offset = nir_src_as_const_value(instr->src[0]);
1787 assert(const_offset && "vc4 doesn't support indirect inputs");
1788 if (c->stage == QSTAGE_FRAG &&
1789 nir_intrinsic_base(instr) >= VC4_NIR_TLB_COLOR_READ_INPUT) {
1790 assert(const_offset->u32[0] == 0);
1791 /* Reads of the per-sample color need to be done in order. */
1794 int sample_index = (nir_intrinsic_base(instr) -
1795 VC4_NIR_TLB_COLOR_READ_INPUT);
1796 for (int i = 0; i <= sample_index; i++) {
1797 if (c->color_reads[i].file == QFILE_NULL) {
1799 qir_TLB_COLOR_READ(c);
1802 ntq_store_dest(c, &instr->dest, 0,
1803 qir_MOV(c, c->color_reads[sample_index]));
1805 offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1806 int comp = nir_intrinsic_component(instr);
1807 ntq_store_dest(c, &instr->dest, 0,
1808 qir_MOV(c, c->inputs[offset * 4 + comp]));
1812 case nir_intrinsic_store_output:
1813 const_offset = nir_src_as_const_value(instr->src[1]);
1814 assert(const_offset && "vc4 doesn't support indirect outputs");
1815 offset = nir_intrinsic_base(instr) + const_offset->u32[0];
1817 /* MSAA color outputs are the only case where we have an
1818 * output that's not lowered to being a store of a single 32 bit value. */
1821 if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
1822 assert(offset == c->output_color_index);
1823 for (int i = 0; i < 4; i++) {
1824 c->sample_colors[i] =
1825 qir_MOV(c, ntq_get_src(c, instr->src[0],
1829 offset = offset * 4 + nir_intrinsic_component(instr);
1830 assert(instr->num_components == 1);
1831 c->outputs[offset] =
1832 qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
1833 c->num_outputs = MAX2(c->num_outputs, offset + 1);
1837 case nir_intrinsic_discard:
1838 if (c->execute.file != QFILE_NULL) {
1839 qir_SF(c, c->execute);
1840 qir_MOV_cond(c, QPU_COND_ZS, c->discard,
1841 qir_uniform_ui(c, ~0));
1843 qir_MOV_dest(c, c->discard, qir_uniform_ui(c, ~0));
1847 case nir_intrinsic_discard_if: {
1848 /* true (~0) if we're discarding */
1849 struct qreg cond = ntq_get_src(c, instr->src[0], 0);
1851 if (c->execute.file != QFILE_NULL) {
1852 /* execute == 0 means the channel is active. Invert
1853 * the condition so that we can use zero as "executing and discarding." */
1856 qir_SF(c, qir_AND(c, c->execute, qir_NOT(c, cond)));
1857 qir_MOV_cond(c, QPU_COND_ZS, c->discard, cond);
1859 qir_OR_dest(c, c->discard, c->discard,
1860 ntq_get_src(c, instr->src[0], 0));
1867 fprintf(stderr, "Unknown intrinsic: ");
1868 nir_print_instr(&instr->instr, stderr);
1869 fprintf(stderr, "\n");
1874 /* Clears (activates) the execute flags for any channels whose jump target
1875 * matches this block. */
1878 ntq_activate_execute_for_block(struct vc4_compile *c)
1880 qir_SF(c, qir_SUB(c,
1882 qir_uniform_ui(c, c->cur_block->index)));
1883 qir_MOV_cond(c, QPU_COND_ZS, c->execute, qir_uniform_ui(c, 0));
1887 ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
1889 if (!c->vc4->screen->has_control_flow) {
1891 "IF statement support requires updated kernel.\n");
1895 nir_block *nir_else_block = nir_if_first_else_block(if_stmt);
1896 bool empty_else_block =
1897 (nir_else_block == nir_if_last_else_block(if_stmt) &&
1898 exec_list_is_empty(&nir_else_block->instr_list));
1900 struct qblock *then_block = qir_new_block(c);
1901 struct qblock *after_block = qir_new_block(c);
1902 struct qblock *else_block;
1903 if (empty_else_block)
1904 else_block = after_block;
1906 else_block = qir_new_block(c);
1908 bool was_top_level = false;
1909 if (c->execute.file == QFILE_NULL) {
1910 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
1911 was_top_level = true;
1914 /* Set ZS for executing (execute == 0) and jumping (if->condition ==
1915 * 0) channels, and then update execute flags for those to point to the ELSE block. */
qir_SF(c, qir_OR(c, c->execute,
1920 ntq_get_src(c, if_stmt->condition, 0)));
1921 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1922 qir_uniform_ui(c, else_block->index));
1924 /* Jump to ELSE if nothing is active for THEN, otherwise fall through. */
1927 qir_SF(c, c->execute);
1928 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZC);
1929 qir_link_blocks(c->cur_block, else_block);
1930 qir_link_blocks(c->cur_block, then_block);
1932 /* Process the THEN block. */
1933 qir_set_emit_block(c, then_block);
1934 ntq_emit_cf_list(c, &if_stmt->then_list);
1936 if (!empty_else_block) {
1937 /* Handle the end of the THEN block. First, all currently
1938 * active channels update their execute flags to point to ENDIF. */
1941 qir_SF(c, c->execute);
1942 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1943 qir_uniform_ui(c, after_block->index));
1945 /* If everything points at ENDIF, then jump there immediately. */
1946 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, after_block->index)));
1947 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1948 qir_link_blocks(c->cur_block, after_block);
1949 qir_link_blocks(c->cur_block, else_block);
1951 qir_set_emit_block(c, else_block);
1952 ntq_activate_execute_for_block(c);
1953 ntq_emit_cf_list(c, &if_stmt->else_list);
1956 qir_link_blocks(c->cur_block, after_block);
1958 qir_set_emit_block(c, after_block);
1959 if (was_top_level) {
1960 c->execute = c->undef;
1961 c->last_top_block = c->cur_block;
1963 ntq_activate_execute_for_block(c);
1968 ntq_emit_jump(struct vc4_compile *c, nir_jump_instr *jump)
1970 struct qblock *jump_block;
1971 switch (jump->type) {
1972 case nir_jump_break:
1973 jump_block = c->loop_break_block;
1975 case nir_jump_continue:
1976 jump_block = c->loop_cont_block;
1979 unreachable("Unsupported jump type\n");
1982 qir_SF(c, c->execute);
1983 qir_MOV_cond(c, QPU_COND_ZS, c->execute,
1984 qir_uniform_ui(c, jump_block->index));
1986 /* Jump to the destination block if everyone has taken the jump. */
1987 qir_SF(c, qir_SUB(c, c->execute, qir_uniform_ui(c, jump_block->index)));
1988 qir_BRANCH(c, QPU_COND_BRANCH_ALL_ZS);
1989 struct qblock *new_block = qir_new_block(c);
1990 qir_link_blocks(c->cur_block, jump_block);
1991 qir_link_blocks(c->cur_block, new_block);
1992 qir_set_emit_block(c, new_block);
1996 ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
1998 switch (instr->type) {
1999 case nir_instr_type_alu:
2000 ntq_emit_alu(c, nir_instr_as_alu(instr));
2003 case nir_instr_type_intrinsic:
2004 ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
2007 case nir_instr_type_load_const:
2008 ntq_emit_load_const(c, nir_instr_as_load_const(instr));
2011 case nir_instr_type_ssa_undef:
2012 ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
2015 case nir_instr_type_tex:
2016 ntq_emit_tex(c, nir_instr_as_tex(instr));
2019 case nir_instr_type_jump:
2020 ntq_emit_jump(c, nir_instr_as_jump(instr));
2024 fprintf(stderr, "Unknown NIR instr type: ");
2025 nir_print_instr(instr, stderr);
2026 fprintf(stderr, "\n");
2032 ntq_emit_block(struct vc4_compile *c, nir_block *block)
2034 nir_foreach_instr(instr, block) {
2035 ntq_emit_instr(c, instr);
2039 static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);
2042 ntq_emit_loop(struct vc4_compile *c, nir_loop *loop)
2044 if (!c->vc4->screen->has_control_flow) {
2046 "loop support requires updated kernel.\n");
2047 ntq_emit_cf_list(c, &loop->body);
2051 bool was_top_level = false;
2052 if (c->execute.file == QFILE_NULL) {
2053 c->execute = qir_MOV(c, qir_uniform_ui(c, 0));
2054 was_top_level = true;
2057 struct qblock *save_loop_cont_block = c->loop_cont_block;
2058 struct qblock *save_loop_break_block = c->loop_break_block;
2060 c->loop_cont_block = qir_new_block(c);
2061 c->loop_break_block = qir_new_block(c);
2063 qir_link_blocks(c->cur_block, c->loop_cont_block);
2064 qir_set_emit_block(c, c->loop_cont_block);
2065 ntq_activate_execute_for_block(c);
2067 ntq_emit_cf_list(c, &loop->body);
2069 /* If anything had explicitly continued, or is here at the end of the
2070 * loop, then we need to loop again. SF updates are masked by the
2071 * instruction's condition, so we can do the OR of the two conditions within SF. */
2074 qir_SF(c, c->execute);
2075 struct qinst *cont_check =
2079 qir_uniform_ui(c, c->loop_cont_block->index));
2080 cont_check->cond = QPU_COND_ZC;
2081 cont_check->sf = true;
2083 qir_BRANCH(c, QPU_COND_BRANCH_ANY_ZS);
2084 qir_link_blocks(c->cur_block, c->loop_cont_block);
2085 qir_link_blocks(c->cur_block, c->loop_break_block);
2087 qir_set_emit_block(c, c->loop_break_block);
2088 if (was_top_level) {
2089 c->execute = c->undef;
2090 c->last_top_block = c->cur_block;
2092 ntq_activate_execute_for_block(c);
2095 c->loop_break_block = save_loop_break_block;
2096 c->loop_cont_block = save_loop_cont_block;
2100 ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
2102 fprintf(stderr, "FUNCTIONS not handled.\n");
2107 ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
2109 foreach_list_typed(nir_cf_node, node, node, list) {
2110 switch (node->type) {
2111 case nir_cf_node_block:
2112 ntq_emit_block(c, nir_cf_node_as_block(node));
2115 case nir_cf_node_if:
2116 ntq_emit_if(c, nir_cf_node_as_if(node));
2119 case nir_cf_node_loop:
2120 ntq_emit_loop(c, nir_cf_node_as_loop(node));
2123 case nir_cf_node_function:
2124 ntq_emit_function(c, nir_cf_node_as_function(node));
2128 fprintf(stderr, "Unknown NIR node type\n");
2135 ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
2137 ntq_setup_registers(c, &impl->registers);
2138 ntq_emit_cf_list(c, &impl->body);
2142 nir_to_qir(struct vc4_compile *c)
2144 if (c->stage == QSTAGE_FRAG && c->s->info->fs.uses_discard)
2145 c->discard = qir_MOV(c, qir_uniform_ui(c, 0));
2147 ntq_setup_inputs(c);
2148 ntq_setup_outputs(c);
2149 ntq_setup_uniforms(c);
2150 ntq_setup_registers(c, &c->s->registers);
2152 /* Find the main function and emit the body. */
2153 nir_foreach_function(function, c->s) {
2154 assert(strcmp(function->name, "main") == 0);
2155 assert(function->impl);
2156 ntq_emit_impl(c, function->impl);
2160 static const nir_shader_compiler_options nir_options = {
2161 .lower_extract_byte = true,
2162 .lower_extract_word = true,
2164 .lower_flrp32 = true,
2167 .lower_fsqrt = true,
2168 .lower_negate = true,
2169 .native_integers = true,
2170 .max_unroll_iterations = 32,
2174 vc4_screen_get_compiler_options(struct pipe_screen *pscreen,
2175 enum pipe_shader_ir ir, unsigned shader)
2177 return &nir_options;
2181 count_nir_instrs(nir_shader *nir)
2184 nir_foreach_function(function, nir) {
2185 if (!function->impl)
2187 nir_foreach_block(block, function->impl) {
2188 nir_foreach_instr(instr, block)
2195 static struct vc4_compile *
2196 vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
2197 struct vc4_key *key, bool fs_threaded)
2199 struct vc4_compile *c = qir_compile_init();
2203 c->shader_state = &key->shader_state->base;
2204 c->program_id = key->shader_state->program_id;
2206 p_atomic_inc_return(&key->shader_state->compiled_variant_count);
2207 c->fs_threaded = fs_threaded;
2212 c->fs_key = (struct vc4_fs_key *)key;
2213 if (c->fs_key->is_points) {
2214 c->point_x = emit_fragment_varying(c, ~0, 0);
2215 c->point_y = emit_fragment_varying(c, ~0, 0);
2216 } else if (c->fs_key->is_lines) {
2217 c->line_x = emit_fragment_varying(c, ~0, 0);
2221 c->vs_key = (struct vc4_vs_key *)key;
2224 c->vs_key = (struct vc4_vs_key *)key;
2228 c->s = nir_shader_clone(c, key->shader_state->base.ir.nir);
2230 if (stage == QSTAGE_FRAG)
2231 NIR_PASS_V(c->s, vc4_nir_lower_blend, c);
2233 struct nir_lower_tex_options tex_options = {
2234 /* We would need to implement txs, but we don't want the
2235 * int/float conversions. */
2237 .lower_rect = false,
2241 /* Apply swizzles to all samplers. */
2242 .swizzle_result = ~0,
2245 /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
2246 * The format swizzling applies before sRGB decode, and
2247 * ARB_texture_swizzle is the last thing before returning the sample. */
2249 for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
2250 enum pipe_format format = c->key->tex[i].format;
2255 const uint8_t *format_swizzle = vc4_get_format_swizzle(format);
2257 for (int j = 0; j < 4; j++) {
2258 uint8_t arb_swiz = c->key->tex[i].swizzle[j];
2260 if (arb_swiz <= 3) {
2261 tex_options.swizzles[i][j] =
2262 format_swizzle[arb_swiz];
2264 tex_options.swizzles[i][j] = arb_swiz;
2268 if (util_format_is_srgb(format))
2269 tex_options.lower_srgb |= (1 << i);
2272 NIR_PASS_V(c->s, nir_lower_tex, &tex_options);
2274 if (c->fs_key && c->fs_key->light_twoside)
2275 NIR_PASS_V(c->s, nir_lower_two_sided_color);
2277 if (c->vs_key && c->vs_key->clamp_color)
2278 NIR_PASS_V(c->s, nir_lower_clamp_color_outputs);
2280 if (c->key->ucp_enables) {
2281 if (stage == QSTAGE_FRAG) {
2282 NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
2284 NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);
2285 NIR_PASS_V(c->s, nir_lower_io_to_scalar,
2286 nir_var_shader_out);
2290 /* FS input scalarizing must happen after nir_lower_two_sided_color,
2291 * which only handles a vec4 at a time. Similarly, VS output
2292 * scalarizing must happen after nir_lower_clip_vs. */
2294 if (c->stage == QSTAGE_FRAG)
2295 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_in);
2297 NIR_PASS_V(c->s, nir_lower_io_to_scalar, nir_var_shader_out);
2299 NIR_PASS_V(c->s, vc4_nir_lower_io, c);
2300 NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
2301 NIR_PASS_V(c->s, nir_lower_idiv);
2303 vc4_optimize_nir(c->s);
2305 NIR_PASS_V(c->s, nir_convert_from_ssa, true);
2307 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2308 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
2309 qir_get_stage_name(c->stage),
2310 c->program_id, c->variant_id,
2311 count_nir_instrs(c->s));
2314 if (vc4_debug & VC4_DEBUG_NIR) {
2315 fprintf(stderr, "%s prog %d/%d NIR:\n",
2316 qir_get_stage_name(c->stage),
2317 c->program_id, c->variant_id);
2318 nir_print_shader(c->s, stderr);
2325 /* FS threading requires that the thread execute
2326 * QPU_SIG_LAST_THREAD_SWITCH exactly once before terminating
2327 * (with no other THRSW afterwards, obviously). If we didn't
2328 * fetch a texture at a top level block, this wouldn't be true. */
2331 if (c->fs_threaded && !c->last_thrsw_at_top_level) {
2340 c->vs_key->fs_inputs->input_slots,
2341 c->vs_key->fs_inputs->num_inputs);
2348 if (vc4_debug & VC4_DEBUG_QIR) {
2349 fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
2350 qir_get_stage_name(c->stage),
2351 c->program_id, c->variant_id);
2353 fprintf(stderr, "\n");
2357 qir_lower_uniforms(c);
2359 qir_schedule_instructions(c);
2360 qir_emit_uniform_stream_resets(c);
2362 if (vc4_debug & VC4_DEBUG_QIR) {
2363 fprintf(stderr, "%s prog %d/%d QIR:\n",
2364 qir_get_stage_name(c->stage),
2365 c->program_id, c->variant_id);
2367 fprintf(stderr, "\n");
2370 qir_reorder_uniforms(c);
2371 vc4_generate_code(vc4, c);
2373 if (vc4_debug & VC4_DEBUG_SHADERDB) {
2374 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
2375 qir_get_stage_name(c->stage),
2376 c->program_id, c->variant_id,
2378 fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
2379 qir_get_stage_name(c->stage),
2380 c->program_id, c->variant_id,
2390 vc4_shader_state_create(struct pipe_context *pctx,
2391 const struct pipe_shader_state *cso)
2393 struct vc4_context *vc4 = vc4_context(pctx);
2394 struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
2398 so->program_id = vc4->next_uncompiled_program_id++;
2402 if (cso->type == PIPE_SHADER_IR_NIR) {
2403 /* The backend takes ownership of the NIR shader on state creation. */
2408 assert(cso->type == PIPE_SHADER_IR_TGSI);
2410 if (vc4_debug & VC4_DEBUG_TGSI) {
2411 fprintf(stderr, "prog %d TGSI:\n",
2413 tgsi_dump(cso->tokens, 0);
2414 fprintf(stderr, "\n");
2416 s = tgsi_to_nir(cso->tokens, &nir_options);

        NIR_PASS_V(s, nir_opt_global_to_local);
        NIR_PASS_V(s, nir_lower_regs_to_ssa);
        NIR_PASS_V(s, nir_normalize_cubemap_coords);

        NIR_PASS_V(s, nir_lower_load_const_to_scalar);

        vc4_optimize_nir(s);

        NIR_PASS_V(s, nir_remove_dead_variables, nir_var_local);

        /* Garbage collect dead instructions */
        nir_sweep(s);

        so->base.type = PIPE_SHADER_IR_NIR;
        so->base.ir.nir = s;

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d NIR:\n",
                        gl_shader_stage_name(s->stage),
                        so->program_id);
                nir_print_shader(s, stderr);
                fprintf(stderr, "\n");
        }

        return so;
}
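
/* Copies the uniform stream layout computed during compilation into the
 * compiled shader, and flags which dirty state its uniforms depend on so
 * the stream can be rebuilt at draw time.
 */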
static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}
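
/* Builds the list of FS input slots that are actually read by the compiled
 * program and interns it in the context-wide fs_inputs_set, so identical
 * input layouts share a single pointer.
 */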
static void
vc4_setup_compiled_fs_inputs(struct vc4_context *vc4, struct vc4_compile *c,
                             struct vc4_compiled_shader *shader)
{
        struct vc4_fs_inputs inputs;

        memset(&inputs, 0, sizeof(inputs));
        inputs.input_slots = ralloc_array(shader,
                                          struct vc4_varying_slot,
                                          c->num_input_slots);

        bool input_live[c->num_input_slots];

        memset(input_live, 0, sizeof(input_live));
        qir_for_each_inst_inorder(inst, c) {
                for (int i = 0; i < qir_get_nsrc(inst); i++) {
                        if (inst->src[i].file == QFILE_VARY)
                                input_live[inst->src[i].index] = true;
                }
        }

        for (int i = 0; i < c->num_input_slots; i++) {
                struct vc4_varying_slot *slot = &c->input_slots[i];

                if (!input_live[i])
                        continue;

                /* Skip non-VS-output inputs. */
                if (slot->slot == (uint8_t)~0)
                        continue;

                if (slot->slot == VARYING_SLOT_COL0 ||
                    slot->slot == VARYING_SLOT_COL1 ||
                    slot->slot == VARYING_SLOT_BFC0 ||
                    slot->slot == VARYING_SLOT_BFC1) {
                        shader->color_inputs |= (1 << inputs.num_inputs);
                }

                inputs.input_slots[inputs.num_inputs] = *slot;
                inputs.num_inputs++;
        }
        shader->num_inputs = inputs.num_inputs;

        /* Add our set of inputs to the set of all inputs seen.  This way, we
         * can have a single pointer that identifies an FS inputs set,
         * allowing VS to avoid recompiling when the FS is recompiled (or a
         * new one is bound using separate shader objects) but the inputs
         * remain the same.
         */
        struct set_entry *entry = _mesa_set_search(vc4->fs_inputs_set, &inputs);
        if (entry) {
                shader->fs_inputs = entry->key;
                ralloc_free(inputs.input_slots);
        } else {
                struct vc4_fs_inputs *alloc_inputs;

                alloc_inputs = rzalloc(vc4->fs_inputs_set, struct vc4_fs_inputs);
                memcpy(alloc_inputs, &inputs, sizeof(inputs));
                ralloc_steal(alloc_inputs, inputs.input_slots);
                _mesa_set_add(vc4->fs_inputs_set, alloc_inputs);

                shader->fs_inputs = alloc_inputs;
        }
}
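
/* Returns the compiled shader variant for the given key, compiling a new
 * variant (and caching it in the per-stage hash table) on a cache miss.
 */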
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        bool try_threading;

        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
                try_threading = vc4->screen->has_threaded_fs;
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
                try_threading = false;
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key, try_threading);
        /* If the FS failed to compile threaded, fall back to single threaded. */
        if (try_threading && c->failed) {
                qir_compile_destroy(c);
                c = vc4_shader_ntq(vc4, stage, key, false);
        }

        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                vc4_setup_compiled_fs_inputs(vc4, c, shader);

                /* Note: the temporary clone in c->s has been freed. */
                nir_shader *orig_shader = key->shader_state->base.ir.nir;
                if (orig_shader->info->outputs_written & (1 << FRAG_RESULT_DEPTH))
                        shader->disable_early_z = true;
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        shader->failed = c->failed;
        if (c->failed) {
                shader->failed = true;
        } else {
                copy_uniform_state_to_shader(shader, c);
                shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                                 c->qpu_inst_count *
                                                 sizeof(uint64_t));
        }

        shader->fs_threaded = c->fs_threaded;

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }
        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = rzalloc_size(shader, key_size); /* TODO: don't use rzalloc */
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}
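
/* Fills in the texture-related fields of the key that are shared between
 * the FS and VS keys, plus the user clip plane enables.
 */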
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct vc4_sampler_view *vc4_sampler = vc4_sampler_view(sampler);
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                        key->tex[i].force_first_level =
                                vc4_sampler->force_first_level;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}
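
/* Builds the FS key from the current dirty state and binds the matching
 * compiled fragment shader, flagging dependent state when it changes.
 */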
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_job *job = vc4->job;
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (job->msaa) {
                key->msaa = vc4->rasterizer->base.multisample;
                key->sample_coverage = (vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
                key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        }

        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;

        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }

        if (old_fs && vc4->prog.fs->fs_inputs != old_fs->fs_inputs)
                vc4->dirty |= VC4_DIRTY_FS_INPUTS;
}
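
/* Builds the VS key from the current dirty state and binds the matching
 * compiled vertex and coordinate shaders.
 */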
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_FS_INPUTS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->fs_inputs = vc4->prog.fs->fs_inputs;
        key->clamp_color = vc4->rasterizer->base.clamp_vertex_color;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        /* Coord shaders don't care what the FS inputs are. */
        key->fs_inputs = NULL;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}
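
/* Called at draw time to make sure current FS, VS, and coordinate shader
 * variants exist; returns false if any of them failed to compile.
 */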
bool
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);

        return !(vc4->prog.cs->failed ||
                 vc4->prog.vs->failed ||
                 vc4->prog.fs->failed);
}
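
/* Hash and comparison callbacks for the per-stage shader variant caches and
 * the interned FS inputs set.
 */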
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static uint32_t
fs_inputs_hash(const void *key)
{
        const struct vc4_fs_inputs *inputs = key;

        return _mesa_hash_data(inputs->input_slots,
                               sizeof(*inputs->input_slots) *
                               inputs->num_inputs);
}

static bool
fs_inputs_compare(const void *key1, const void *key2)
{
        const struct vc4_fs_inputs *inputs1 = key1;
        const struct vc4_fs_inputs *inputs2 = key2;

        return (inputs1->num_inputs == inputs2->num_inputs &&
                memcmp(inputs1->input_slots,
                       inputs2->input_slots,
                       sizeof(*inputs1->input_slots) *
                       inputs1->num_inputs) == 0);
}
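
/* Removes and frees a cached variant if it was compiled from the shader
 * state object being deleted.
 */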
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        ralloc_free(so->base.ir.nir);
        free(so);
}
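
/* CSO bind hooks: just record the bound shader state and mark it dirty so
 * the compiled variant is looked up at the next draw.
 */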
static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}
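
/* Sets up the context's shader-related CSO hooks and caches, and tears them
 * down (freeing any remaining compiled variants) at context destruction.
 */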
void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
        vc4->fs_inputs_set = _mesa_set_create(pctx, fs_inputs_hash,
                                              fs_inputs_compare);
}

void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}