/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "glsl/nir/nir.h"
#include "glsl/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"
#include "vc4_qpu_defines.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);

static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
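
/* VC4 has no instruction for indirectly addressing the uniform stream, so
 * indirect uniform loads below are turned into a general memory lookup
 * through the texture unit (TEX_DIRECT) based at QUNIFORM_UBO_ADDR.  Each
 * such load therefore counts as a texture sample.
 */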

static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = intr->const_index[0];
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }

        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        c->num_texture_samples++;
        return qir_TEX_RESULT(c);
}

nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
                                       enum quniform_contents contents)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_load_uniform);
        intr->const_index[0] = VC4_NIR_STATE_UNIFORM_OFFSET + contents;
        intr->num_components = 1;
        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, NULL);
        nir_builder_instr_insert(b, &intr->instr);
        return &intr->dest.ssa;
}

nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}

static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static struct qreg
get_swizzled_channel(struct vc4_compile *c,
                     struct qreg *srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return qir_uniform_f(c, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return qir_uniform_f(c, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}

static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}
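
/* The QPU's special function unit only gives rough approximations of 1/x
 * and 1/sqrt(x), so the helpers below refine the result with one
 * Newton-Raphson step:
 *
 *     rcp:  r1 = r0 * (2.0 - x * r0)
 *     rsq:  r1 = r0 * (1.5 - 0.5 * x * r0 * r0)
 */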

static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}

static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
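
/* Decodes an sRGB-encoded channel to linear using the standard piecewise
 * EOTF: s / 12.92 below the 0.04045 cutoff, ((s + 0.055) / 1.055)^2.4
 * above it.  The SF/SEL pair below implements the cutoff comparison.
 */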

static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c, srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL_X_Y_NS(c, low, high);
}
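
/* The QPU multiplier only handles 24x24-bit products (MUL24 uses the low
 * 24 bits of each operand), so a full 32-bit multiply is assembled from
 * three partial products:
 *
 *     a*b mod 2^32 = lo24(a)*lo24(b) + (((a>>24)*b + a*(b>>24)) << 24)
 */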

static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}
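
/* Texture fetches on VC4 are a pipelined sequence of TMU register writes:
 * cube-face/border-color R, then T, then an optional LOD bias/level B, and
 * finally S, whose write triggers the lookup.  The sampler configuration
 * rides along in the uniform stream (the texture_u entries below), one
 * uniform consumed per coordinate write.
 */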

static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->sampler_index;

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
                struct qreg rcp_ma = qir_RCP(c, ma);
                s = qir_FMUL(c, s, rcp_ma);
                t = qir_FMUL(c, t, rcp_ma);
                r = qir_FMUL(c, r, rcp_ma);

                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg unpacked[4];
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg depthf = qir_ITOF(c, qir_SHR(c, tex,
                                                         qir_uniform_ui(c, 8)));
                struct qreg normalized = qir_FMUL(c, depthf,
                                                  qir_uniform_f(c, 1.0f/0xffffff));

                struct qreg depth_output;

                struct qreg one = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = one;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_ZS(c, one);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_ZC(c, one);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_NC(c, one);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL_X_0_NS(c, one);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL_X_0_NS(c, one);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL_X_0_NC(c, one);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        unpacked[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        unpacked[i] = qir_UNPACK_8_F(c, tex, i);
        }

        const uint8_t *format_swiz = vc4_get_format_swizzle(format);
        struct qreg texture_output[4];
        for (int i = 0; i < 4; i++) {
                texture_output[i] = get_swizzled_channel(c, unpacked,
                                                         format_swiz[i]);
        }

        if (util_format_is_srgb(format)) {
                for (int i = 0; i < 3; i++)
                        texture_output[i] = qir_srgb_decode(c,
                                                            texture_output[i]);
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        for (int i = 0; i < 4; i++) {
                dest[i] = get_swizzled_channel(c, texture_output,
                                               c->key->tex[unit].swizzle[i]);
        }
}

/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, diff, qir_uniform_f(c, 1.0)),
                              diff);
}

/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL_X_Y_NS(c,
                              qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)),
                              trunc);
}

/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL_X_Y_NS(c,
                              qir_FADD(c, trunc, qir_uniform_f(c, 1.0)),
                              trunc);
}
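
/* sin/cos are evaluated as Taylor-series polynomials of the scaled
 * argument: x is converted to periods (x / 2pi), ffract() - 0.5 reduces
 * that to [-0.5, 0.5), and since sin(u + pi) = -sin(u) the half-period
 * shift just flips the sign of every coefficient below.
 */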

static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}

static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                if (i == 0)
                        sum = mul;
                else
                        sum = qir_FADD(c, sum, mul);
        }
        return sum;
}
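
/* sign(x) via condition flags: SF sets N/Z from the source, the outer
 * select picks -1.0 when the negative flag is set, and the inner select
 * turns the zero flag into 0.0-vs-1.0 for the non-negative case.
 */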

static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        qir_SF(c, src);
        return qir_SEL_X_Y_NC(c,
                              qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0)),
                              qir_uniform_f(c, -1.0));
}

static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                struct qreg vpm = { QFILE_VPM, attr * 4 + i };
                c->inputs[attr * 4 + i] = qir_MOV(c, vpm);
                c->num_inputs++;
        }
}

static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
        c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}
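
/* Varyings arrive only partially interpolated: the QPU multiplies the raw
 * varying by W and adds the C coefficient (which the hardware loads into
 * r5 alongside the varying read; VARY_ADD_C consumes it) to finish
 * perspective-correct interpolation.
 */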

static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}

static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}

static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}

static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}

static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}

/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * flag set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                } else {
                        qir_PACK_8_F(c, result, src, i);
                }
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        *dest = result;
}

/** Handles sign-extended bitfield extracts for 16 bits. */
static struct qreg
ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 16);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 16 == 0);

        return qir_UNPACK_16_I(c, base, offset_bit / 16);
}

/** Handles unsigned bitfield extracts for 8 bits. */
static struct qreg
ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 8);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 8 == 0);

        return qir_UNPACK_8_I(c, base, offset_bit / 8);
}
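
/* QIR has no comparison opcodes.  The comparison cases below all follow
 * the same pattern: qir_SF() sets the implicit N/Z condition flags from a
 * subtraction of the operands, then a conditional select (SEL_X_0_* /
 * SEL_X_Y_*) materializes the boolean from the appropriate flag.
 */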

static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                dest[i] = qir_UNPACK_8_F(c, src, i);
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;

        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_sne:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_sge:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_slt:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_f(c, 1.0));
                break;
        case nir_op_feq:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_fne:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_fge:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_flt:
                qir_SF(c, qir_FSUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ieq:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZS(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ine:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_ZC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ige:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NC(c, qir_uniform_ui(c, ~0));
                break;
        case nir_op_ilt:
                qir_SF(c, qir_SUB(c, src[0], src[1]));
                *dest = qir_SEL_X_0_NS(c, qir_uniform_ui(c, ~0));
                break;

        case nir_op_bcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_Y_NS(c, src[1], src[2]);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL_X_Y_ZC(c, src[1], src[2]);
                break;

        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_ibitfield_extract:
                *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_ubitfield_extract:
                *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
emit_frag_end(struct vc4_compile *c)
{
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        if (c->discard.file != QFILE_NULL)
                qir_TLB_DISCARD_SETUP(c, c->discard);

        if (c->fs_key->stencil_enabled) {
                qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->fs_key->depth_enabled) {
                struct qreg z;
                if (c->output_position_index != -1) {
                        z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
                                                 qir_uniform_f(c, 0xffffff)));
                } else {
                        z = qir_FRAG_Z(c);
                }
                qir_TLB_Z_WRITE(c, z);
        }

        qir_TLB_COLOR_WRITE(c, color);
}
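
/* Packs the viewport-transformed X/Y into the single 32-bit word the
 * binner expects: each coordinate is scaled by the viewport uniform
 * (which has the 12.4 fixed-point factor folded in), multiplied by 1/Wc,
 * converted to integer, and packed into a 16-bit half via
 * QPU_PACK_A_16A/16B.
 */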

static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg packed = qir_get_temp(c);

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                struct qreg packed_chan = packed;
                packed_chan.pack = QPU_PACK_A_16A + i;

                qir_FTOI_dest(c, packed_chan,
                              qir_FMUL(c,
                                       qir_FMUL(c,
                                                c->outputs[c->output_position_index + i],
                                                scale),
                                       rcp_w));
        }

        qir_VPM_WRITE(c, packed);
}

static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}

static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}

static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index + 3];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}

/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we have
 * to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        struct qreg vpm = { QFILE_VPM, 0 };
        (void)qir_MOV(c, vpm);
        c->num_inputs++;
}

static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_slot *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_slot *input = &fs_inputs[i];
                int j;
                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_slot *output =
                                &c->output_slots[j];
                        if (input->slot == output->slot &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}
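
/* The coordinate shader is the binning-pass counterpart of the VS: it
 * writes the raw clip-space position followed by the same transformed
 * position data, and skips the user varyings, since the binner only needs
 * positions to assign primitives to tiles.
 */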

static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}

static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                nir_lower_vars_to_ssa(s);
                nir_lower_alu_to_scalar(s);

                progress = nir_copy_prop(s) || progress;
                progress = nir_opt_dce(s) || progress;
                progress = nir_opt_cse(s) || progress;
                progress = nir_opt_peephole_select(s) || progress;
                progress = nir_opt_algebraic(s) || progress;
                progress = nir_opt_constant_folding(s) || progress;
                progress = nir_opt_undef(s) || progress;
        } while (progress);
}

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static void
ntq_setup_inputs(struct vc4_compile *c)
{
        unsigned num_entries = 0;
        nir_foreach_variable(var, &c->s->inputs)
                num_entries++;

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->stage == QSTAGE_FRAG) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_FACE) {
                                c->inputs[loc * 4 + 0] = qir_FRAG_REV_FLAG(c);
                        } else if (var->data.location >= VARYING_SLOT_VAR0 &&
                                   (c->fs_key->point_sprite_mask &
                                    (1 << (var->data.location -
                                           VARYING_SLOT_VAR0)))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var->data.location);
                        }
                } else {
                        emit_vertex_input(c, loc);
                }
        }
}

static void
ntq_setup_outputs(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4; i++)
                        add_output(c, loc + i, var->data.location, i);

                if (c->stage == QSTAGE_FRAG) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                        case FRAG_RESULT_DATA0:
                                c->output_color_index = loc;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}

static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}

/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}

static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, 0);
}

static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
        struct qreg *dest = NULL;

        if (info->has_dest) {
                dest = ntq_get_dest(c, &instr->dest);
        }

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                if (instr->const_index[0] < VC4_NIR_STATE_UNIFORM_OFFSET) {
                        *dest = qir_uniform(c, QUNIFORM_UNIFORM,
                                            instr->const_index[0]);
                } else {
                        *dest = qir_uniform(c, instr->const_index[0] -
                                            VC4_NIR_STATE_UNIFORM_OFFSET,
                                            0);
                }
                break;

        case nir_intrinsic_load_uniform_indirect:
                *dest = indirect_uniform_load(c, instr);
                break;

        case nir_intrinsic_load_user_clip_plane:
                *dest = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                    instr->const_index[0]);
                break;

        case nir_intrinsic_load_input:
                assert(instr->num_components == 1);
                if (instr->const_index[0] == VC4_NIR_TLB_COLOR_READ_INPUT) {
                        *dest = qir_TLB_COLOR_READ(c);
                } else {
                        *dest = c->inputs[instr->const_index[0]];
                }
                break;

        case nir_intrinsic_store_output:
                assert(instr->num_components == 1);
                c->outputs[instr->const_index[0]] =
                        qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                c->num_outputs = MAX2(c->num_outputs, instr->const_index[0] + 1);
                break;

        case nir_intrinsic_discard:
                c->discard = qir_uniform_ui(c, ~0);
                break;

        case nir_intrinsic_discard_if:
                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);
                c->discard = qir_OR(c, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}

static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(block, instr) {
                ntq_emit_instr(c, instr);
        }
}

static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                /* case nir_cf_node_loop: */
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        break;
                }
        }
}

static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_overload(c->s, overload) {
                assert(strcmp(overload->function->name, "main") == 0);
                assert(overload->impl);
                ntq_emit_impl(c, overload->impl);
        }
}

static const nir_shader_compiler_options nir_options = {
        .lower_ffma = true,
        .lower_flrp = true,
        .lower_fpow = true,
        .lower_fsat = true,
        .lower_fsqrt = true,
        .lower_negate = true,
};

static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(block, instr) {
                *count = *count + 1;
        }
        return true;
}

static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_overload(nir, overload) {
                if (!overload->impl)
                        continue;
                nir_foreach_block(overload->impl, count_nir_instrs_in_block, &count);
        }
        return count;
}

static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
               struct vc4_key *key)
{
        struct vc4_compile *c = qir_compile_init();

        c->stage = stage;
        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        c->variant_id = key->shader_state->compiled_variant_count++;

        c->key = key;
        switch (stage) {
        case QSTAGE_FRAG:
                c->fs_key = (struct vc4_fs_key *)key;
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, 0);
                }
                break;
        case QSTAGE_VERT:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        case QSTAGE_COORD:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        }

        const struct tgsi_token *tokens = key->shader_state->base.tokens;
        if (vc4_debug & VC4_DEBUG_TGSI) {
                fprintf(stderr, "%s prog %d/%d TGSI:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                tgsi_dump(tokens, 0);
        }

        c->s = tgsi_to_nir(tokens, &nir_options);
        nir_opt_global_to_local(c->s);
        nir_convert_to_ssa(c->s);

        if (stage == QSTAGE_FRAG)
                vc4_nir_lower_blend(c);

        if (c->fs_key && c->fs_key->light_twoside)
                nir_lower_two_sided_color(c->s);

        if (stage == QSTAGE_FRAG)
                nir_lower_clip_fs(c->s, c->key->ucp_enables);
        else
                nir_lower_clip_vs(c->s, c->key->ucp_enables);

        vc4_nir_lower_io(c);
        nir_lower_idiv(c->s);
        nir_lower_load_const_to_scalar(c->s);

        vc4_optimize_nir(c->s);

        nir_remove_dead_variables(c->s);

        nir_convert_from_ssa(c->s, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              vc4->prog.fs->input_slots,
                              vc4->prog.fs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        ralloc_free(c->s);

        return c;
}

static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}

static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}
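
/* Shader variants are cached per stage in a hash table keyed on the entire
 * vc4_fs_key/vc4_vs_key, so any state change folded into the key (blend,
 * formats, wrap modes, ...) either reuses a previous compile or triggers a
 * fresh one.
 */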

static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_slots];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_slots = ralloc_array(shader,
                                                   struct vc4_varying_slot,
                                                   c->num_input_slots);

                for (int i = 0; i < c->num_input_slots; i++) {
                        struct vc4_varying_slot *slot = &c->input_slots[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (slot->slot == (uint8_t)~0)
                                continue;

                        if (slot->slot == VARYING_SLOT_COL0 ||
                            slot->slot == VARYING_SLOT_COL1 ||
                            slot->slot == VARYING_SLOT_BFC0 ||
                            slot->slot == VARYING_SLOT_BFC1) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_slots[shader->num_inputs] = *slot;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }
        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (sampler) {
                        key->tex[i].format = sampler->format;
                        key->tex[i].swizzle[0] = sampler->swizzle_r;
                        key->tex[i].swizzle[1] = sampler->swizzle_g;
                        key->tex[i].swizzle[2] = sampler->swizzle_b;
                        key->tex[i].swizzle[3] = sampler->swizzle_a;
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}

static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}

void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}

static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        free((void *)so->base.tokens);
        free(so);
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}

void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}