/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <inttypes.h>
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"
#include "vc4_qpu.h"
#include "vc4_qir.h"
#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);

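/* Grows *regs to hold at least decl_size entries, doubling the allocation
 * to amortize resizing and initializing any newly added entries to
 * c->undef.
 */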
static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}

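/* Implements an indirectly-addressed uniform load by reading from the
 * shader's UBO copy of the uniforms through the TMU: the register offset is
 * added to the range's destination offset, clamped to the range, and
 * fetched with TEX_DIRECT.
 */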
static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = intr->const_index[0];
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }

        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        c->num_texture_samples++;
        return qir_TEX_RESULT(c);
}

nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
                                       enum quniform_contents contents)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_load_uniform);
        intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4;
        intr->num_components = 1;
        intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32, NULL);
        nir_builder_instr_insert(b, &intr->instr);
        return &intr->dest.ssa;
}

nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case PIPE_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case PIPE_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case PIPE_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case PIPE_SWIZZLE_X:
        case PIPE_SWIZZLE_Y:
        case PIPE_SWIZZLE_Z:
        case PIPE_SWIZZLE_W:
                return srcs[swiz];
        }
}

static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}

static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}

static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}

static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}

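/* The SFU's RCP and RSQ results are approximations, so the helpers below
 * refine them with a Newton-Raphson step (for RCP: r' = r * (2 - x * r)) at
 * the cost of a few extra multiplies.
 */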
static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}

static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}

static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c,
                                                     srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL(c, QPU_COND_NS, low, high);
}

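/* The QPU multiplier only handles 24x24-bit multiplies, so a full 32-bit
 * integer multiply is assembled from three MUL24 partial products, with the
 * high cross terms shifted back up by 24 bits.
 */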
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}

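/* Converts a raw depth/stencil sample, with the 24-bit depth value stored
 * in the upper bits of the 32-bit return, to a float depth in [0, 1].
 */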
static struct qreg
ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
{
        struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
                                                 qir_uniform_ui(c, 8)));
        return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
}

/**
 * Emits a lowered TXF_MS from an MSAA texture.
 *
 * The addressing math has been lowered in NIR, and now we just need to read
 * it like a UBO.
 */
static void
ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
{
        uint32_t tile_width = 32;
        uint32_t tile_height = 32;
        uint32_t tile_size = (tile_height * tile_width *
                              VC4_MAX_SAMPLES * sizeof(uint32_t));

        unsigned unit = instr->texture_index;
        uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
        uint32_t w_tiles = w / tile_width;
        uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
        uint32_t h_tiles = h / tile_height;
        uint32_t size = w_tiles * h_tiles * tile_size;

        struct qreg addr;
        assert(instr->num_srcs == 1);
        assert(instr->src[0].src_type == nir_tex_src_coord);
        addr = ntq_get_src(c, instr->src[0].src, 0);

        /* Perform the clamping required by kernel validation. */
        addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
        addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));

        qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));

        struct qreg tex = qir_TEX_RESULT(c);
        c->num_texture_samples++;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        enum pipe_format format = c->key->tex[unit].format;
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg scaled = ntq_scale_depth_texture(c, tex);
                for (int i = 0; i < 4; i++)
                        dest[i] = scaled;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}

static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->texture_index;

        if (instr->op == nir_texop_txf) {
                ntq_emit_txf(c, instr);
                return;
        }

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg normalized = ntq_scale_depth_texture(c, tex);
                struct qreg depth_output;

                struct qreg u0 = qir_uniform_f(c, 0.0f);
                struct qreg u1 = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = u1;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        dest[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}

/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
}

/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}

/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}

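/* ntq_fsin()/ntq_fcos() evaluate sine and cosine as Taylor-series
 * polynomials of a range-reduced argument: the input is scaled by
 * 1/(2*pi) and reduced to [-0.5, 0.5] with ntq_ffract(), and the sign flip
 * from the half-period offset is folded into the coefficients.
 */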
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}

static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                sum = qir_FADD(c, sum, mul);
        }
        return sum;
}

static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        struct qreg t = qir_get_temp(c);

        qir_SF(c, src);
        qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
        qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
        qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
        return t;
}

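/* Reads a vertex attribute from the VPM, one 32-bit word per component,
 * with the attribute size padded to a multiple of 4 bytes.
 */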
static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                c->inputs[attr * 4 + i] =
                        qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
                c->num_inputs++;
        }
}

static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_ITOF(c, qir_reg(QFILE_FRAG_X, 0));
        c->inputs[attr * 4 + 1] = qir_ITOF(c, qir_reg(QFILE_FRAG_Y, 0));
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}

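/* Records an FS input slot for this varying and emits its interpolation:
 * the raw varying is multiplied by the fragment's W and the C coefficient
 * is added by qir_VARY_ADD_C().
 */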
static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}

static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}

static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}

static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}

static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}

/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * bit set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        /* If the pack is replicating the same channel 4 times, use the 8888
         * pack flag.  This is common for blending using the alpha
         * channel.
         */
        if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                *dest = qir_PACK_8888_F(c,
                                        ntq_get_src(c, instr->src[0].src,
                                                    instr->src[0].swizzle[0]));
                return;
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                } else {
                        qir_PACK_8_F(c, result, src, i);
                }
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        *dest = result;
}

/** Handles sign-extended bitfield extracts for 16 bits. */
static struct qreg
ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 16);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 16 == 0);

        return qir_UNPACK_16_I(c, base, offset_bit / 16);
}

/** Handles unsigned bitfield extracts for 8 bits. */
static struct qreg
ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 8);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 8 == 0);

        return qir_UNPACK_8_I(c, base, offset_bit / 8);
}

/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        enum qpu_cond cond;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_ieq:
        case nir_op_seq:
                cond = QPU_COND_ZS;
                break;
        case nir_op_fne:
        case nir_op_ine:
        case nir_op_sne:
                cond = QPU_COND_ZC;
                break;
        case nir_op_fge:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_sge:
                cond = QPU_COND_NC;
                break;
        case nir_op_flt:
        case nir_op_ilt:
        case nir_op_slt:
                cond = QPU_COND_NS;
                break;
        default:
                return false;
        }

        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);

        unsigned unsized_type =
                nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
        if (unsized_type == nir_type_float)
                qir_SF(c, qir_FSUB(c, src0, src1));
        else
                qir_SF(c, qir_SUB(c, src0, src1));

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = qir_SEL(c, cond,
                                qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = qir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = qir_SEL(c, cond,
                                qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
                break;
        }

        return true;
}

/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        if (!compare)
                goto out;

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        qir_SF(c, src[0]);
        return qir_SEL(c, QPU_COND_NS, src[1], src[2]);
}

static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                dest[i] = qir_UNPACK_8_F(c, src, i);
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC,
                                qir_uniform_ui(c, ~0),
                                qir_uniform_ui(c, 0));
                break;

        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
                if (!ntq_emit_comparison(c, dest, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                *dest = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
                break;

        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_ibitfield_extract:
                *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_ubitfield_extract:
                *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_usadd_4x8:
                *dest = qir_V8ADDS(c, src[0], src[1]);
                break;

        case nir_op_ussub_4x8:
                *dest = qir_V8SUBS(c, src[0], src[1]);
                break;

        case nir_op_umin_4x8:
                *dest = qir_V8MIN(c, src[0], src[1]);
                break;

        case nir_op_umax_4x8:
                *dest = qir_V8MAX(c, src[0], src[1]);
                break;

        case nir_op_umul_unorm_4x8:
                *dest = qir_V8MULD(c, src[0], src[1]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
emit_frag_end(struct vc4_compile *c)
{
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        uint32_t discard_cond = QPU_COND_ALWAYS;
        if (c->discard.file != QFILE_NULL) {
                qir_SF(c, c->discard);
                discard_cond = QPU_COND_ZS;
        }

        if (c->fs_key->stencil_enabled) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                             qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_STENCIL_SETUP, 0),
                                     qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->output_sample_mask_index != -1) {
                qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }

        if (c->fs_key->depth_enabled) {
                if (c->output_position_index != -1) {
                        qir_FTOI_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                      qir_FMUL(c,
                                               c->outputs[c->output_position_index + 2],
                                               qir_uniform_f(c, 0xffffff)))->cond = discard_cond;
                } else {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_Z_WRITE, 0),
                                     qir_FRAG_Z(c))->cond = discard_cond;
                }
        }

        if (!c->msaa_per_sample_output) {
                qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE, 0),
                             color)->cond = discard_cond;
        } else {
                for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
                        qir_MOV_dest(c, qir_reg(QFILE_TLB_COLOR_WRITE_MS, 0),
                                     c->sample_colors[i])->cond = discard_cond;
                }
        }
}

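/* Writes the scaled viewport X/Y coordinates to the VPM, packed as a pair
 * of 16-bit values in a single 32-bit register.
 */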
static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg packed = qir_get_temp(c);

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                struct qreg packed_chan = packed;
                packed_chan.pack = QPU_PACK_A_16A + i;

                qir_FTOI_dest(c, packed_chan,
                              qir_FMUL(c,
                                       qir_FMUL(c,
                                                c->outputs[c->output_position_index + i],
                                                scale),
                                       rcp_w));
        }

        qir_VPM_WRITE(c, packed);
}

static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}

static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}

static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}

/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator
 * also insists that all vertex attributes loaded get read by the VS/CS, so
 * we have to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
        c->num_inputs++;
}

static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_slot *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_slot *input = &fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_slot *output =
                                &c->output_slots[j];

                        if (input->slot == output->slot &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}

static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}

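/* Runs the NIR optimization loop over the shader until no pass reports
 * progress.
 */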
static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS_V(s, nir_lower_alu_to_scalar);

                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);
}

static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}

static void
ntq_setup_inputs(struct vc4_compile *c)
{
        unsigned num_entries = 0;
        nir_foreach_variable(var, &c->s->inputs)
                num_entries++;

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->stage == QSTAGE_FRAG) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_FACE) {
                                c->inputs[loc * 4 + 0] =
                                        qir_ITOF(c, qir_reg(QFILE_FRAG_REV_FLAG,
                                                            0));
                        } else if (var->data.location >= VARYING_SLOT_VAR0 &&
                                   (c->fs_key->point_sprite_mask &
                                    (1 << (var->data.location -
                                           VARYING_SLOT_VAR0)))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var->data.location);
                        }
                } else {
                        emit_vertex_input(c, loc);
                }
        }
}

static void
ntq_setup_outputs(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4; i++)
                        add_output(c, loc + i, var->data.location, i);

                if (c->stage == QSTAGE_FRAG) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                        case FRAG_RESULT_DATA0:
                                c->output_color_index = loc;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}

static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}

/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}

static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}

static void
ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers().
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, 0);
}

static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
        nir_const_value *const_offset;
        unsigned offset;
        struct qreg *dest = NULL;

        if (info->has_dest) {
                dest = ntq_get_dest(c, &instr->dest);
        }

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                if (const_offset) {
                        offset = instr->const_index[0] + const_offset->u32[0];
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) {
                                *dest = qir_uniform(c, QUNIFORM_UNIFORM,
                                                    offset);
                        } else {
                                *dest = qir_uniform(c, offset -
                                                    VC4_NIR_STATE_UNIFORM_OFFSET,
                                                    0);
                        }
                } else {
                        *dest = indirect_uniform_load(c, instr);
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        dest[i] = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                              instr->const_index[0] * 4 + i);
                }
                break;

        case nir_intrinsic_load_sample_mask_in:
                *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0);
                break;

        case nir_intrinsic_load_input:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                assert(const_offset && "vc4 doesn't support indirect inputs");
                if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
                        assert(const_offset->u32[0] == 0);
                        /* Reads of the per-sample color need to be done in
                         * order.
                         */
                        int sample_index = (instr->const_index[0] -
                                            VC4_NIR_TLB_COLOR_READ_INPUT);
                        for (int i = 0; i <= sample_index; i++) {
                                if (c->color_reads[i].file == QFILE_NULL) {
                                        c->color_reads[i] =
                                                qir_TLB_COLOR_READ(c);
                                }
                        }
                        *dest = c->color_reads[sample_index];
                } else {
                        offset = instr->const_index[0] + const_offset->u32[0];
                        *dest = c->inputs[offset];
                }
                break;

        case nir_intrinsic_store_output:
                const_offset = nir_src_as_const_value(instr->src[1]);
                assert(const_offset && "vc4 doesn't support indirect outputs");
                offset = instr->const_index[0] + const_offset->u32[0];

                /* MSAA color outputs are the only case where we have an
                 * output that's not lowered to being a store of a single 32
                 * bit value.
                 */
                if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
                        assert(offset == c->output_color_index);
                        for (int i = 0; i < 4; i++) {
                                c->sample_colors[i] =
                                        qir_MOV(c, ntq_get_src(c, instr->src[0],
                                                               i));
                        }
                } else {
                        assert(instr->num_components == 1);
                        c->outputs[offset] =
                                qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                        c->num_outputs = MAX2(c->num_outputs, offset + 1);
                }
                break;

        case nir_intrinsic_discard:
                c->discard = qir_uniform_ui(c, ~0);
                break;

        case nir_intrinsic_discard_if:
                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);
                c->discard = qir_OR(c, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}

static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}

static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}

static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(instr, block) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);

static void
ntq_emit_loop(struct vc4_compile *c, nir_loop *nloop)
{
        fprintf(stderr, "LOOPS not fully handled. Rendering errors likely.\n");
        ntq_emit_cf_list(c, &nloop->body);
}

static void
ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}

static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}

static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}

static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(function, c->s) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}

static const nir_shader_compiler_options nir_options = {
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_ffma = true,
        .lower_flrp32 = true,
        .lower_fpow = true,
        .lower_fsat = true,
        .lower_fsqrt = true,
        .lower_negate = true,
};

static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(instr, block) {
                *count = *count + 1;
        }
        return true;
}

static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(function, nir) {
                if (!function->impl)
                        continue;
                nir_foreach_block_call(function->impl, count_nir_instrs_in_block, &count);
        }
        return count;
}

static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
               struct vc4_key *key)
{
        struct vc4_compile *c = qir_compile_init();

        c->stage = stage;
        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        c->variant_id = key->shader_state->compiled_variant_count++;

        c->key = key;
        switch (stage) {
        case QSTAGE_FRAG:
                c->fs_key = (struct vc4_fs_key *)key;
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, 0);
                }
                break;
        case QSTAGE_VERT:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        case QSTAGE_COORD:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        }

        const struct tgsi_token *tokens = key->shader_state->base.tokens;

        if (vc4_debug & VC4_DEBUG_TGSI) {
                fprintf(stderr, "%s prog %d/%d TGSI:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                tgsi_dump(tokens, 0);
        }

        c->s = tgsi_to_nir(tokens, &nir_options);
        NIR_PASS_V(c->s, nir_opt_global_to_local);
        NIR_PASS_V(c->s, nir_convert_to_ssa);

        if (stage == QSTAGE_FRAG)
                NIR_PASS_V(c->s, vc4_nir_lower_blend, c);

        struct nir_lower_tex_options tex_options = {
                /* We would need to implement txs, but we don't want the
                 * int/float conversions
                 */
                .lower_rect = false,

                /* We want to use this, but we don't want to newton-raphson
                 * its rcp.
                 */
                .lower_txp = false,

                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
         * The format swizzling applies before sRGB decode, and
         * ARB_texture_swizzle is the last thing before returning the sample.
         */
        for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
                enum pipe_format format = c->key->tex[i].format;

                if (!format)
                        continue;

                const uint8_t *format_swizzle = vc4_get_format_swizzle(format);

                for (int j = 0; j < 4; j++) {
                        uint8_t arb_swiz = c->key->tex[i].swizzle[j];

                        if (arb_swiz <= 3) {
                                tex_options.swizzles[i][j] =
                                        format_swizzle[arb_swiz];
                        } else {
                                tex_options.swizzles[i][j] = arb_swiz;
                        }

                        /* If ARB_texture_swizzle is reading from the R, G, or
                         * B channels of an sRGB texture, then we need to
                         * apply sRGB decode to this channel at sample time.
                         */
                        if (arb_swiz < 3 && util_format_is_srgb(format)) {
                                c->tex_srgb_decode[i] |= (1 << j);
                        }
                }
        }

        NIR_PASS_V(c->s, nir_normalize_cubemap_coords);
        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);

        if (c->fs_key && c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (stage == QSTAGE_FRAG)
                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
        else
                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);

        NIR_PASS_V(c->s, vc4_nir_lower_io, c);
        NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv);
        NIR_PASS_V(c->s, nir_lower_load_const_to_scalar);

        vc4_optimize_nir(c->s);

        NIR_PASS_V(c->s, nir_remove_dead_variables, nir_var_local);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              vc4->prog.fs->input_slots,
                              vc4->prog.fs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
                fprintf(stderr, "\n");
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        qir_schedule_instructions(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
                fprintf(stderr, "\n");
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        ralloc_free(c->s);

        return c;
}

static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}

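/* Snapshots the compile-time uniform stream (contents and data, plus the
 * texture sample count) into the compiled shader so it can be replayed at
 * draw time.
 */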
static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}

static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_slots];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_slots = ralloc_array(shader,
                                                   struct vc4_varying_slot,
                                                   c->num_input_slots);

                for (int i = 0; i < c->num_input_slots; i++) {
                        struct vc4_varying_slot *slot = &c->input_slots[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (slot->slot == (uint8_t)~0)
                                continue;

                        if (slot->slot == VARYING_SLOT_COL0 ||
                            slot->slot == VARYING_SLOT_COL1 ||
                            slot->slot == VARYING_SLOT_BFC0 ||
                            slot->slot == VARYING_SLOT_BFC1) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_slots[shader->num_inputs] = *slot;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }
        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}

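/* Fills in the key fields shared between the FS and VS keys: the
 * per-sampler format, swizzle, MSAA size, and compare/wrap state that
 * affect code generation, plus the enabled user clip planes.
 */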
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler_state) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}

static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_FRAGTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        if (vc4->msaa) {
                key->msaa = vc4->rasterizer->base.multisample;
                key->sample_coverage = (vc4->rasterizer->base.multisample &&
                                        vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
                key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
                key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        }

        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}

static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}

void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}

static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}

static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}

static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        free((void *)so->base.tokens);
        free(so);
}

static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}

void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}

2383 struct vc4_context *vc4 = vc4_context(pctx);
2385 struct hash_entry *entry;
2386 hash_table_foreach(vc4->fs_cache, entry) {
2387 struct vc4_compiled_shader *shader = entry->data;
2388 vc4_bo_unreference(&shader->bo);
2389 ralloc_free(shader);
2390 _mesa_hash_table_remove(vc4->fs_cache, entry);
2393 hash_table_foreach(vc4->vs_cache, entry) {
2394 struct vc4_compiled_shader *shader = entry->data;
2395 vc4_bo_unreference(&shader->bo);
2396 ralloc_free(shader);
2397 _mesa_hash_table_remove(vc4->vs_cache, entry);