/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);
static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = intr->const_index[0];
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        c->num_texture_samples++;
        return qir_TEX_RESULT(c);
}
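/* Note: the indirect load above is issued through the TMU as a general
 * memory fetch (qir_TEX_DIRECT/qir_TEX_RESULT), which is why it counts as a
 * texture sample.  The MIN/MAX clamp keeps the computed address inside the
 * UBO range, in the same spirit as the kernel-validation clamping done for
 * TXF in ntq_emit_txf() below.
 */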
nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
                                       enum quniform_contents contents)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_load_uniform);
        intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4;
        intr->num_components = 1;
        intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, 32, NULL);
        nir_builder_instr_insert(b, &intr->instr);
        return &intr->dest.ssa;
}
static nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* FALLTHROUGH */
        case UTIL_FORMAT_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}
static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}
static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}
static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}
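/* One Newton-Raphson step for 1/x: given an estimate r0 = (1 - e) / x, the
 * refinement r1 = r0 * (2 - x*r0) yields r1 = (1 - e^2) / x, roughly
 * doubling the number of correct bits of the hardware RCP estimate.
 */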
static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
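/* The Newton-Raphson step for 1/sqrt(x) is r1 = r0 * (1.5 - 0.5 * x * r0^2),
 * which likewise converges quadratically on the hardware RSQ estimate.
 */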
static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c, srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL(c, QPU_COND_NS, low, high);
}
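/* This is the standard sRGB EOTF:
 *
 *     linear = srgb / 12.92                    if srgb <= 0.04045
 *     linear = ((srgb + 0.055) / 1.055)^2.4    otherwise
 *
 * SF on (srgb - 0.04045) is negative exactly in the low segment, so the
 * NS-conditional SEL picks "low" there and "high" elsewhere.
 */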
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}
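/* Why this works: write a = a_hi * 2^24 + a_lo and b = b_hi * 2^24 + b_lo.
 * Then a * b = a_lo*b_lo + (a_hi*b_lo + a_lo*b_hi) * 2^24 + a_hi*b_hi * 2^48,
 * and the last term vanishes mod 2^32, so the three 24-bit multiplies above
 * (MUL24 only reads the low 24 bits of each operand) reconstruct the full
 * 32-bit product.
 */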
static struct qreg
ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
{
        struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
                                                 qir_uniform_ui(c, 8)));
        return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
}
/**
 * Emits a lowered TXF_MS from an MSAA texture.
 *
 * The addressing math has been lowered in NIR, and now we just need to read
 * it like a UBO.
 */
static void
ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
{
        uint32_t tile_width = 32;
        uint32_t tile_height = 32;
        uint32_t tile_size = (tile_height * tile_width *
                              VC4_MAX_SAMPLES * sizeof(uint32_t));

        unsigned unit = instr->texture_index;
        uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
        uint32_t w_tiles = w / tile_width;
        uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
        uint32_t h_tiles = h / tile_height;
        uint32_t size = w_tiles * h_tiles * tile_size;

        struct qreg addr;
        assert(instr->num_srcs == 1);
        assert(instr->src[0].src_type == nir_tex_src_coord);
        addr = ntq_get_src(c, instr->src[0].src, 0);

        /* Perform the clamping required by kernel validation. */
        addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
        addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));

        qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));

        struct qreg tex = qir_TEX_RESULT(c);
        c->num_texture_samples++;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        enum pipe_format format = c->key->tex[unit].format;
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg scaled = ntq_scale_depth_texture(c, tex);
                for (int i = 0; i < 4; i++)
                        dest[i] = scaled;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}
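/* Note: the byte address consumed here was computed by the
 * vc4_nir_lower_txf_ms pass from the x/y/sample coordinates of the
 * 32x32-pixel, VC4_MAX_SAMPLES-deep tiles described above; all that is left
 * at this level is the bounds clamp that the kernel's shader validator
 * requires.
 */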
static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->texture_index;

        if (instr->op == nir_texop_txf) {
                ntq_emit_txf(c, instr);
                return;
        }

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
                struct qreg rcp_ma = qir_RCP(c, ma);
                s = qir_FMUL(c, s, rcp_ma);
                t = qir_FMUL(c, t, rcp_ma);
                r = qir_FMUL(c, r, rcp_ma);

                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg normalized = ntq_scale_depth_texture(c, tex);
                struct qreg depth_output;

                struct qreg u0 = qir_uniform_f(c, 0.0f);
                struct qreg u1 = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = u1;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        dest[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}
/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
}
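/* Example: src = -0.7 gives trunc = 0.0 and diff = -0.7; the sign flag is
 * set, so we return -0.7 + 1.0 = 0.3, keeping the result in [0, 1).
 */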
/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}
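/* Example: src = -0.7 gives trunc = 0.0 and src - trunc = -0.7 < 0, so we
 * return 0.0 - 1.0 = -1.0.  For src >= 0, truncation already equals floor.
 */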
/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}
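/* Example: src = 0.7 gives trunc = 0.0 and trunc - src = -0.7 < 0, so we
 * return 0.0 + 1.0 = 1.0.  For src <= 0, truncation already equals ceil.
 */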
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}
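/* This evaluates sin(2*pi*src) as a Taylor series: x = fract(src) - 0.5
 * range-reduces the argument to [-0.5, 0.5], and since
 * sin(2*pi*(x + 0.5)) = -sin(2*pi*x), the odd-power coefficients
 * (2*pi)^(2n+1)/(2n+1)! are emitted with flipped signs, starting at -2*pi.
 */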
static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                if (i == 0)
                        sum = mul;
                else
                        sum = qir_FADD(c, sum, mul);
        }
        return sum;
}
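/* Same range reduction as ntq_fsin(): cos(2*pi*(x + 0.5)) = -cos(2*pi*x),
 * so the even-power series starts at coeff[0] = -1 with every sign flipped
 * accordingly.
 */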
static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        struct qreg t = qir_get_temp(c);

        qir_SF(c, src);
        qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
        qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
        qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
        return t;
}
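/* sign(x) as a chain of conditional moves on the flags set from src: start
 * at 0.0, overwrite with 1.0 wherever the zero flag is clear (src != 0),
 * then overwrite with -1.0 wherever the negative flag is set (src < 0).
 */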
static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                c->inputs[attr * 4 + i] =
                        qir_MOV(c, qir_reg(QFILE_VPM, attr * 4 + i));
                c->num_inputs++;
        }
}
static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
        c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}
static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}
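/* Note: the hardware delivers the varying as a linearly interpolated
 * plane-equation term; multiplying by the fragment's W payload and
 * accumulating the C coefficient (qir_VARY_ADD_C) is what makes the result
 * perspective correct.
 */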
static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}
static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}
static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}
static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}
/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * flag set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        /* If the pack is replicating the same channel 4 times, use the 8888
         * pack flag.  This is common for blending using the alpha
         * channel.
         */
        if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                *dest = qir_PACK_8888_F(c,
                                        ntq_get_src(c, instr->src[0].src,
                                                    instr->src[0].swizzle[0]));
                return;
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                } else {
                        qir_PACK_8_F(c, result, src, i);
                }
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        *dest = result;
}
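/* The rewrite above retargets a multiply whose only user is the vec4
 * feeding this pack: its destination becomes "result" with the MUL unit's
 * 8-bit pack (QPU_PACK_MUL_8A + i) writing byte i directly, saving the
 * separate qir_PACK_8_F() MOV for that channel.
 */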
/** Handles sign-extended bitfield extracts for 16 bits. */
static struct qreg
ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 16);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 16 == 0);

        return qir_UNPACK_16_I(c, base, offset_bit / 16);
}
/** Handles unsigned bitfield extracts for 8 bits. */
static struct qreg
ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 8);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 8 == 0);

        return qir_UNPACK_8_I(c, base, offset_bit / 8);
}
/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        enum qpu_cond cond;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_ieq:
        case nir_op_seq:
                cond = QPU_COND_ZS;
                break;
        case nir_op_fne:
        case nir_op_ine:
        case nir_op_sne:
                cond = QPU_COND_ZC;
                break;
        case nir_op_fge:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_sge:
                cond = QPU_COND_NC;
                break;
        case nir_op_flt:
        case nir_op_ilt:
        case nir_op_slt:
                cond = QPU_COND_NS;
                break;
        default:
                return false;
        }

        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);

        unsigned unsized_type =
                nir_alu_type_get_base_type(nir_op_infos[compare_instr->op].input_types[0]);
        if (unsized_type == nir_type_float)
                qir_SF(c, qir_FSUB(c, src0, src1));
        else
                qir_SF(c, qir_SUB(c, src0, src1));

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = qir_SEL(c, cond,
                                qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = qir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = qir_SEL(c, cond,
                                qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
                break;
        }

        return true;
}
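/* The condition-code mapping relies on SF being computed on (src0 - src1):
 * ZS (zero set) means equality, ZC inequality, NS (negative set) means
 * src0 < src1, and NC means src0 >= src1.
 */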
/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        if (!compare)
                goto out;

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        qir_SF(c, src[0]);
        return qir_SEL(c, QPU_COND_NS, src[1], src[2]);
}
static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                dest[i] = qir_UNPACK_8_F(c, src, i);
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;

        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC,
                                qir_uniform_ui(c, ~0),
                                qir_uniform_ui(c, 0));
                break;

        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;

        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;

        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
                if (!ntq_emit_comparison(c, dest, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;

        case nir_op_bcsel:
                *dest = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
                break;

        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;

        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;

        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;

        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;

        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;

        case nir_op_ibitfield_extract:
                *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_ubitfield_extract:
                *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
                break;

        case nir_op_usadd_4x8:
                *dest = qir_V8ADDS(c, src[0], src[1]);
                break;

        case nir_op_ussub_4x8:
                *dest = qir_V8SUBS(c, src[0], src[1]);
                break;

        case nir_op_umin_4x8:
                *dest = qir_V8MIN(c, src[0], src[1]);
                break;

        case nir_op_umax_4x8:
                *dest = qir_V8MAX(c, src[0], src[1]);
                break;

        case nir_op_umul_unorm_4x8:
                *dest = qir_V8MULD(c, src[0], src[1]);
                break;

        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
emit_frag_end(struct vc4_compile *c)
{
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        uint32_t discard_cond = QPU_COND_ALWAYS;
        if (c->discard.file != QFILE_NULL) {
                qir_SF(c, c->discard);
                discard_cond = QPU_COND_ZS;
        }

        if (c->fs_key->stencil_enabled) {
                qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->output_sample_mask_index != -1) {
                qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }

        if (c->fs_key->depth_enabled) {
                struct qreg z;
                if (c->output_position_index != -1) {
                        z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
                                                 qir_uniform_f(c, 0xffffff)));
                } else {
                        z = qir_FRAG_Z(c);
                }
                struct qinst *inst = qir_TLB_Z_WRITE(c, z);
                inst->cond = discard_cond;
        }

        if (!c->msaa_per_sample_output) {
                struct qinst *inst = qir_TLB_COLOR_WRITE(c, color);
                inst->cond = discard_cond;
        } else {
                for (int i = 0; i < VC4_MAX_SAMPLES; i++) {
                        struct qinst *inst = qir_TLB_COLOR_WRITE_MS(c, c->sample_colors[i]);
                        inst->cond = discard_cond;
                }
        }
}
static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg packed = qir_get_temp(c);

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                struct qreg packed_chan = packed;
                packed_chan.pack = QPU_PACK_A_16A + i;

                qir_FTOI_dest(c, packed_chan,
                              qir_FMUL(c,
                                       qir_FMUL(c,
                                                c->outputs[c->output_position_index + i],
                                                scale),
                                       rcp_w));
        }

        qir_VPM_WRITE(c, packed);
}
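/* Both FTOIs write halves of the same temporary via the A-regfile 16a/16b
 * packs, so the scaled X and Y coordinates leave the shader packed together
 * in a single 32-bit VPM word of fixed-point screen coordinates.
 */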
static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}
static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}
static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}
/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we have
 * to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        (void)qir_MOV(c, qir_reg(QFILE_VPM, 0));
        c->num_inputs++;
}
static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_slot *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_slot *input = &fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_slot *output =
                                &c->output_slots[j];

                        if (input->slot == output->slot &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}
static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}
static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                NIR_PASS_V(s, nir_lower_vars_to_ssa);
                NIR_PASS_V(s, nir_lower_alu_to_scalar);

                NIR_PASS(progress, s, nir_copy_prop);
                NIR_PASS(progress, s, nir_opt_dce);
                NIR_PASS(progress, s, nir_opt_cse);
                NIR_PASS(progress, s, nir_opt_peephole_select);
                NIR_PASS(progress, s, nir_opt_algebraic);
                NIR_PASS(progress, s, nir_opt_constant_folding);
                NIR_PASS(progress, s, nir_opt_undef);
        } while (progress);
}
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
static void
ntq_setup_inputs(struct vc4_compile *c)
{
        unsigned num_entries = 0;
        nir_foreach_variable(var, &c->s->inputs)
                num_entries++;

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->stage == QSTAGE_FRAG) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_FACE) {
                                c->inputs[loc * 4 + 0] = qir_FRAG_REV_FLAG(c);
                        } else if (var->data.location >= VARYING_SLOT_VAR0 &&
                                   (c->fs_key->point_sprite_mask &
                                    (1 << (var->data.location -
                                           VARYING_SLOT_VAR0)))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var->data.location);
                        }
                } else {
                        emit_vertex_input(c, loc);
                }
        }
}
static void
ntq_setup_outputs(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4; i++)
                        add_output(c, loc + i, var->data.location, i);

                if (c->stage == QSTAGE_FRAG) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                        case FRAG_RESULT_DATA0:
                                c->output_color_index = loc;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}
static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}
static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u32[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, 0);
}
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
        nir_const_value *const_offset;
        unsigned offset;
        struct qreg *dest = NULL;

        if (info->has_dest) {
                dest = ntq_get_dest(c, &instr->dest);
        }

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                if (const_offset) {
                        offset = instr->const_index[0] + const_offset->u32[0];
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) {
                                *dest = qir_uniform(c, QUNIFORM_UNIFORM,
                                                    offset);
                        } else {
                                *dest = qir_uniform(c, offset -
                                                    VC4_NIR_STATE_UNIFORM_OFFSET,
                                                    0);
                        }
                } else {
                        *dest = indirect_uniform_load(c, instr);
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                for (int i = 0; i < instr->num_components; i++) {
                        dest[i] = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                              instr->const_index[0] * 4 + i);
                }
                break;

        case nir_intrinsic_load_sample_mask_in:
                *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0);
                break;

        case nir_intrinsic_load_input:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                assert(const_offset && "vc4 doesn't support indirect inputs");
                if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
                        assert(const_offset->u32[0] == 0);
                        /* Reads of the per-sample color need to be done in
                         * order.
                         */
                        int sample_index = (instr->const_index[0] -
                                            VC4_NIR_TLB_COLOR_READ_INPUT);
                        for (int i = 0; i <= sample_index; i++) {
                                if (c->color_reads[i].file == QFILE_NULL) {
                                        c->color_reads[i] =
                                                qir_TLB_COLOR_READ(c);
                                }
                        }
                        *dest = c->color_reads[sample_index];
                } else {
                        offset = instr->const_index[0] + const_offset->u32[0];
                        *dest = c->inputs[offset];
                }
                break;

        case nir_intrinsic_store_output:
                const_offset = nir_src_as_const_value(instr->src[1]);
                assert(const_offset && "vc4 doesn't support indirect outputs");
                offset = instr->const_index[0] + const_offset->u32[0];

                /* MSAA color outputs are the only case where we have an
                 * output that's not lowered to being a store of a single 32
                 * bit value.
                 */
                if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
                        assert(offset == c->output_color_index);
                        for (int i = 0; i < 4; i++) {
                                c->sample_colors[i] =
                                        qir_MOV(c, ntq_get_src(c, instr->src[0],
                                                               i));
                        }
                } else {
                        assert(instr->num_components == 1);
                        c->outputs[offset] =
                                qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                        c->num_outputs = MAX2(c->num_outputs, offset + 1);
                }
                break;

        case nir_intrinsic_discard:
                c->discard = qir_uniform_ui(c, ~0);
                break;

        case nir_intrinsic_discard_if:
                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);
                c->discard = qir_OR(c, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}
static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(block, instr) {
                ntq_emit_instr(c, instr);
        }
}

static void ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list);

static void
ntq_emit_loop(struct vc4_compile *c, nir_loop *nloop)
{
        fprintf(stderr, "LOOPS not fully handled. Rendering errors likely.\n");
        ntq_emit_cf_list(c, &nloop->body);
}

static void
ntq_emit_function(struct vc4_compile *c, nir_function_impl *func)
{
        fprintf(stderr, "FUNCTIONS not handled.\n");
        abort();
}
static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                case nir_cf_node_loop:
                        ntq_emit_loop(c, nir_cf_node_as_loop(node));
                        break;

                case nir_cf_node_function:
                        ntq_emit_function(c, nir_cf_node_as_function(node));
                        break;

                default:
                        fprintf(stderr, "Unknown NIR node type\n");
                        abort();
                }
        }
}
static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(c->s, function) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
static const nir_shader_compiler_options nir_options = {
        .lower_extract_byte = true,
        .lower_extract_word = true,
        .lower_ffma = true,
        .lower_flrp32 = true,
        .lower_fpow = true,
        .lower_fsat = true,
        .lower_fsqrt = true,
        .lower_negate = true,
};
static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(block, instr) {
                *count = *count + 1;
        }
        return true;
}
static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(nir, function) {
                if (!function->impl)
                        continue;
                nir_foreach_block(function->impl, count_nir_instrs_in_block, &count);
        }
        return count;
}
static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
               struct vc4_key *key)
{
        struct vc4_compile *c = qir_compile_init();

        c->stage = stage;
        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        c->variant_id = key->shader_state->compiled_variant_count++;

        c->key = key;
        switch (stage) {
        case QSTAGE_FRAG:
                c->fs_key = (struct vc4_fs_key *)key;
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, 0);
                }
                break;
        case QSTAGE_VERT:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        case QSTAGE_COORD:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        }

        const struct tgsi_token *tokens = key->shader_state->base.tokens;

        if (vc4_debug & VC4_DEBUG_TGSI) {
                fprintf(stderr, "%s prog %d/%d TGSI:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                tgsi_dump(tokens, 0);
        }

        c->s = tgsi_to_nir(tokens, &nir_options);
        NIR_PASS_V(c->s, nir_opt_global_to_local);
        NIR_PASS_V(c->s, nir_convert_to_ssa);

        if (stage == QSTAGE_FRAG)
                NIR_PASS_V(c->s, vc4_nir_lower_blend, c);

        struct nir_lower_tex_options tex_options = {
                /* We would need to implement txs, but we don't want the
                 * int/float conversions
                 */
                .lower_rect = false,

                /* We want to use this, but we don't want to newton-raphson
                 * its rcp.
                 */
                .lower_txp = 0,

                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
         * The format swizzling applies before sRGB decode, and
         * ARB_texture_swizzle is the last thing before returning the sample.
         */
        for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
                enum pipe_format format = c->key->tex[i].format;
                if (!format)
                        continue;

                const uint8_t *format_swizzle = vc4_get_format_swizzle(format);

                for (int j = 0; j < 4; j++) {
                        uint8_t arb_swiz = c->key->tex[i].swizzle[j];

                        if (arb_swiz <= 3) {
                                tex_options.swizzles[i][j] =
                                        format_swizzle[arb_swiz];
                        } else {
                                tex_options.swizzles[i][j] = arb_swiz;
                        }

                        /* If ARB_texture_swizzle is reading from the R, G, or
                         * B channels of an sRGB texture, then we need to
                         * apply sRGB decode to this channel at sample time.
                         */
                        if (arb_swiz < 3 && util_format_is_srgb(format)) {
                                c->tex_srgb_decode[i] |= (1 << j);
                        }
                }
        }

        NIR_PASS_V(c->s, nir_lower_tex, &tex_options);

        if (c->fs_key && c->fs_key->light_twoside)
                NIR_PASS_V(c->s, nir_lower_two_sided_color);

        if (stage == QSTAGE_FRAG)
                NIR_PASS_V(c->s, nir_lower_clip_fs, c->key->ucp_enables);
        else
                NIR_PASS_V(c->s, nir_lower_clip_vs, c->key->ucp_enables);

        NIR_PASS_V(c->s, vc4_nir_lower_io, c);
        NIR_PASS_V(c->s, vc4_nir_lower_txf_ms, c);
        NIR_PASS_V(c->s, nir_lower_idiv);
        NIR_PASS_V(c->s, nir_lower_load_const_to_scalar);

        vc4_optimize_nir(c->s);

        NIR_PASS_V(c->s, nir_remove_dead_variables);
        NIR_PASS_V(c->s, nir_convert_from_ssa, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              vc4->prog.fs->input_slots,
                              vc4->prog.fs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        qir_schedule_instructions(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        ralloc_free(c->s);

        return c;
}
static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}
static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_slots];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_slots = ralloc_array(shader,
                                                   struct vc4_varying_slot,
                                                   c->num_input_slots);

                for (int i = 0; i < c->num_input_slots; i++) {
                        struct vc4_varying_slot *slot = &c->input_slots[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (slot->slot == (uint8_t)~0)
                                continue;

                        if (slot->slot == VARYING_SLOT_COL0 ||
                            slot->slot == VARYING_SLOT_COL1 ||
                            slot->slot == VARYING_SLOT_BFC0 ||
                            slot->slot == VARYING_SLOT_BFC1) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_slots[shader->num_inputs] = *slot;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }

        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler_state) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        key->msaa = vc4->rasterizer->base.multisample;
        key->sample_coverage = (vc4->rasterizer->base.multisample &&
                                vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
        key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
        key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}
void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}
static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        free((void *)so->base.tokens);
        free(so);
}
static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}
void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}
void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}