/*
 * Copyright (c) 2014 Scott Mansell
 * Copyright © 2014 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "util/u_format.h"
#include "util/u_hash.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "util/hash_table.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_lowering.h"
#include "tgsi/tgsi_parse.h"
#include "compiler/nir/nir.h"
#include "compiler/nir/nir_builder.h"
#include "nir/tgsi_to_nir.h"
#include "vc4_context.h"

#ifdef USE_VC4_SIMULATOR
#include "simpenrose/simpenrose.h"
#endif

static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i);
static void
resize_qreg_array(struct vc4_compile *c,
                  struct qreg **regs,
                  uint32_t *size,
                  uint32_t decl_size)
{
        if (*size >= decl_size)
                return;

        uint32_t old_size = *size;
        *size = MAX2(*size * 2, decl_size);
        *regs = reralloc(c, *regs, struct qreg, *size);
        if (!*regs) {
                fprintf(stderr, "Malloc failure\n");
                abort();
        }

        for (uint32_t i = old_size; i < *size; i++)
                (*regs)[i] = c->undef;
}
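
/* Indirect uniform loads: the QPU has no indirectly-addressable uniform
 * file, so (as the function below shows) the load is turned into a raw
 * texture fetch (TEX_DIRECT) from a buffer whose base address comes in as
 * the QUNIFORM_UBO_ADDR uniform.  The offset is clamped first because the
 * kernel validates all texture addresses.  Roughly (an illustrative sketch,
 * not the literal hardware sequence):
 *
 *     addr = clamp(dst_offset + indirect_offset, 0, range_end - 4);
 *     result = *(uint32_t *)(ubo_base + addr);
 */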
static struct qreg
indirect_uniform_load(struct vc4_compile *c, nir_intrinsic_instr *intr)
{
        struct qreg indirect_offset = ntq_get_src(c, intr->src[0], 0);
        uint32_t offset = intr->const_index[0];
        struct vc4_compiler_ubo_range *range = NULL;
        unsigned i;
        for (i = 0; i < c->num_uniform_ranges; i++) {
                range = &c->ubo_ranges[i];
                if (offset >= range->src_offset &&
                    offset < range->src_offset + range->size) {
                        break;
                }
        }
        /* The driver-location-based offset always has to be within a declared
         * uniform range.
         */
        assert(range);
        if (!range->used) {
                range->used = true;
                range->dst_offset = c->next_ubo_dst_offset;
                c->next_ubo_dst_offset += range->size;
                c->num_ubo_ranges++;
        }

        offset -= range->src_offset;

        /* Adjust for where we stored the TGSI register base. */
        indirect_offset = qir_ADD(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     offset)));

        /* Clamp to [0, array size).  Note that MIN/MAX are signed. */
        indirect_offset = qir_MAX(c, indirect_offset, qir_uniform_ui(c, 0));
        indirect_offset = qir_MIN(c, indirect_offset,
                                  qir_uniform_ui(c, (range->dst_offset +
                                                     range->size - 4)));

        qir_TEX_DIRECT(c, indirect_offset, qir_uniform(c, QUNIFORM_UBO_ADDR, 0));
        c->num_texture_samples++;
        return qir_TEX_RESULT(c);
}
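
/* Helper used by the NIR lowering passes: driver-internal state values are
 * smuggled through NIR as load_uniform intrinsics at byte offsets of
 * (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4, far above any real uniform,
 * and decoded back into qir_uniform() calls in ntq_emit_intrinsic() below.
 */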
nir_ssa_def *vc4_nir_get_state_uniform(struct nir_builder *b,
                                       enum quniform_contents contents)
{
        nir_intrinsic_instr *intr =
                nir_intrinsic_instr_create(b->shader,
                                           nir_intrinsic_load_uniform);
        intr->const_index[0] = (VC4_NIR_STATE_UNIFORM_OFFSET + contents) * 4;
        intr->num_components = 1;
        intr->src[0] = nir_src_for_ssa(nir_imm_int(b, 0));
        nir_ssa_dest_init(&intr->instr, &intr->dest, 1, NULL);
        nir_builder_instr_insert(b, &intr->instr);
        return &intr->dest.ssa;
}
nir_ssa_def *
vc4_nir_get_swizzled_channel(nir_builder *b, nir_ssa_def **srcs, int swiz)
{
        switch (swiz) {
        default:
        case UTIL_FORMAT_SWIZZLE_NONE:
                fprintf(stderr, "warning: unknown swizzle\n");
                /* fallthrough */
        case UTIL_FORMAT_SWIZZLE_0:
                return nir_imm_float(b, 0.0);
        case UTIL_FORMAT_SWIZZLE_1:
                return nir_imm_float(b, 1.0);
        case UTIL_FORMAT_SWIZZLE_X:
        case UTIL_FORMAT_SWIZZLE_Y:
        case UTIL_FORMAT_SWIZZLE_Z:
        case UTIL_FORMAT_SWIZZLE_W:
                return srcs[swiz];
        }
}
static struct qreg *
ntq_init_ssa_def(struct vc4_compile *c, nir_ssa_def *def)
{
        struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                          def->num_components);
        _mesa_hash_table_insert(c->def_ht, def, qregs);
        return qregs;
}
static struct qreg *
ntq_get_dest(struct vc4_compile *c, nir_dest *dest)
{
        if (dest->is_ssa) {
                struct qreg *qregs = ntq_init_ssa_def(c, &dest->ssa);
                for (int i = 0; i < dest->ssa.num_components; i++)
                        qregs[i] = c->undef;
                return qregs;
        } else {
                nir_register *reg = dest->reg.reg;
                assert(dest->reg.base_offset == 0);
                assert(reg->num_array_elems == 0);
                struct hash_entry *entry =
                        _mesa_hash_table_search(c->def_ht, reg);
                return entry->data;
        }
}
static struct qreg
ntq_get_src(struct vc4_compile *c, nir_src src, int i)
{
        struct hash_entry *entry;
        if (src.is_ssa) {
                entry = _mesa_hash_table_search(c->def_ht, src.ssa);
                assert(i < src.ssa->num_components);
        } else {
                nir_register *reg = src.reg.reg;
                entry = _mesa_hash_table_search(c->def_ht, reg);
                assert(reg->num_array_elems == 0);
                assert(src.reg.base_offset == 0);
                assert(i < reg->num_components);
        }

        struct qreg *qregs = entry->data;
        return qregs[i];
}
static struct qreg
ntq_get_alu_src(struct vc4_compile *c, nir_alu_instr *instr,
                unsigned src)
{
        assert(util_is_power_of_two(instr->dest.write_mask));
        unsigned chan = ffs(instr->dest.write_mask) - 1;
        struct qreg r = ntq_get_src(c, instr->src[src].src,
                                    instr->src[src].swizzle[chan]);

        assert(!instr->src[src].abs);
        assert(!instr->src[src].negate);

        return r;
}
static inline struct qreg
qir_SAT(struct vc4_compile *c, struct qreg val)
{
        return qir_FMAX(c,
                        qir_FMIN(c, val, qir_uniform_f(c, 1.0)),
                        qir_uniform_f(c, 0.0));
}
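
/* The QPU's RCP/RSQ SFU lookups are low-precision estimates, so both helpers
 * below refine the result with one Newton-Raphson step.  For f(r) = 1/r - x
 * the update is r' = r * (2 - x * r); for f(r) = 1/sqrt applied to
 * f(r) = 1/r^2 - x it is r' = r * (1.5 - 0.5 * x * r * r).  Each step
 * roughly doubles the number of correct bits (approximate figure).
 */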
static struct qreg
ntq_rcp(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RCP(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 2.0),
                                    qir_FMUL(c, x, r)));

        return r;
}
static struct qreg
ntq_rsq(struct vc4_compile *c, struct qreg x)
{
        struct qreg r = qir_RSQ(c, x);

        /* Apply a Newton-Raphson step to improve the accuracy. */
        r = qir_FMUL(c, r, qir_FSUB(c,
                                    qir_uniform_f(c, 1.5),
                                    qir_FMUL(c,
                                             qir_uniform_f(c, 0.5),
                                             qir_FMUL(c, x,
                                                      qir_FMUL(c, r, r)))));

        return r;
}
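
/* sRGB-to-linear conversion, following the piecewise EOTF: values at or
 * below 0.04045 use the linear segment s / 12.92, larger values use
 * ((s + 0.055) / 1.055)^2.4.  Both branches are computed and the SF/SEL
 * pair picks one based on the sign of (s - 0.04045).
 */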
static struct qreg
qir_srgb_decode(struct vc4_compile *c, struct qreg srgb)
{
        struct qreg low = qir_FMUL(c, srgb, qir_uniform_f(c, 1.0 / 12.92));
        struct qreg high = qir_POW(c,
                                   qir_FMUL(c,
                                            qir_FADD(c,
                                                     srgb,
                                                     qir_uniform_f(c, 0.055)),
                                            qir_uniform_f(c, 1.0 / 1.055)),
                                   qir_uniform_f(c, 2.4));

        qir_SF(c, qir_FSUB(c, srgb, qir_uniform_f(c, 0.04045)));
        return qir_SEL(c, QPU_COND_NS, low, high);
}
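
/* 32-bit multiply built from the QPU's 24x24-bit multiplier.  Writing each
 * operand as a = (a >> 24) * 2^24 + a_lo, the low 32 bits of a * b reduce to
 *
 *     lolo + ((hilo + lohi) << 24)   (mod 2^32)
 *
 * since the hi*hi partial product lands entirely above bit 31.  MUL24 only
 * looks at the low 24 bits of its inputs, which is why the unmasked sources
 * can be passed directly for the "lo" halves.
 */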
static struct qreg
ntq_umul(struct vc4_compile *c, struct qreg src0, struct qreg src1)
{
        struct qreg src0_hi = qir_SHR(c, src0,
                                      qir_uniform_ui(c, 24));
        struct qreg src1_hi = qir_SHR(c, src1,
                                      qir_uniform_ui(c, 24));

        struct qreg hilo = qir_MUL24(c, src0_hi, src1);
        struct qreg lohi = qir_MUL24(c, src0, src1_hi);
        struct qreg lolo = qir_MUL24(c, src0, src1);

        return qir_ADD(c, lolo, qir_SHL(c,
                                        qir_ADD(c, hilo, lohi),
                                        qir_uniform_ui(c, 24)));
}
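
/* Depth/stencil texels arrive as raw 32-bit words with depth in the top 24
 * bits; shifting right by 8 and scaling by 1/0xffffff converts that to a
 * float in [0, 1] (the inverse of the 0xffffff scale in emit_frag_end()).
 */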
static struct qreg
ntq_scale_depth_texture(struct vc4_compile *c, struct qreg src)
{
        struct qreg depthf = qir_ITOF(c, qir_SHR(c, src,
                                                 qir_uniform_ui(c, 8)));
        return qir_FMUL(c, depthf, qir_uniform_f(c, 1.0f/0xffffff));
}
/**
 * Emits a lowered TXF_MS from an MSAA texture.
 *
 * The addressing math has been lowered in NIR, and now we just need to read
 * it like a UBO.
 */
static void
ntq_emit_txf(struct vc4_compile *c, nir_tex_instr *instr)
{
        uint32_t tile_width = 32;
        uint32_t tile_height = 32;
        uint32_t tile_size = (tile_height * tile_width *
                              VC4_MAX_SAMPLES * sizeof(uint32_t));

        unsigned unit = instr->texture_index;
        uint32_t w = align(c->key->tex[unit].msaa_width, tile_width);
        uint32_t w_tiles = w / tile_width;
        uint32_t h = align(c->key->tex[unit].msaa_height, tile_height);
        uint32_t h_tiles = h / tile_height;
        uint32_t size = w_tiles * h_tiles * tile_size;

        struct qreg addr;
        assert(instr->num_srcs == 1);
        assert(instr->src[0].src_type == nir_tex_src_coord);
        addr = ntq_get_src(c, instr->src[0].src, 0);

        /* Perform the clamping required by kernel validation. */
        addr = qir_MAX(c, addr, qir_uniform_ui(c, 0));
        addr = qir_MIN(c, addr, qir_uniform_ui(c, size - 4));

        qir_TEX_DIRECT(c, addr, qir_uniform(c, QUNIFORM_TEXTURE_MSAA_ADDR, unit));

        struct qreg tex = qir_TEX_RESULT(c);
        c->num_texture_samples++;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        enum pipe_format format = c->key->tex[unit].format;
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg scaled = ntq_scale_depth_texture(c, tex);
                for (int i = 0; i < 4; i++)
                        dest[i] = scaled;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}
static void
ntq_emit_tex(struct vc4_compile *c, nir_tex_instr *instr)
{
        struct qreg s, t, r, lod, proj, compare;
        bool is_txb = false, is_txl = false, has_proj = false;
        unsigned unit = instr->texture_index;

        if (instr->op == nir_texop_txf) {
                ntq_emit_txf(c, instr);
                return;
        }

        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        s = ntq_get_src(c, instr->src[i].src, 0);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_1D)
                                t = qir_uniform_f(c, 0.5);
                        else
                                t = ntq_get_src(c, instr->src[i].src, 1);
                        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE)
                                r = ntq_get_src(c, instr->src[i].src, 2);
                        break;
                case nir_tex_src_bias:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txb = true;
                        break;
                case nir_tex_src_lod:
                        lod = ntq_get_src(c, instr->src[i].src, 0);
                        is_txl = true;
                        break;
                case nir_tex_src_comparitor:
                        compare = ntq_get_src(c, instr->src[i].src, 0);
                        break;
                case nir_tex_src_projector:
                        proj = qir_RCP(c, ntq_get_src(c, instr->src[i].src, 0));
                        s = qir_FMUL(c, s, proj);
                        t = qir_FMUL(c, t, proj);
                        has_proj = true;
                        break;
                default:
                        unreachable("unknown texture source");
                }
        }

        struct qreg texture_u[] = {
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P0, unit),
                qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P1, unit),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
                qir_uniform(c, QUNIFORM_CONSTANT, 0),
        };
        uint32_t next_texture_u = 0;

        /* There is no native support for GL texture rectangle coordinates, so
         * we have to rescale from ([0, width], [0, height]) to ([0, 1], [0,
         * 1]).
         */
        if (instr->sampler_dim == GLSL_SAMPLER_DIM_RECT) {
                s = qir_FMUL(c, s,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_X, unit));
                t = qir_FMUL(c, t,
                             qir_uniform(c, QUNIFORM_TEXRECT_SCALE_Y, unit));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE || is_txl) {
                texture_u[2] = qir_uniform(c, QUNIFORM_TEXTURE_CONFIG_P2,
                                           unit | (is_txl << 16));
        }

        if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                struct qreg ma = qir_FMAXABS(c, qir_FMAXABS(c, s, t), r);
                struct qreg rcp_ma = qir_RCP(c, ma);
                s = qir_FMUL(c, s, rcp_ma);
                t = qir_FMUL(c, t, rcp_ma);
                r = qir_FMUL(c, r, rcp_ma);

                qir_TEX_R(c, r, texture_u[next_texture_u++]);
        } else if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP_TO_BORDER ||
                   c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                qir_TEX_R(c, qir_uniform(c, QUNIFORM_TEXTURE_BORDER_COLOR, unit),
                          texture_u[next_texture_u++]);
        }

        if (c->key->tex[unit].wrap_s == PIPE_TEX_WRAP_CLAMP) {
                s = qir_SAT(c, s);
        }

        if (c->key->tex[unit].wrap_t == PIPE_TEX_WRAP_CLAMP) {
                t = qir_SAT(c, t);
        }

        qir_TEX_T(c, t, texture_u[next_texture_u++]);

        if (is_txl || is_txb)
                qir_TEX_B(c, lod, texture_u[next_texture_u++]);

        qir_TEX_S(c, s, texture_u[next_texture_u++]);

        c->num_texture_samples++;
        struct qreg tex = qir_TEX_RESULT(c);

        enum pipe_format format = c->key->tex[unit].format;

        struct qreg *dest = ntq_get_dest(c, &instr->dest);
        if (util_format_is_depth_or_stencil(format)) {
                struct qreg normalized = ntq_scale_depth_texture(c, tex);
                struct qreg depth_output;

                struct qreg u0 = qir_uniform_f(c, 0.0f);
                struct qreg u1 = qir_uniform_f(c, 1.0f);
                if (c->key->tex[unit].compare_mode) {
                        if (has_proj)
                                compare = qir_FMUL(c, compare, proj);

                        switch (c->key->tex[unit].compare_func) {
                        case PIPE_FUNC_NEVER:
                                depth_output = qir_uniform_f(c, 0.0f);
                                break;
                        case PIPE_FUNC_ALWAYS:
                                depth_output = u1;
                                break;
                        case PIPE_FUNC_EQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZS, u1, u0);
                                break;
                        case PIPE_FUNC_NOTEQUAL:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_ZC, u1, u0);
                                break;
                        case PIPE_FUNC_GREATER:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        case PIPE_FUNC_GEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LESS:
                                qir_SF(c, qir_FSUB(c, compare, normalized));
                                depth_output = qir_SEL(c, QPU_COND_NS, u1, u0);
                                break;
                        case PIPE_FUNC_LEQUAL:
                                qir_SF(c, qir_FSUB(c, normalized, compare));
                                depth_output = qir_SEL(c, QPU_COND_NC, u1, u0);
                                break;
                        }
                } else {
                        depth_output = normalized;
                }

                for (int i = 0; i < 4; i++)
                        dest[i] = depth_output;
        } else {
                for (int i = 0; i < 4; i++)
                        dest[i] = qir_UNPACK_8_F(c, tex, i);
        }

        for (int i = 0; i < 4; i++) {
                if (c->tex_srgb_decode[unit] & (1 << i))
                        dest[i] = qir_srgb_decode(c, dest[i]);
        }
}
/**
 * Computes x - floor(x), which is tricky because our FTOI truncates (rounds
 * to zero).
 */
static struct qreg
ntq_ffract(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));
        struct qreg diff = qir_FSUB(c, src, trunc);
        qir_SF(c, diff);
        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, diff, qir_uniform_f(c, 1.0)), diff);
}

/**
 * Computes floor(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_ffloor(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was < 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, src, trunc));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FSUB(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}

/**
 * Computes ceil(x), which is tricky because our FTOI truncates (rounds to
 * zero).
 */
static struct qreg
ntq_fceil(struct vc4_compile *c, struct qreg src)
{
        struct qreg trunc = qir_ITOF(c, qir_FTOI(c, src));

        /* This will be < 0 if we truncated and the truncation was of a value
         * that was > 0 in the first place.
         */
        qir_SF(c, qir_FSUB(c, trunc, src));

        return qir_SEL(c, QPU_COND_NS,
                       qir_FADD(c, trunc, qir_uniform_f(c, 1.0)), trunc);
}
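
/* sin/cos via a truncated Taylor-style polynomial: the input is scaled by
 * 1/(2*pi) and reduced with ffract() to a phase in [-0.5, 0.5), then the
 * series is evaluated in that window, e.g. for sine roughly
 *
 *     sin(2*pi*x) ~= c0*x + c1*x^3 + c2*x^5 + ...
 *
 * with the (2*pi)^n / n! factors folded into the coefficients.  (A sketch of
 * the scheme only; accuracy near the window edges is limited.)
 */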
static struct qreg
ntq_fsin(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -2.0 * M_PI,
                pow(2.0 * M_PI, 3) / (3 * 2 * 1),
                -pow(2.0 * M_PI, 5) / (5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 7) / (7 * 6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 9) / (9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c,
                         src,
                         qir_uniform_f(c, 1.0 / (M_PI * 2.0)));

        struct qreg x = qir_FADD(c,
                                 ntq_ffract(c, scaled_x),
                                 qir_uniform_f(c, -0.5));
        struct qreg x2 = qir_FMUL(c, x, x);
        struct qreg sum = qir_FMUL(c, x, qir_uniform_f(c, coeff[0]));
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                x = qir_FMUL(c, x, x2);
                sum = qir_FADD(c,
                               sum,
                               qir_FMUL(c,
                                        x,
                                        qir_uniform_f(c, coeff[i])));
        }
        return sum;
}
static struct qreg
ntq_fcos(struct vc4_compile *c, struct qreg src)
{
        float coeff[] = {
                -1.0f,
                pow(2.0 * M_PI, 2) / (2 * 1),
                -pow(2.0 * M_PI, 4) / (4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 6) / (6 * 5 * 4 * 3 * 2 * 1),
                -pow(2.0 * M_PI, 8) / (8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
                pow(2.0 * M_PI, 10) / (10 * 9 * 8 * 7 * 6 * 5 * 4 * 3 * 2 * 1),
        };

        struct qreg scaled_x =
                qir_FMUL(c, src,
                         qir_uniform_f(c, 1.0f / (M_PI * 2.0f)));
        struct qreg x_frac = qir_FADD(c,
                                      ntq_ffract(c, scaled_x),
                                      qir_uniform_f(c, -0.5));

        struct qreg sum = qir_uniform_f(c, coeff[0]);
        struct qreg x2 = qir_FMUL(c, x_frac, x_frac);
        struct qreg x = x2; /* Current x^2, x^4, or x^6 */
        for (int i = 1; i < ARRAY_SIZE(coeff); i++) {
                if (i != 1)
                        x = qir_FMUL(c, x, x2);

                struct qreg mul = qir_FMUL(c,
                                           x,
                                           qir_uniform_f(c, coeff[i]));
                sum = qir_FADD(c, sum, mul);
        }
        return sum;
}
static struct qreg
ntq_fsign(struct vc4_compile *c, struct qreg src)
{
        struct qreg t = qir_get_temp(c);

        qir_SF(c, src);
        qir_MOV_dest(c, t, qir_uniform_f(c, 0.0));
        qir_MOV_dest(c, t, qir_uniform_f(c, 1.0))->cond = QPU_COND_ZC;
        qir_MOV_dest(c, t, qir_uniform_f(c, -1.0))->cond = QPU_COND_NS;
        return qir_MOV(c, t);
}
static void
emit_vertex_input(struct vc4_compile *c, int attr)
{
        enum pipe_format format = c->vs_key->attr_formats[attr];
        uint32_t attr_size = util_format_get_blocksize(format);

        c->vattr_sizes[attr] = align(attr_size, 4);
        for (int i = 0; i < align(attr_size, 4) / 4; i++) {
                struct qreg vpm = { QFILE_VPM, attr * 4 + i };
                c->inputs[attr * 4 + i] = qir_MOV(c, vpm);
                c->num_inputs++;
        }
}
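
/* gl_FragCoord doesn't arrive as an ordinary varying: X/Y/Z/W come from
 * dedicated fragment-pipeline inputs.  Z is delivered as a 24-bit integer
 * that gets rescaled to [0, 1], and the W term arrives as an interpolated
 * value (FRAG_W) that needs an RCP, as the function below shows.
 */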
static void
emit_fragcoord_input(struct vc4_compile *c, int attr)
{
        c->inputs[attr * 4 + 0] = qir_FRAG_X(c);
        c->inputs[attr * 4 + 1] = qir_FRAG_Y(c);
        c->inputs[attr * 4 + 2] =
                qir_FMUL(c,
                         qir_ITOF(c, qir_FRAG_Z(c)),
                         qir_uniform_f(c, 1.0 / 0xffffff));
        c->inputs[attr * 4 + 3] = qir_RCP(c, qir_FRAG_W(c));
}
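
/* Varying reads on VC4 are short instruction sequences: the rasterizer hands
 * the QPU a per-fragment interpolation partial, which is multiplied by
 * FRAG_W and then has the setup's C coefficient added (VARY_ADD_C) to finish
 * perspective-correct interpolation.  Each scalar component consumes one
 * entry in c->input_slots, which is the bookkeeping the function below grows.
 */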
static struct qreg
emit_fragment_varying(struct vc4_compile *c, gl_varying_slot slot,
                      uint8_t swizzle)
{
        uint32_t i = c->num_input_slots++;
        struct qreg vary = {
                QFILE_VARY,
                i
        };

        if (c->num_input_slots >= c->input_slots_array_size) {
                c->input_slots_array_size =
                        MAX2(4, c->input_slots_array_size * 2);

                c->input_slots = reralloc(c, c->input_slots,
                                          struct vc4_varying_slot,
                                          c->input_slots_array_size);
        }

        c->input_slots[i].slot = slot;
        c->input_slots[i].swizzle = swizzle;

        return qir_VARY_ADD_C(c, qir_FMUL(c, vary, qir_FRAG_W(c)));
}
static void
emit_fragment_input(struct vc4_compile *c, int attr, gl_varying_slot slot)
{
        for (int i = 0; i < 4; i++) {
                c->inputs[attr * 4 + i] =
                        emit_fragment_varying(c, slot, i);
                c->num_inputs++;
        }
}
static void
add_output(struct vc4_compile *c,
           uint32_t decl_offset,
           uint8_t slot,
           uint8_t swizzle)
{
        uint32_t old_array_size = c->outputs_array_size;
        resize_qreg_array(c, &c->outputs, &c->outputs_array_size,
                          decl_offset + 1);

        if (old_array_size != c->outputs_array_size) {
                c->output_slots = reralloc(c,
                                           c->output_slots,
                                           struct vc4_varying_slot,
                                           c->outputs_array_size);
        }

        c->output_slots[decl_offset].slot = slot;
        c->output_slots[decl_offset].swizzle = swizzle;
}
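
/* Uniform ranges: each uniform variable's storage is recorded as a candidate
 * UBO range.  Only ranges actually hit by an indirect load get marked used
 * and assigned a dst_offset (in indirect_uniform_load() above), and only
 * those get copied out to the compiled shader's UBO later; everything else
 * stays a direct uniform read.
 */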
static void
declare_uniform_range(struct vc4_compile *c, uint32_t start, uint32_t size)
{
        unsigned array_id = c->num_uniform_ranges++;
        if (array_id >= c->ubo_ranges_array_size) {
                c->ubo_ranges_array_size = MAX2(c->ubo_ranges_array_size * 2,
                                                array_id + 1);
                c->ubo_ranges = reralloc(c, c->ubo_ranges,
                                         struct vc4_compiler_ubo_range,
                                         c->ubo_ranges_array_size);
        }

        c->ubo_ranges[array_id].dst_offset = 0;
        c->ubo_ranges[array_id].src_offset = start;
        c->ubo_ranges[array_id].size = size;
        c->ubo_ranges[array_id].used = false;
}
static bool
ntq_src_is_only_ssa_def_user(nir_src *src)
{
        if (!src->is_ssa)
                return false;

        if (!list_empty(&src->ssa->if_uses))
                return false;

        return (src->ssa->uses.next == &src->use_link &&
                src->ssa->uses.next->next == &src->ssa->uses);
}
/**
 * In general, emits a nir_pack_unorm_4x8 as a series of MOVs with the pack
 * flag set.
 *
 * However, as an optimization, it tries to find the instructions generating
 * the sources to be packed and just emit the pack flag there, if possible.
 */
static void
ntq_emit_pack_unorm_4x8(struct vc4_compile *c, nir_alu_instr *instr)
{
        struct qreg result = qir_get_temp(c);
        struct nir_alu_instr *vec4 = NULL;

        /* If packing from a vec4 op (as expected), identify it so that we can
         * peek back at what generated its sources.
         */
        if (instr->src[0].src.is_ssa &&
            instr->src[0].src.ssa->parent_instr->type == nir_instr_type_alu &&
            nir_instr_as_alu(instr->src[0].src.ssa->parent_instr)->op ==
            nir_op_vec4) {
                vec4 = nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        }

        /* If the pack is replicating the same channel 4 times, use the 8888
         * pack flag.  This is common for blending using the alpha
         * channel.
         */
        if (instr->src[0].swizzle[0] == instr->src[0].swizzle[1] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[2] &&
            instr->src[0].swizzle[0] == instr->src[0].swizzle[3]) {
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                *dest = qir_PACK_8888_F(c,
                                        ntq_get_src(c, instr->src[0].src,
                                                    instr->src[0].swizzle[0]));
                return;
        }

        for (int i = 0; i < 4; i++) {
                int swiz = instr->src[0].swizzle[i];
                struct qreg src;
                if (vec4) {
                        src = ntq_get_src(c, vec4->src[swiz].src,
                                          vec4->src[swiz].swizzle[0]);
                } else {
                        src = ntq_get_src(c, instr->src[0].src, swiz);
                }

                if (vec4 &&
                    ntq_src_is_only_ssa_def_user(&vec4->src[swiz].src) &&
                    src.file == QFILE_TEMP &&
                    c->defs[src.index] &&
                    qir_is_mul(c->defs[src.index]) &&
                    !c->defs[src.index]->dst.pack) {
                        struct qinst *rewrite = c->defs[src.index];
                        c->defs[src.index] = NULL;
                        rewrite->dst = result;
                        rewrite->dst.pack = QPU_PACK_MUL_8A + i;
                } else {
                        qir_PACK_8_F(c, result, src, i);
                }
        }

        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        *dest = result;
}
/** Handles sign-extended bitfield extracts for 16 bits. */
static struct qreg
ntq_emit_ibfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 16);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 16 == 0);

        return qir_UNPACK_16_I(c, base, offset_bit / 16);
}

/** Handles unsigned bitfield extracts for 8 bits. */
static struct qreg
ntq_emit_ubfe(struct vc4_compile *c, struct qreg base, struct qreg offset,
              struct qreg bits)
{
        assert(bits.file == QFILE_UNIF &&
               c->uniform_contents[bits.index] == QUNIFORM_CONSTANT &&
               c->uniform_data[bits.index] == 8);

        assert(offset.file == QFILE_UNIF &&
               c->uniform_contents[offset.index] == QUNIFORM_CONSTANT);
        int offset_bit = c->uniform_data[offset.index];
        assert(offset_bit % 8 == 0);

        return qir_UNPACK_8_I(c, base, offset_bit / 8);
}
/**
 * If compare_instr is a valid comparison instruction, emits the
 * compare_instr's comparison and returns the sel_instr's return value based
 * on the compare_instr's result.
 */
static bool
ntq_emit_comparison(struct vc4_compile *c, struct qreg *dest,
                    nir_alu_instr *compare_instr,
                    nir_alu_instr *sel_instr)
{
        enum qpu_cond cond;

        switch (compare_instr->op) {
        case nir_op_feq:
        case nir_op_ieq:
        case nir_op_seq:
                cond = QPU_COND_ZS;
                break;
        case nir_op_fne:
        case nir_op_ine:
        case nir_op_sne:
                cond = QPU_COND_ZC;
                break;
        case nir_op_fge:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_sge:
                cond = QPU_COND_NC;
                break;
        case nir_op_flt:
        case nir_op_ilt:
        case nir_op_slt:
                cond = QPU_COND_NS;
                break;
        default:
                return false;
        }

        struct qreg src0 = ntq_get_alu_src(c, compare_instr, 0);
        struct qreg src1 = ntq_get_alu_src(c, compare_instr, 1);

        if (nir_op_infos[compare_instr->op].input_types[0] == nir_type_float)
                qir_SF(c, qir_FSUB(c, src0, src1));
        else
                qir_SF(c, qir_SUB(c, src0, src1));

        switch (sel_instr->op) {
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
                *dest = qir_SEL(c, cond,
                                qir_uniform_f(c, 1.0), qir_uniform_f(c, 0.0));
                break;

        case nir_op_bcsel:
                *dest = qir_SEL(c, cond,
                                ntq_get_alu_src(c, sel_instr, 1),
                                ntq_get_alu_src(c, sel_instr, 2));
                break;

        default:
                *dest = qir_SEL(c, cond,
                                qir_uniform_ui(c, ~0), qir_uniform_ui(c, 0));
                break;
        }

        return true;
}
/**
 * Attempts to fold a comparison generating a boolean result into the
 * condition code for selecting between two values, instead of comparing the
 * boolean result against 0 to generate the condition code.
 */
static struct qreg ntq_emit_bcsel(struct vc4_compile *c, nir_alu_instr *instr,
                                  struct qreg *src)
{
        if (!instr->src[0].src.is_ssa)
                goto out;
        nir_alu_instr *compare =
                nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);
        if (!compare)
                goto out;

        struct qreg dest;
        if (ntq_emit_comparison(c, &dest, compare, instr))
                return dest;

out:
        qir_SF(c, src[0]);
        return qir_SEL(c, QPU_COND_NS, src[1], src[2]);
}
static void
ntq_emit_alu(struct vc4_compile *c, nir_alu_instr *instr)
{
        /* Vectors are special in that they have non-scalarized writemasks,
         * and just take the first swizzle channel for each argument in order
         * into each writemask channel.
         */
        if (instr->op == nir_op_vec2 ||
            instr->op == nir_op_vec3 ||
            instr->op == nir_op_vec4) {
                struct qreg srcs[4];
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        srcs[i] = ntq_get_src(c, instr->src[i].src,
                                              instr->src[i].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++)
                        dest[i] = srcs[i];
                return;
        }

        if (instr->op == nir_op_pack_unorm_4x8) {
                ntq_emit_pack_unorm_4x8(c, instr);
                return;
        }

        if (instr->op == nir_op_unpack_unorm_4x8) {
                struct qreg src = ntq_get_src(c, instr->src[0].src,
                                              instr->src[0].swizzle[0]);
                struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
                for (int i = 0; i < 4; i++) {
                        if (instr->dest.write_mask & (1 << i))
                                dest[i] = qir_UNPACK_8_F(c, src, i);
                }
                return;
        }

        /* General case: We can just grab the one used channel per src. */
        struct qreg src[nir_op_infos[instr->op].num_inputs];
        for (int i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
                src[i] = ntq_get_alu_src(c, instr, i);
        }

        /* Pick the channel to store the output in. */
        assert(!instr->dest.saturate);
        struct qreg *dest = ntq_get_dest(c, &instr->dest.dest);
        assert(util_is_power_of_two(instr->dest.write_mask));
        dest += ffs(instr->dest.write_mask) - 1;

        switch (instr->op) {
        case nir_op_fmov:
        case nir_op_imov:
                *dest = qir_MOV(c, src[0]);
                break;
        case nir_op_fmul:
                *dest = qir_FMUL(c, src[0], src[1]);
                break;
        case nir_op_fadd:
                *dest = qir_FADD(c, src[0], src[1]);
                break;
        case nir_op_fsub:
                *dest = qir_FSUB(c, src[0], src[1]);
                break;
        case nir_op_fmin:
                *dest = qir_FMIN(c, src[0], src[1]);
                break;
        case nir_op_fmax:
                *dest = qir_FMAX(c, src[0], src[1]);
                break;
        case nir_op_f2i:
        case nir_op_f2u:
                *dest = qir_FTOI(c, src[0]);
                break;
        case nir_op_i2f:
        case nir_op_u2f:
                *dest = qir_ITOF(c, src[0]);
                break;
        case nir_op_b2f:
                *dest = qir_AND(c, src[0], qir_uniform_f(c, 1.0));
                break;
        case nir_op_b2i:
                *dest = qir_AND(c, src[0], qir_uniform_ui(c, 1));
                break;
        case nir_op_i2b:
        case nir_op_f2b:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC,
                                qir_uniform_ui(c, ~0),
                                qir_uniform_ui(c, 0));
                break;
        case nir_op_iadd:
                *dest = qir_ADD(c, src[0], src[1]);
                break;
        case nir_op_ushr:
                *dest = qir_SHR(c, src[0], src[1]);
                break;
        case nir_op_isub:
                *dest = qir_SUB(c, src[0], src[1]);
                break;
        case nir_op_ishr:
                *dest = qir_ASR(c, src[0], src[1]);
                break;
        case nir_op_ishl:
                *dest = qir_SHL(c, src[0], src[1]);
                break;
        case nir_op_imin:
                *dest = qir_MIN(c, src[0], src[1]);
                break;
        case nir_op_imax:
                *dest = qir_MAX(c, src[0], src[1]);
                break;
        case nir_op_iand:
                *dest = qir_AND(c, src[0], src[1]);
                break;
        case nir_op_ior:
                *dest = qir_OR(c, src[0], src[1]);
                break;
        case nir_op_ixor:
                *dest = qir_XOR(c, src[0], src[1]);
                break;
        case nir_op_inot:
                *dest = qir_NOT(c, src[0]);
                break;
        case nir_op_imul:
                *dest = ntq_umul(c, src[0], src[1]);
                break;
        case nir_op_seq:
        case nir_op_sne:
        case nir_op_sge:
        case nir_op_slt:
        case nir_op_feq:
        case nir_op_fne:
        case nir_op_fge:
        case nir_op_flt:
        case nir_op_ieq:
        case nir_op_ine:
        case nir_op_ige:
        case nir_op_uge:
        case nir_op_ilt:
                if (!ntq_emit_comparison(c, dest, instr, instr)) {
                        fprintf(stderr, "Bad comparison instruction\n");
                }
                break;
        case nir_op_bcsel:
                *dest = ntq_emit_bcsel(c, instr, src);
                break;
        case nir_op_fcsel:
                qir_SF(c, src[0]);
                *dest = qir_SEL(c, QPU_COND_ZC, src[1], src[2]);
                break;
        case nir_op_frcp:
                *dest = ntq_rcp(c, src[0]);
                break;
        case nir_op_frsq:
                *dest = ntq_rsq(c, src[0]);
                break;
        case nir_op_fexp2:
                *dest = qir_EXP2(c, src[0]);
                break;
        case nir_op_flog2:
                *dest = qir_LOG2(c, src[0]);
                break;
        case nir_op_ftrunc:
                *dest = qir_ITOF(c, qir_FTOI(c, src[0]));
                break;
        case nir_op_fceil:
                *dest = ntq_fceil(c, src[0]);
                break;
        case nir_op_ffract:
                *dest = ntq_ffract(c, src[0]);
                break;
        case nir_op_ffloor:
                *dest = ntq_ffloor(c, src[0]);
                break;
        case nir_op_fsin:
                *dest = ntq_fsin(c, src[0]);
                break;
        case nir_op_fcos:
                *dest = ntq_fcos(c, src[0]);
                break;
        case nir_op_fsign:
                *dest = ntq_fsign(c, src[0]);
                break;
        case nir_op_fabs:
                *dest = qir_FMAXABS(c, src[0], src[0]);
                break;
        case nir_op_iabs:
                *dest = qir_MAX(c, src[0],
                                qir_SUB(c, qir_uniform_ui(c, 0), src[0]));
                break;
        case nir_op_ibitfield_extract:
                *dest = ntq_emit_ibfe(c, src[0], src[1], src[2]);
                break;
        case nir_op_ubitfield_extract:
                *dest = ntq_emit_ubfe(c, src[0], src[1], src[2]);
                break;
        case nir_op_usadd_4x8:
                *dest = qir_V8ADDS(c, src[0], src[1]);
                break;
        case nir_op_ussub_4x8:
                *dest = qir_V8SUBS(c, src[0], src[1]);
                break;
        case nir_op_umin_4x8:
                *dest = qir_V8MIN(c, src[0], src[1]);
                break;
        case nir_op_umax_4x8:
                *dest = qir_V8MAX(c, src[0], src[1]);
                break;
        case nir_op_umul_unorm_4x8:
                *dest = qir_V8MULD(c, src[0], src[1]);
                break;
        default:
                fprintf(stderr, "unknown NIR ALU inst: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
emit_frag_end(struct vc4_compile *c)
{
        struct qreg color;
        if (c->output_color_index != -1) {
                color = c->outputs[c->output_color_index];
        } else {
                color = qir_uniform_ui(c, 0);
        }

        if (c->discard.file != QFILE_NULL)
                qir_TLB_DISCARD_SETUP(c, c->discard);

        if (c->fs_key->stencil_enabled) {
                qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 0));
                if (c->fs_key->stencil_twoside) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 1));
                }
                if (c->fs_key->stencil_full_writemasks) {
                        qir_TLB_STENCIL_SETUP(c, qir_uniform(c, QUNIFORM_STENCIL, 2));
                }
        }

        if (c->output_sample_mask_index != -1) {
                qir_MS_MASK(c, c->outputs[c->output_sample_mask_index]);
        }

        if (c->fs_key->depth_enabled) {
                struct qreg z;
                if (c->output_position_index != -1) {
                        z = qir_FTOI(c, qir_FMUL(c, c->outputs[c->output_position_index + 2],
                                                 qir_uniform_f(c, 0xffffff)));
                } else {
                        z = qir_FRAG_Z(c);
                }
                qir_TLB_Z_WRITE(c, z);
        }

        if (!c->msaa_per_sample_output) {
                qir_TLB_COLOR_WRITE(c, color);
        } else {
                for (int i = 0; i < VC4_MAX_SAMPLES; i++)
                        qir_TLB_COLOR_WRITE_MS(c, c->sample_colors[i]);
        }
}
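
/* Packs the viewport-transformed X and Y into a single VPM word as a pair
 * of 16-bit values, using the A-register 16a/16b pack modes on the FTOI
 * result (QPU_PACK_A_16A + i selects the half-word).  This is the screen
 * coordinate format the shaded-vertex record wants.
 */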
static void
emit_scaled_viewport_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg packed = qir_get_temp(c);

        for (int i = 0; i < 2; i++) {
                struct qreg scale =
                        qir_uniform(c, QUNIFORM_VIEWPORT_X_SCALE + i, 0);

                struct qreg packed_chan = packed;
                packed_chan.pack = QPU_PACK_A_16A + i;

                qir_FTOI_dest(c, packed_chan,
                              qir_FMUL(c,
                                       qir_FMUL(c,
                                                c->outputs[c->output_position_index + i],
                                                scale),
                                       rcp_w));
        }

        qir_VPM_WRITE(c, packed);
}
static void
emit_zs_write(struct vc4_compile *c, struct qreg rcp_w)
{
        struct qreg zscale = qir_uniform(c, QUNIFORM_VIEWPORT_Z_SCALE, 0);
        struct qreg zoffset = qir_uniform(c, QUNIFORM_VIEWPORT_Z_OFFSET, 0);

        qir_VPM_WRITE(c, qir_FADD(c, qir_FMUL(c, qir_FMUL(c,
                                                          c->outputs[c->output_position_index + 2],
                                                          zscale),
                                              rcp_w),
                                  zoffset));
}

static void
emit_rcp_wc_write(struct vc4_compile *c, struct qreg rcp_w)
{
        qir_VPM_WRITE(c, rcp_w);
}
static void
emit_point_size_write(struct vc4_compile *c)
{
        struct qreg point_size;

        if (c->output_point_size_index != -1)
                point_size = c->outputs[c->output_point_size_index];
        else
                point_size = qir_uniform_f(c, 1.0);

        /* Workaround: HW-2726 PTB does not handle zero-size points (BCM2835,
         * BCM21553).
         */
        point_size = qir_FMAX(c, point_size, qir_uniform_f(c, .125));

        qir_VPM_WRITE(c, point_size);
}
/**
 * Emits a VPM read of the stub vertex attribute set up by vc4_draw.c.
 *
 * The simulator insists that there be at least one vertex attribute, so
 * vc4_draw.c will emit one if it wouldn't have otherwise.  The simulator also
 * insists that all vertex attributes loaded get read by the VS/CS, so we have
 * to consume it here.
 */
static void
emit_stub_vpm_read(struct vc4_compile *c)
{
        if (c->num_inputs)
                return;

        c->vattr_sizes[0] = 4;
        struct qreg vpm = { QFILE_VPM, 0 };
        (void)qir_MOV(c, vpm);
        c->num_inputs++;
}
static void
emit_vert_end(struct vc4_compile *c,
              struct vc4_varying_slot *fs_inputs,
              uint32_t num_fs_inputs)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);

        for (int i = 0; i < num_fs_inputs; i++) {
                struct vc4_varying_slot *input = &fs_inputs[i];
                int j;

                for (j = 0; j < c->num_outputs; j++) {
                        struct vc4_varying_slot *output =
                                &c->output_slots[j];

                        if (input->slot == output->slot &&
                            input->swizzle == output->swizzle) {
                                qir_VPM_WRITE(c, c->outputs[j]);
                                break;
                        }
                }
                /* Emit padding if we didn't find a declared VS output for
                 * this FS input.
                 */
                if (j == c->num_outputs)
                        qir_VPM_WRITE(c, qir_uniform_f(c, 0.0));
        }
}
static void
emit_coord_end(struct vc4_compile *c)
{
        struct qreg rcp_w = qir_RCP(c, c->outputs[c->output_position_index + 3]);

        emit_stub_vpm_read(c);

        for (int i = 0; i < 4; i++)
                qir_VPM_WRITE(c, c->outputs[c->output_position_index + i]);

        emit_scaled_viewport_write(c, rcp_w);
        emit_zs_write(c, rcp_w);
        emit_rcp_wc_write(c, rcp_w);
        if (c->vs_key->per_vertex_point_size)
                emit_point_size_write(c);
}
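
/* Standard NIR optimization loop: each pass reports whether it made
 * progress, and the loop repeats the whole pass list until a fixpoint is
 * reached with no pass reporting progress.
 */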
static void
vc4_optimize_nir(struct nir_shader *s)
{
        bool progress;

        do {
                progress = false;

                nir_lower_vars_to_ssa(s);
                nir_lower_alu_to_scalar(s);

                progress = nir_copy_prop(s) || progress;
                progress = nir_opt_dce(s) || progress;
                progress = nir_opt_cse(s) || progress;
                progress = nir_opt_peephole_select(s) || progress;
                progress = nir_opt_algebraic(s) || progress;
                progress = nir_opt_constant_folding(s) || progress;
                progress = nir_opt_undef(s) || progress;
        } while (progress);
}
static int
driver_location_compare(const void *in_a, const void *in_b)
{
        const nir_variable *const *a = in_a;
        const nir_variable *const *b = in_b;

        return (*a)->data.driver_location - (*b)->data.driver_location;
}
static void
ntq_setup_inputs(struct vc4_compile *c)
{
        unsigned num_entries = 0;
        nir_foreach_variable(var, &c->s->inputs)
                num_entries++;

        nir_variable *vars[num_entries];

        unsigned i = 0;
        nir_foreach_variable(var, &c->s->inputs)
                vars[i++] = var;

        /* Sort the variables so that we emit the input setup in
         * driver_location order.  This is required for VPM reads, whose data
         * is fetched into the VPM in driver_location (TGSI register index)
         * order.
         */
        qsort(&vars, num_entries, sizeof(*vars), driver_location_compare);

        for (unsigned i = 0; i < num_entries; i++) {
                nir_variable *var = vars[i];
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location;

                assert(array_len == 1);
                (void)array_len;
                resize_qreg_array(c, &c->inputs, &c->inputs_array_size,
                                  (loc + 1) * 4);

                if (c->stage == QSTAGE_FRAG) {
                        if (var->data.location == VARYING_SLOT_POS) {
                                emit_fragcoord_input(c, loc);
                        } else if (var->data.location == VARYING_SLOT_FACE) {
                                c->inputs[loc * 4 + 0] = qir_FRAG_REV_FLAG(c);
                        } else if (var->data.location >= VARYING_SLOT_VAR0 &&
                                   (c->fs_key->point_sprite_mask &
                                    (1 << (var->data.location -
                                           VARYING_SLOT_VAR0)))) {
                                c->inputs[loc * 4 + 0] = c->point_x;
                                c->inputs[loc * 4 + 1] = c->point_y;
                        } else {
                                emit_fragment_input(c, loc, var->data.location);
                        }
                } else {
                        emit_vertex_input(c, loc);
                }
        }
}
static void
ntq_setup_outputs(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->outputs) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned loc = var->data.driver_location * 4;

                assert(array_len == 1);
                (void)array_len;

                for (int i = 0; i < 4; i++)
                        add_output(c, loc + i, var->data.location, i);

                if (c->stage == QSTAGE_FRAG) {
                        switch (var->data.location) {
                        case FRAG_RESULT_COLOR:
                        case FRAG_RESULT_DATA0:
                                c->output_color_index = loc;
                                break;
                        case FRAG_RESULT_DEPTH:
                                c->output_position_index = loc;
                                break;
                        case FRAG_RESULT_SAMPLE_MASK:
                                c->output_sample_mask_index = loc;
                                break;
                        }
                } else {
                        switch (var->data.location) {
                        case VARYING_SLOT_POS:
                                c->output_position_index = loc;
                                break;
                        case VARYING_SLOT_PSIZ:
                                c->output_point_size_index = loc;
                                break;
                        }
                }
        }
}
static void
ntq_setup_uniforms(struct vc4_compile *c)
{
        nir_foreach_variable(var, &c->s->uniforms) {
                unsigned array_len = MAX2(glsl_get_length(var->type), 1);
                unsigned array_elem_size = 4 * sizeof(float);

                declare_uniform_range(c, var->data.driver_location * array_elem_size,
                                      array_len * array_elem_size);
        }
}
/**
 * Sets up the mapping from nir_register to struct qreg *.
 *
 * Each nir_register gets a struct qreg per 32-bit component being stored.
 */
static void
ntq_setup_registers(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_register, nir_reg, node, list) {
                unsigned array_len = MAX2(nir_reg->num_array_elems, 1);
                struct qreg *qregs = ralloc_array(c->def_ht, struct qreg,
                                                  array_len *
                                                  nir_reg->num_components);

                _mesa_hash_table_insert(c->def_ht, nir_reg, qregs);

                for (int i = 0; i < array_len * nir_reg->num_components; i++)
                        qregs[i] = qir_uniform_ui(c, 0);
        }
}
static void
ntq_emit_load_const(struct vc4_compile *c, nir_load_const_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, instr->value.u[i]);

        _mesa_hash_table_insert(c->def_ht, &instr->def, qregs);
}
static void
ntq_emit_ssa_undef(struct vc4_compile *c, nir_ssa_undef_instr *instr)
{
        struct qreg *qregs = ntq_init_ssa_def(c, &instr->def);

        /* QIR needs there to be *some* value, so pick 0 (same as for
         * ntq_setup_registers()).
         */
        for (int i = 0; i < instr->def.num_components; i++)
                qregs[i] = qir_uniform_ui(c, 0);
}
static void
ntq_emit_intrinsic(struct vc4_compile *c, nir_intrinsic_instr *instr)
{
        const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
        nir_const_value *const_offset;
        unsigned offset;
        struct qreg *dest = NULL;

        if (info->has_dest) {
                dest = ntq_get_dest(c, &instr->dest);
        }

        switch (instr->intrinsic) {
        case nir_intrinsic_load_uniform:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                if (const_offset) {
                        offset = instr->const_index[0] + const_offset->u[0];
                        assert(offset % 4 == 0);
                        /* We need dwords */
                        offset = offset / 4;
                        if (offset < VC4_NIR_STATE_UNIFORM_OFFSET) {
                                *dest = qir_uniform(c, QUNIFORM_UNIFORM,
                                                    offset);
                        } else {
                                *dest = qir_uniform(c, offset -
                                                    VC4_NIR_STATE_UNIFORM_OFFSET,
                                                    0);
                        }
                } else {
                        *dest = indirect_uniform_load(c, instr);
                }
                break;

        case nir_intrinsic_load_user_clip_plane:
                *dest = qir_uniform(c, QUNIFORM_USER_CLIP_PLANE,
                                    instr->const_index[0]);
                break;

        case nir_intrinsic_load_sample_mask_in:
                *dest = qir_uniform(c, QUNIFORM_SAMPLE_MASK, 0);
                break;

        case nir_intrinsic_load_input:
                assert(instr->num_components == 1);
                const_offset = nir_src_as_const_value(instr->src[0]);
                assert(const_offset && "vc4 doesn't support indirect inputs");
                if (instr->const_index[0] >= VC4_NIR_TLB_COLOR_READ_INPUT) {
                        assert(const_offset->u[0] == 0);
                        /* Reads of the per-sample color need to be done in
                         * order.
                         */
                        int sample_index = (instr->const_index[0] -
                                            VC4_NIR_TLB_COLOR_READ_INPUT);
                        for (int i = 0; i <= sample_index; i++) {
                                if (c->color_reads[i].file == QFILE_NULL) {
                                        c->color_reads[i] =
                                                qir_TLB_COLOR_READ(c);
                                }
                        }
                        *dest = c->color_reads[sample_index];
                } else {
                        offset = instr->const_index[0] + const_offset->u[0];
                        *dest = c->inputs[offset];
                }
                break;

        case nir_intrinsic_store_output:
                const_offset = nir_src_as_const_value(instr->src[1]);
                assert(const_offset && "vc4 doesn't support indirect outputs");
                offset = instr->const_index[0] + const_offset->u[0];

                /* MSAA color outputs are the only case where we have an
                 * output that's not lowered to being a store of a single 32
                 * bit value.
                 */
                if (c->stage == QSTAGE_FRAG && instr->num_components == 4) {
                        assert(offset == c->output_color_index);
                        for (int i = 0; i < 4; i++) {
                                c->sample_colors[i] =
                                        qir_MOV(c, ntq_get_src(c, instr->src[0],
                                                               i));
                        }
                } else {
                        assert(instr->num_components == 1);
                        c->outputs[offset] =
                                qir_MOV(c, ntq_get_src(c, instr->src[0], 0));
                        c->num_outputs = MAX2(c->num_outputs, offset + 1);
                }
                break;

        case nir_intrinsic_discard:
                c->discard = qir_uniform_ui(c, ~0);
                break;

        case nir_intrinsic_discard_if:
                if (c->discard.file == QFILE_NULL)
                        c->discard = qir_uniform_ui(c, 0);
                c->discard = qir_OR(c, c->discard,
                                    ntq_get_src(c, instr->src[0], 0));
                break;

        default:
                fprintf(stderr, "Unknown intrinsic: ");
                nir_print_instr(&instr->instr, stderr);
                fprintf(stderr, "\n");
                break;
        }
}
static void
ntq_emit_if(struct vc4_compile *c, nir_if *if_stmt)
{
        fprintf(stderr, "general IF statements not handled.\n");
}
static void
ntq_emit_instr(struct vc4_compile *c, nir_instr *instr)
{
        switch (instr->type) {
        case nir_instr_type_alu:
                ntq_emit_alu(c, nir_instr_as_alu(instr));
                break;

        case nir_instr_type_intrinsic:
                ntq_emit_intrinsic(c, nir_instr_as_intrinsic(instr));
                break;

        case nir_instr_type_load_const:
                ntq_emit_load_const(c, nir_instr_as_load_const(instr));
                break;

        case nir_instr_type_ssa_undef:
                ntq_emit_ssa_undef(c, nir_instr_as_ssa_undef(instr));
                break;

        case nir_instr_type_tex:
                ntq_emit_tex(c, nir_instr_as_tex(instr));
                break;

        default:
                fprintf(stderr, "Unknown NIR instr type: ");
                nir_print_instr(instr, stderr);
                fprintf(stderr, "\n");
                abort();
        }
}
static void
ntq_emit_block(struct vc4_compile *c, nir_block *block)
{
        nir_foreach_instr(block, instr) {
                ntq_emit_instr(c, instr);
        }
}

static void
ntq_emit_cf_list(struct vc4_compile *c, struct exec_list *list)
{
        foreach_list_typed(nir_cf_node, node, node, list) {
                switch (node->type) {
                /* case nir_cf_node_loop: */
                case nir_cf_node_block:
                        ntq_emit_block(c, nir_cf_node_as_block(node));
                        break;

                case nir_cf_node_if:
                        ntq_emit_if(c, nir_cf_node_as_if(node));
                        break;

                default:
                        assert(0);
                }
        }
}

static void
ntq_emit_impl(struct vc4_compile *c, nir_function_impl *impl)
{
        ntq_setup_registers(c, &impl->registers);
        ntq_emit_cf_list(c, &impl->body);
}
static void
nir_to_qir(struct vc4_compile *c)
{
        ntq_setup_inputs(c);
        ntq_setup_outputs(c);
        ntq_setup_uniforms(c);
        ntq_setup_registers(c, &c->s->registers);

        /* Find the main function and emit the body. */
        nir_foreach_function(c->s, function) {
                assert(strcmp(function->name, "main") == 0);
                assert(function->impl);
                ntq_emit_impl(c, function->impl);
        }
}
static const nir_shader_compiler_options nir_options = {
        .lower_ffma = true,
        .lower_flrp = true,
        .lower_fpow = true,
        .lower_fsat = true,
        .lower_fsqrt = true,
        .lower_negate = true,
};
static bool
count_nir_instrs_in_block(nir_block *block, void *state)
{
        int *count = (int *) state;
        nir_foreach_instr(block, instr) {
                *count = *count + 1;
        }
        return true;
}

static int
count_nir_instrs(nir_shader *nir)
{
        int count = 0;
        nir_foreach_function(nir, function) {
                if (!function->impl)
                        continue;
                nir_foreach_block(function->impl, count_nir_instrs_in_block, &count);
        }
        return count;
}
static struct vc4_compile *
vc4_shader_ntq(struct vc4_context *vc4, enum qstage stage,
               struct vc4_key *key)
{
        struct vc4_compile *c = qir_compile_init();

        c->stage = stage;
        c->shader_state = &key->shader_state->base;
        c->program_id = key->shader_state->program_id;
        c->variant_id = key->shader_state->compiled_variant_count++;

        c->key = key;
        switch (stage) {
        case QSTAGE_FRAG:
                c->fs_key = (struct vc4_fs_key *)key;
                if (c->fs_key->is_points) {
                        c->point_x = emit_fragment_varying(c, ~0, 0);
                        c->point_y = emit_fragment_varying(c, ~0, 0);
                } else if (c->fs_key->is_lines) {
                        c->line_x = emit_fragment_varying(c, ~0, 0);
                }
                break;
        case QSTAGE_VERT:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        case QSTAGE_COORD:
                c->vs_key = (struct vc4_vs_key *)key;
                break;
        }

        const struct tgsi_token *tokens = key->shader_state->base.tokens;

        if (vc4_debug & VC4_DEBUG_TGSI) {
                fprintf(stderr, "%s prog %d/%d TGSI:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                tgsi_dump(tokens, 0);
        }

        c->s = tgsi_to_nir(tokens, &nir_options);
        nir_opt_global_to_local(c->s);
        nir_convert_to_ssa(c->s);

        if (stage == QSTAGE_FRAG)
                vc4_nir_lower_blend(c);

        struct nir_lower_tex_options tex_options = {
                /* We would need to implement txs, but we don't want the
                 * int/float conversions
                 */
                .lower_rect = false,

                /* We want to use this, but we don't want to newton-raphson
                 * the rcp.
                 */

                /* Apply swizzles to all samplers. */
                .swizzle_result = ~0,
        };

        /* Lower the format swizzle and ARB_texture_swizzle-style swizzle.
         * The format swizzling applies before sRGB decode, and
         * ARB_texture_swizzle is the last thing before returning the sample.
         */
        for (int i = 0; i < ARRAY_SIZE(key->tex); i++) {
                enum pipe_format format = c->key->tex[i].format;

                if (!format)
                        continue;

                const uint8_t *format_swizzle = vc4_get_format_swizzle(format);

                for (int j = 0; j < 4; j++) {
                        uint8_t arb_swiz = c->key->tex[i].swizzle[j];

                        if (arb_swiz <= 3) {
                                tex_options.swizzles[i][j] =
                                        format_swizzle[arb_swiz];
                        } else {
                                tex_options.swizzles[i][j] = arb_swiz;
                        }

                        /* If ARB_texture_swizzle is reading from the R, G, or
                         * B channels of an sRGB texture, then we need to
                         * apply sRGB decode to this channel at sample time.
                         */
                        if (arb_swiz < 3 && util_format_is_srgb(format)) {
                                c->tex_srgb_decode[i] |= (1 << j);
                        }
                }
        }

        nir_lower_tex(c->s, &tex_options);

        if (c->fs_key && c->fs_key->light_twoside)
                nir_lower_two_sided_color(c->s);

        if (stage == QSTAGE_FRAG)
                nir_lower_clip_fs(c->s, c->key->ucp_enables);
        else
                nir_lower_clip_vs(c->s, c->key->ucp_enables);

        vc4_nir_lower_io(c);
        vc4_nir_lower_txf_ms(c);
        nir_lower_idiv(c->s);
        nir_lower_load_const_to_scalar(c->s);

        vc4_optimize_nir(c->s);

        nir_remove_dead_variables(c->s);

        nir_convert_from_ssa(c->s, true);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d NIR instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        count_nir_instrs(c->s));
        }

        if (vc4_debug & VC4_DEBUG_NIR) {
                fprintf(stderr, "%s prog %d/%d NIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                nir_print_shader(c->s, stderr);
        }

        nir_to_qir(c);

        switch (stage) {
        case QSTAGE_FRAG:
                emit_frag_end(c);
                break;
        case QSTAGE_VERT:
                emit_vert_end(c,
                              vc4->prog.fs->input_slots,
                              vc4->prog.fs->num_inputs);
                break;
        case QSTAGE_COORD:
                emit_coord_end(c);
                break;
        }

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d pre-opt QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_optimize(c);
        qir_lower_uniforms(c);

        qir_schedule_instructions(c);

        if (vc4_debug & VC4_DEBUG_QIR) {
                fprintf(stderr, "%s prog %d/%d QIR:\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id);
                qir_dump(c);
        }

        qir_reorder_uniforms(c);
        vc4_generate_code(vc4, c);

        if (vc4_debug & VC4_DEBUG_SHADERDB) {
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d instructions\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->qpu_inst_count);
                fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d uniforms\n",
                        qir_get_stage_name(c->stage),
                        c->program_id, c->variant_id,
                        c->num_uniforms);
        }

        ralloc_free(c->s);

        return c;
}
static void *
vc4_shader_state_create(struct pipe_context *pctx,
                        const struct pipe_shader_state *cso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = CALLOC_STRUCT(vc4_uncompiled_shader);
        if (!so)
                return NULL;

        so->base.tokens = tgsi_dup_tokens(cso->tokens);
        so->program_id = vc4->next_uncompiled_program_id++;

        return so;
}
static void
copy_uniform_state_to_shader(struct vc4_compiled_shader *shader,
                             struct vc4_compile *c)
{
        int count = c->num_uniforms;
        struct vc4_shader_uniform_info *uinfo = &shader->uniforms;

        uinfo->count = count;
        uinfo->data = ralloc_array(shader, uint32_t, count);
        memcpy(uinfo->data, c->uniform_data,
               count * sizeof(*uinfo->data));
        uinfo->contents = ralloc_array(shader, enum quniform_contents, count);
        memcpy(uinfo->contents, c->uniform_contents,
               count * sizeof(*uinfo->contents));
        uinfo->num_texture_samples = c->num_texture_samples;

        vc4_set_shader_uniform_dirty_flags(shader);
}
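
/* Compiled-shader cache lookup.  Keys are the full vc4_fs_key/vc4_vs_key
 * structs, hashed and compared bytewise (see fs_cache_compare() below),
 * which is why the callers memset() their key to zero before filling it in:
 * any uninitialized padding would break the memcmp-based equality.
 */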
static struct vc4_compiled_shader *
vc4_get_compiled_shader(struct vc4_context *vc4, enum qstage stage,
                        struct vc4_key *key)
{
        struct hash_table *ht;
        uint32_t key_size;
        if (stage == QSTAGE_FRAG) {
                ht = vc4->fs_cache;
                key_size = sizeof(struct vc4_fs_key);
        } else {
                ht = vc4->vs_cache;
                key_size = sizeof(struct vc4_vs_key);
        }

        struct vc4_compiled_shader *shader;
        struct hash_entry *entry = _mesa_hash_table_search(ht, key);
        if (entry)
                return entry->data;

        struct vc4_compile *c = vc4_shader_ntq(vc4, stage, key);
        shader = rzalloc(NULL, struct vc4_compiled_shader);

        shader->program_id = vc4->next_compiled_program_id++;
        if (stage == QSTAGE_FRAG) {
                bool input_live[c->num_input_slots];

                memset(input_live, 0, sizeof(input_live));
                list_for_each_entry(struct qinst, inst, &c->instructions, link) {
                        for (int i = 0; i < qir_get_op_nsrc(inst->op); i++) {
                                if (inst->src[i].file == QFILE_VARY)
                                        input_live[inst->src[i].index] = true;
                        }
                }

                shader->input_slots = ralloc_array(shader,
                                                   struct vc4_varying_slot,
                                                   c->num_input_slots);

                for (int i = 0; i < c->num_input_slots; i++) {
                        struct vc4_varying_slot *slot = &c->input_slots[i];

                        if (!input_live[i])
                                continue;

                        /* Skip non-VS-output inputs. */
                        if (slot->slot == (uint8_t)~0)
                                continue;

                        if (slot->slot == VARYING_SLOT_COL0 ||
                            slot->slot == VARYING_SLOT_COL1 ||
                            slot->slot == VARYING_SLOT_BFC0 ||
                            slot->slot == VARYING_SLOT_BFC1) {
                                shader->color_inputs |= (1 << shader->num_inputs);
                        }

                        shader->input_slots[shader->num_inputs] = *slot;
                        shader->num_inputs++;
                }
        } else {
                shader->num_inputs = c->num_inputs;

                shader->vattr_offsets[0] = 0;
                for (int i = 0; i < 8; i++) {
                        shader->vattr_offsets[i + 1] =
                                shader->vattr_offsets[i] + c->vattr_sizes[i];

                        if (c->vattr_sizes[i])
                                shader->vattrs_live |= (1 << i);
                }
        }

        copy_uniform_state_to_shader(shader, c);
        shader->bo = vc4_bo_alloc_shader(vc4->screen, c->qpu_insts,
                                         c->qpu_inst_count * sizeof(uint64_t));

        /* Copy the compiler UBO range state to the compiled shader, dropping
         * out arrays that were never referenced by an indirect load.
         *
         * (Note that QIR dead code elimination of an array access still
         * leaves that array alive, though)
         */
        if (c->num_ubo_ranges) {
                shader->num_ubo_ranges = c->num_ubo_ranges;
                shader->ubo_ranges = ralloc_array(shader, struct vc4_ubo_range,
                                                  c->num_ubo_ranges);
                uint32_t j = 0;
                for (int i = 0; i < c->num_uniform_ranges; i++) {
                        struct vc4_compiler_ubo_range *range =
                                &c->ubo_ranges[i];
                        if (!range->used)
                                continue;

                        shader->ubo_ranges[j].dst_offset = range->dst_offset;
                        shader->ubo_ranges[j].src_offset = range->src_offset;
                        shader->ubo_ranges[j].size = range->size;
                        shader->ubo_size += c->ubo_ranges[i].size;
                        j++;
                }
        }
        if (shader->ubo_size) {
                if (vc4_debug & VC4_DEBUG_SHADERDB) {
                        fprintf(stderr, "SHADER-DB: %s prog %d/%d: %d UBO uniforms\n",
                                qir_get_stage_name(c->stage),
                                c->program_id, c->variant_id,
                                shader->ubo_size / 4);
                }
        }

        qir_compile_destroy(c);

        struct vc4_key *dup_key;
        dup_key = ralloc_size(shader, key_size);
        memcpy(dup_key, key, key_size);
        _mesa_hash_table_insert(ht, dup_key, shader);

        return shader;
}
static void
vc4_setup_shared_key(struct vc4_context *vc4, struct vc4_key *key,
                     struct vc4_texture_stateobj *texstate)
{
        for (int i = 0; i < texstate->num_textures; i++) {
                struct pipe_sampler_view *sampler = texstate->textures[i];
                struct pipe_sampler_state *sampler_state =
                        texstate->samplers[i];

                if (!sampler)
                        continue;

                key->tex[i].format = sampler->format;
                key->tex[i].swizzle[0] = sampler->swizzle_r;
                key->tex[i].swizzle[1] = sampler->swizzle_g;
                key->tex[i].swizzle[2] = sampler->swizzle_b;
                key->tex[i].swizzle[3] = sampler->swizzle_a;

                if (sampler->texture->nr_samples > 1) {
                        key->tex[i].msaa_width = sampler->texture->width0;
                        key->tex[i].msaa_height = sampler->texture->height0;
                } else if (sampler_state) {
                        key->tex[i].compare_mode = sampler_state->compare_mode;
                        key->tex[i].compare_func = sampler_state->compare_func;
                        key->tex[i].wrap_s = sampler_state->wrap_s;
                        key->tex[i].wrap_t = sampler_state->wrap_t;
                }
        }

        key->ucp_enables = vc4->rasterizer->base.clip_plane_enable;
}
static void
vc4_update_compiled_fs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_fs_key local_key;
        struct vc4_fs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_BLEND |
                            VC4_DIRTY_FRAMEBUFFER |
                            VC4_DIRTY_ZSA |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_SAMPLE_MASK |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_UNCOMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->fragtex);
        key->base.shader_state = vc4->prog.bind_fs;
        key->is_points = (prim_mode == PIPE_PRIM_POINTS);
        key->is_lines = (prim_mode >= PIPE_PRIM_LINES &&
                         prim_mode <= PIPE_PRIM_LINE_STRIP);
        key->blend = vc4->blend->rt[0];
        if (vc4->blend->logicop_enable) {
                key->logicop_func = vc4->blend->logicop_func;
        } else {
                key->logicop_func = PIPE_LOGICOP_COPY;
        }
        key->msaa = vc4->rasterizer->base.multisample;
        key->sample_coverage = (vc4->rasterizer->base.multisample &&
                                vc4->sample_mask != (1 << VC4_MAX_SAMPLES) - 1);
        key->sample_alpha_to_coverage = vc4->blend->alpha_to_coverage;
        key->sample_alpha_to_one = vc4->blend->alpha_to_one;
        if (vc4->framebuffer.cbufs[0])
                key->color_format = vc4->framebuffer.cbufs[0]->format;

        key->stencil_enabled = vc4->zsa->stencil_uniforms[0] != 0;
        key->stencil_twoside = vc4->zsa->stencil_uniforms[1] != 0;
        key->stencil_full_writemasks = vc4->zsa->stencil_uniforms[2] != 0;
        key->depth_enabled = (vc4->zsa->base.depth.enabled ||
                              key->stencil_enabled);
        if (vc4->zsa->base.alpha.enabled) {
                key->alpha_test = true;
                key->alpha_test_func = vc4->zsa->base.alpha.func;
        }

        if (key->is_points) {
                key->point_sprite_mask =
                        vc4->rasterizer->base.sprite_coord_enable;
                key->point_coord_upper_left =
                        (vc4->rasterizer->base.sprite_coord_mode ==
                         PIPE_SPRITE_COORD_UPPER_LEFT);
        }

        key->light_twoside = vc4->rasterizer->base.light_twoside;

        struct vc4_compiled_shader *old_fs = vc4->prog.fs;
        vc4->prog.fs = vc4_get_compiled_shader(vc4, QSTAGE_FRAG, &key->base);
        if (vc4->prog.fs == old_fs)
                return;

        vc4->dirty |= VC4_DIRTY_COMPILED_FS;
        if (vc4->rasterizer->base.flatshade &&
            old_fs && vc4->prog.fs->color_inputs != old_fs->color_inputs) {
                vc4->dirty |= VC4_DIRTY_FLAT_SHADE_FLAGS;
        }
}
static void
vc4_update_compiled_vs(struct vc4_context *vc4, uint8_t prim_mode)
{
        struct vc4_vs_key local_key;
        struct vc4_vs_key *key = &local_key;

        if (!(vc4->dirty & (VC4_DIRTY_PRIM_MODE |
                            VC4_DIRTY_RASTERIZER |
                            VC4_DIRTY_VERTTEX |
                            VC4_DIRTY_TEXSTATE |
                            VC4_DIRTY_VTXSTATE |
                            VC4_DIRTY_UNCOMPILED_VS |
                            VC4_DIRTY_COMPILED_FS))) {
                return;
        }

        memset(key, 0, sizeof(*key));
        vc4_setup_shared_key(vc4, &key->base, &vc4->verttex);
        key->base.shader_state = vc4->prog.bind_vs;
        key->compiled_fs_id = vc4->prog.fs->program_id;

        for (int i = 0; i < ARRAY_SIZE(key->attr_formats); i++)
                key->attr_formats[i] = vc4->vtx->pipe[i].src_format;

        key->per_vertex_point_size =
                (prim_mode == PIPE_PRIM_POINTS &&
                 vc4->rasterizer->base.point_size_per_vertex);

        struct vc4_compiled_shader *vs =
                vc4_get_compiled_shader(vc4, QSTAGE_VERT, &key->base);
        if (vs != vc4->prog.vs) {
                vc4->prog.vs = vs;
                vc4->dirty |= VC4_DIRTY_COMPILED_VS;
        }

        key->is_coord = true;
        struct vc4_compiled_shader *cs =
                vc4_get_compiled_shader(vc4, QSTAGE_COORD, &key->base);
        if (cs != vc4->prog.cs) {
                vc4->prog.cs = cs;
                vc4->dirty |= VC4_DIRTY_COMPILED_CS;
        }
}
void
vc4_update_compiled_shaders(struct vc4_context *vc4, uint8_t prim_mode)
{
        vc4_update_compiled_fs(vc4, prim_mode);
        vc4_update_compiled_vs(vc4, prim_mode);
}
static uint32_t
fs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_fs_key));
}

static uint32_t
vs_cache_hash(const void *key)
{
        return _mesa_hash_data(key, sizeof(struct vc4_vs_key));
}

static bool
fs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_fs_key)) == 0;
}

static bool
vs_cache_compare(const void *key1, const void *key2)
{
        return memcmp(key1, key2, sizeof(struct vc4_vs_key)) == 0;
}
static void
delete_from_cache_if_matches(struct hash_table *ht,
                             struct hash_entry *entry,
                             struct vc4_uncompiled_shader *so)
{
        const struct vc4_key *key = entry->key;

        if (key->shader_state == so) {
                struct vc4_compiled_shader *shader = entry->data;
                _mesa_hash_table_remove(ht, entry);
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
        }
}
static void
vc4_shader_state_delete(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_uncompiled_shader *so = hwcso;

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry)
                delete_from_cache_if_matches(vc4->fs_cache, entry, so);
        hash_table_foreach(vc4->vs_cache, entry)
                delete_from_cache_if_matches(vc4->vs_cache, entry, so);

        free((void *)so->base.tokens);
        free(so);
}
static void
vc4_fp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_fs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_FS;
}

static void
vc4_vp_state_bind(struct pipe_context *pctx, void *hwcso)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        vc4->prog.bind_vs = hwcso;
        vc4->dirty |= VC4_DIRTY_UNCOMPILED_VS;
}
void
vc4_program_init(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        pctx->create_vs_state = vc4_shader_state_create;
        pctx->delete_vs_state = vc4_shader_state_delete;

        pctx->create_fs_state = vc4_shader_state_create;
        pctx->delete_fs_state = vc4_shader_state_delete;

        pctx->bind_fs_state = vc4_fp_state_bind;
        pctx->bind_vs_state = vc4_vp_state_bind;

        vc4->fs_cache = _mesa_hash_table_create(pctx, fs_cache_hash,
                                                fs_cache_compare);
        vc4->vs_cache = _mesa_hash_table_create(pctx, vs_cache_hash,
                                                vs_cache_compare);
}
void
vc4_program_fini(struct pipe_context *pctx)
{
        struct vc4_context *vc4 = vc4_context(pctx);

        struct hash_entry *entry;
        hash_table_foreach(vc4->fs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->fs_cache, entry);
        }

        hash_table_foreach(vc4->vs_cache, entry) {
                struct vc4_compiled_shader *shader = entry->data;
                vc4_bo_unreference(&shader->bo);
                ralloc_free(shader);
                _mesa_hash_table_remove(vc4->vs_cache, entry);
        }
}