/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
28 #include "vtn_private.h"
29 #include "GLSL.std.450.h"
/* Single-precision versions of the <math.h> double-precision constants,
 * for use with 32-bit float immediates.
 */
#define M_PIf ((float) M_PI)
#define M_PI_2f ((float) M_PI_2)
#define M_PI_4f ((float) M_PI_4)
36 build_mat2_det(nir_builder *b, nir_ssa_def *col[2])
38 unsigned swiz[4] = {1, 0, 0, 0};
39 nir_ssa_def *p = nir_fmul(b, col[0], nir_swizzle(b, col[1], swiz, 2, true));
40 return nir_fsub(b, nir_channel(b, p, 0), nir_channel(b, p, 1));
44 build_mat3_det(nir_builder *b, nir_ssa_def *col[3])
46 unsigned yzx[4] = {1, 2, 0, 0};
47 unsigned zxy[4] = {2, 0, 1, 0};
51 nir_fmul(b, nir_swizzle(b, col[1], yzx, 3, true),
52 nir_swizzle(b, col[2], zxy, 3, true)));
55 nir_fmul(b, nir_swizzle(b, col[1], zxy, 3, true),
56 nir_swizzle(b, col[2], yzx, 3, true)));
58 nir_ssa_def *diff = nir_fsub(b, prod0, prod1);
60 return nir_fadd(b, nir_channel(b, diff, 0),
61 nir_fadd(b, nir_channel(b, diff, 1),
62 nir_channel(b, diff, 2)));
66 build_mat4_det(nir_builder *b, nir_ssa_def **col)
68 nir_ssa_def *subdet[4];
69 for (unsigned i = 0; i < 4; i++) {
71 for (unsigned j = 0; j < 4; j++)
72 swiz[j - (j > i)] = j;
74 nir_ssa_def *subcol[3];
75 subcol[0] = nir_swizzle(b, col[1], swiz, 3, true);
76 subcol[1] = nir_swizzle(b, col[2], swiz, 3, true);
77 subcol[2] = nir_swizzle(b, col[3], swiz, 3, true);
79 subdet[i] = build_mat3_det(b, subcol);
82 nir_ssa_def *prod = nir_fmul(b, col[0], nir_vec(b, subdet, 4));
84 return nir_fadd(b, nir_fsub(b, nir_channel(b, prod, 0),
85 nir_channel(b, prod, 1)),
86 nir_fsub(b, nir_channel(b, prod, 2),
87 nir_channel(b, prod, 3)));
91 build_mat_det(struct vtn_builder *b, struct vtn_ssa_value *src)
93 unsigned size = glsl_get_vector_elements(src->type);
96 for (unsigned i = 0; i < size; i++)
97 cols[i] = src->elems[i]->def;
100 case 2: return build_mat2_det(&b->nb, cols);
101 case 3: return build_mat3_det(&b->nb, cols);
102 case 4: return build_mat4_det(&b->nb, cols);
104 unreachable("Invalid matrix size");
108 /* Computes the determinate of the submatrix given by taking src and
109 * removing the specified row and column.
112 build_mat_subdet(struct nir_builder *b, struct vtn_ssa_value *src,
113 unsigned size, unsigned row, unsigned col)
115 assert(row < size && col < size);
117 return nir_channel(b, src->elems[1 - col]->def, 1 - row);
119 /* Swizzle to get all but the specified row */
121 for (unsigned j = 0; j < 4; j++)
122 swiz[j - (j > row)] = j;
124 /* Grab all but the specified column */
125 nir_ssa_def *subcol[3];
126 for (unsigned j = 0; j < size; j++) {
128 subcol[j - (j > col)] = nir_swizzle(b, src->elems[j]->def,
129 swiz, size - 1, true);
134 return build_mat2_det(b, subcol);
137 return build_mat3_det(b, subcol);
142 static struct vtn_ssa_value *
143 matrix_inverse(struct vtn_builder *b, struct vtn_ssa_value *src)
145 nir_ssa_def *adj_col[4];
146 unsigned size = glsl_get_vector_elements(src->type);
148 /* Build up an adjugate matrix */
149 for (unsigned c = 0; c < size; c++) {
150 nir_ssa_def *elem[4];
151 for (unsigned r = 0; r < size; r++) {
152 elem[r] = build_mat_subdet(&b->nb, src, size, c, r);
155 elem[r] = nir_fneg(&b->nb, elem[r]);
158 adj_col[c] = nir_vec(&b->nb, elem, size);
161 nir_ssa_def *det_inv = nir_frcp(&b->nb, build_mat_det(b, src));
163 struct vtn_ssa_value *val = vtn_create_ssa_value(b, src->type);
164 for (unsigned i = 0; i < size; i++)
165 val->elems[i]->def = nir_fmul(&b->nb, adj_col[i], det_inv);
171 build_length(nir_builder *b, nir_ssa_def *vec)
173 switch (vec->num_components) {
174 case 1: return nir_fsqrt(b, nir_fmul(b, vec, vec));
175 case 2: return nir_fsqrt(b, nir_fdot2(b, vec, vec));
176 case 3: return nir_fsqrt(b, nir_fdot3(b, vec, vec));
177 case 4: return nir_fsqrt(b, nir_fdot4(b, vec, vec));
179 unreachable("Invalid number of components");
183 static inline nir_ssa_def *
184 build_fclamp(nir_builder *b,
185 nir_ssa_def *x, nir_ssa_def *min_val, nir_ssa_def *max_val)
187 return nir_fmin(b, nir_fmax(b, x, min_val), max_val);
194 build_exp(nir_builder *b, nir_ssa_def *x)
196 return nir_fexp2(b, nir_fmul(b, x, nir_imm_float(b, M_LOG2E)));
200 * Return ln(x) - the natural logarithm of x.
203 build_log(nir_builder *b, nir_ssa_def *x)
205 return nir_fmul(b, nir_flog2(b, x), nir_imm_float(b, 1.0 / M_LOG2E));
209 build_asin(nir_builder *b, nir_ssa_def *x)
211 nir_ssa_def *abs_x = nir_fabs(b, x);
212 return nir_fmul(b, nir_fsign(b, x),
213 nir_fsub(b, nir_imm_float(b, M_PI_2f),
214 nir_fmul(b, nir_fsqrt(b, nir_fsub(b, nir_imm_float(b, 1.0f), abs_x)),
215 nir_fadd(b, nir_imm_float(b, M_PI_2f),
217 nir_fadd(b, nir_imm_float(b, M_PI_4f - 1.0f),
219 nir_fadd(b, nir_imm_float(b, 0.086566724f),
221 nir_imm_float(b, -0.03102955f))))))))));
225 * Compute xs[0] + xs[1] + xs[2] + ... using fadd.
228 build_fsum(nir_builder *b, nir_ssa_def **xs, int terms)
230 nir_ssa_def *accum = xs[0];
232 for (int i = 1; i < terms; i++)
233 accum = nir_fadd(b, accum, xs[i]);
239 build_atan(nir_builder *b, nir_ssa_def *y_over_x)
241 nir_ssa_def *abs_y_over_x = nir_fabs(b, y_over_x);
242 nir_ssa_def *one = nir_imm_float(b, 1.0f);
245 * range-reduction, first step:
247 * / y_over_x if |y_over_x| <= 1.0;
249 * \ 1.0 / y_over_x otherwise
251 nir_ssa_def *x = nir_fdiv(b, nir_fmin(b, abs_y_over_x, one),
252 nir_fmax(b, abs_y_over_x, one));
255 * approximate atan by evaluating polynomial:
257 * x * 0.9999793128310355 - x^3 * 0.3326756418091246 +
258 * x^5 * 0.1938924977115610 - x^7 * 0.1173503194786851 +
259 * x^9 * 0.0536813784310406 - x^11 * 0.0121323213173444
261 nir_ssa_def *x_2 = nir_fmul(b, x, x);
262 nir_ssa_def *x_3 = nir_fmul(b, x_2, x);
263 nir_ssa_def *x_5 = nir_fmul(b, x_3, x_2);
264 nir_ssa_def *x_7 = nir_fmul(b, x_5, x_2);
265 nir_ssa_def *x_9 = nir_fmul(b, x_7, x_2);
266 nir_ssa_def *x_11 = nir_fmul(b, x_9, x_2);
268 nir_ssa_def *polynomial_terms[] = {
269 nir_fmul(b, x, nir_imm_float(b, 0.9999793128310355f)),
270 nir_fmul(b, x_3, nir_imm_float(b, -0.3326756418091246f)),
271 nir_fmul(b, x_5, nir_imm_float(b, 0.1938924977115610f)),
272 nir_fmul(b, x_7, nir_imm_float(b, -0.1173503194786851f)),
273 nir_fmul(b, x_9, nir_imm_float(b, 0.0536813784310406f)),
274 nir_fmul(b, x_11, nir_imm_float(b, -0.0121323213173444f)),
278 build_fsum(b, polynomial_terms, ARRAY_SIZE(polynomial_terms));
280 /* range-reduction fixup */
281 tmp = nir_fadd(b, tmp,
283 nir_b2f(b, nir_flt(b, one, abs_y_over_x)),
284 nir_fadd(b, nir_fmul(b, tmp,
285 nir_imm_float(b, -2.0f)),
286 nir_imm_float(b, M_PI_2f))));
289 return nir_fmul(b, tmp, nir_fsign(b, y_over_x));
293 build_atan2(nir_builder *b, nir_ssa_def *y, nir_ssa_def *x)
295 nir_ssa_def *zero = nir_imm_float(b, 0.0f);
297 /* If |x| >= 1.0e-8 * |y|: */
298 nir_ssa_def *condition =
299 nir_fge(b, nir_fabs(b, x),
300 nir_fmul(b, nir_imm_float(b, 1.0e-8f), nir_fabs(b, y)));
302 /* Then...call atan(y/x) and fix it up: */
303 nir_ssa_def *atan1 = build_atan(b, nir_fdiv(b, y, x));
304 nir_ssa_def *r_then =
305 nir_bcsel(b, nir_flt(b, x, zero),
307 nir_bcsel(b, nir_fge(b, y, zero),
308 nir_imm_float(b, M_PIf),
309 nir_imm_float(b, -M_PIf))),
313 nir_ssa_def *r_else =
314 nir_fmul(b, nir_fsign(b, y), nir_imm_float(b, M_PI_2f));
316 return nir_bcsel(b, condition, r_then, r_else);
320 build_frexp(nir_builder *b, nir_ssa_def *x, nir_ssa_def **exponent)
322 nir_ssa_def *abs_x = nir_fabs(b, x);
323 nir_ssa_def *zero = nir_imm_float(b, 0.0f);
325 /* Single-precision floating-point values are stored as
330 * An exponent shift of 23 will shift the mantissa out, leaving only the
331 * exponent and sign bit (which itself may be zero, if the absolute value
332 * was taken before the bitcast and shift.
334 nir_ssa_def *exponent_shift = nir_imm_int(b, 23);
335 nir_ssa_def *exponent_bias = nir_imm_int(b, -126);
337 nir_ssa_def *sign_mantissa_mask = nir_imm_int(b, 0x807fffffu);
339 /* Exponent of floating-point values in the range [0.5, 1.0). */
340 nir_ssa_def *exponent_value = nir_imm_int(b, 0x3f000000u);
342 nir_ssa_def *is_not_zero = nir_fne(b, abs_x, zero);
345 nir_iadd(b, nir_ushr(b, abs_x, exponent_shift),
346 nir_bcsel(b, is_not_zero, exponent_bias, zero));
348 return nir_ior(b, nir_iand(b, x, sign_mantissa_mask),
349 nir_bcsel(b, is_not_zero, exponent_value, zero));
353 handle_glsl450_alu(struct vtn_builder *b, enum GLSLstd450 entrypoint,
354 const uint32_t *w, unsigned count)
356 struct nir_builder *nb = &b->nb;
357 const struct glsl_type *dest_type =
358 vtn_value(b, w[1], vtn_value_type_type)->type->type;
360 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
361 val->ssa = vtn_create_ssa_value(b, dest_type);
363 /* Collect the various SSA sources */
364 unsigned num_inputs = count - 5;
366 for (unsigned i = 0; i < num_inputs; i++)
367 src[i] = vtn_ssa_value(b, w[i + 5])->def;
370 switch (entrypoint) {
371 case GLSLstd450Round: op = nir_op_fround_even; break; /* TODO */
372 case GLSLstd450RoundEven: op = nir_op_fround_even; break;
373 case GLSLstd450Trunc: op = nir_op_ftrunc; break;
374 case GLSLstd450FAbs: op = nir_op_fabs; break;
375 case GLSLstd450SAbs: op = nir_op_iabs; break;
376 case GLSLstd450FSign: op = nir_op_fsign; break;
377 case GLSLstd450SSign: op = nir_op_isign; break;
378 case GLSLstd450Floor: op = nir_op_ffloor; break;
379 case GLSLstd450Ceil: op = nir_op_fceil; break;
380 case GLSLstd450Fract: op = nir_op_ffract; break;
381 case GLSLstd450Radians:
382 val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 0.01745329251));
384 case GLSLstd450Degrees:
385 val->ssa->def = nir_fmul(nb, src[0], nir_imm_float(nb, 57.2957795131));
387 case GLSLstd450Sin: op = nir_op_fsin; break;
388 case GLSLstd450Cos: op = nir_op_fcos; break;
390 val->ssa->def = nir_fdiv(nb, nir_fsin(nb, src[0]),
391 nir_fcos(nb, src[0]));
393 case GLSLstd450Pow: op = nir_op_fpow; break;
394 case GLSLstd450Exp2: op = nir_op_fexp2; break;
395 case GLSLstd450Log2: op = nir_op_flog2; break;
396 case GLSLstd450Sqrt: op = nir_op_fsqrt; break;
397 case GLSLstd450InverseSqrt: op = nir_op_frsq; break;
399 case GLSLstd450Modf: {
400 nir_ssa_def *sign = nir_fsign(nb, src[0]);
401 nir_ssa_def *abs = nir_fabs(nb, src[0]);
402 val->ssa->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
403 nir_store_deref_var(nb, vtn_nir_deref(b, w[6]),
404 nir_fmul(nb, sign, nir_ffloor(nb, abs)), 0xf);
408 case GLSLstd450ModfStruct: {
409 nir_ssa_def *sign = nir_fsign(nb, src[0]);
410 nir_ssa_def *abs = nir_fabs(nb, src[0]);
411 assert(glsl_type_is_struct(val->ssa->type));
412 val->ssa->elems[0]->def = nir_fmul(nb, sign, nir_ffract(nb, abs));
413 val->ssa->elems[1]->def = nir_fmul(nb, sign, nir_ffloor(nb, abs));
417 case GLSLstd450FMin: op = nir_op_fmin; break;
418 case GLSLstd450UMin: op = nir_op_umin; break;
419 case GLSLstd450SMin: op = nir_op_imin; break;
420 case GLSLstd450FMax: op = nir_op_fmax; break;
421 case GLSLstd450UMax: op = nir_op_umax; break;
422 case GLSLstd450SMax: op = nir_op_imax; break;
423 case GLSLstd450FMix: op = nir_op_flrp; break;
425 val->ssa->def = nir_sge(nb, src[1], src[0]);
428 case GLSLstd450Fma: op = nir_op_ffma; break;
429 case GLSLstd450Ldexp: op = nir_op_ldexp; break;
431 /* Packing/Unpacking functions */
432 case GLSLstd450PackSnorm4x8: op = nir_op_pack_snorm_4x8; break;
433 case GLSLstd450PackUnorm4x8: op = nir_op_pack_unorm_4x8; break;
434 case GLSLstd450PackSnorm2x16: op = nir_op_pack_snorm_2x16; break;
435 case GLSLstd450PackUnorm2x16: op = nir_op_pack_unorm_2x16; break;
436 case GLSLstd450PackHalf2x16: op = nir_op_pack_half_2x16; break;
437 case GLSLstd450UnpackSnorm4x8: op = nir_op_unpack_snorm_4x8; break;
438 case GLSLstd450UnpackUnorm4x8: op = nir_op_unpack_unorm_4x8; break;
439 case GLSLstd450UnpackSnorm2x16: op = nir_op_unpack_snorm_2x16; break;
440 case GLSLstd450UnpackUnorm2x16: op = nir_op_unpack_unorm_2x16; break;
441 case GLSLstd450UnpackHalf2x16: op = nir_op_unpack_half_2x16; break;
443 case GLSLstd450Length:
444 val->ssa->def = build_length(nb, src[0]);
446 case GLSLstd450Distance:
447 val->ssa->def = build_length(nb, nir_fsub(nb, src[0], src[1]));
449 case GLSLstd450Normalize:
450 val->ssa->def = nir_fdiv(nb, src[0], build_length(nb, src[0]));
454 val->ssa->def = build_exp(nb, src[0]);
458 val->ssa->def = build_log(nb, src[0]);
461 case GLSLstd450FClamp:
462 val->ssa->def = build_fclamp(nb, src[0], src[1], src[2]);
464 case GLSLstd450UClamp:
465 val->ssa->def = nir_umin(nb, nir_umax(nb, src[0], src[1]), src[2]);
467 case GLSLstd450SClamp:
468 val->ssa->def = nir_imin(nb, nir_imax(nb, src[0], src[1]), src[2]);
471 case GLSLstd450Cross: {
472 unsigned yzx[4] = { 1, 2, 0, 0 };
473 unsigned zxy[4] = { 2, 0, 1, 0 };
475 nir_fsub(nb, nir_fmul(nb, nir_swizzle(nb, src[0], yzx, 3, true),
476 nir_swizzle(nb, src[1], zxy, 3, true)),
477 nir_fmul(nb, nir_swizzle(nb, src[0], zxy, 3, true),
478 nir_swizzle(nb, src[1], yzx, 3, true)));
482 case GLSLstd450SmoothStep: {
483 /* t = clamp((x - edge0) / (edge1 - edge0), 0, 1) */
485 build_fclamp(nb, nir_fdiv(nb, nir_fsub(nb, src[2], src[0]),
486 nir_fsub(nb, src[1], src[0])),
487 nir_imm_float(nb, 0.0), nir_imm_float(nb, 1.0));
488 /* result = t * t * (3 - 2 * t) */
490 nir_fmul(nb, t, nir_fmul(nb, t,
491 nir_fsub(nb, nir_imm_float(nb, 3.0),
492 nir_fmul(nb, nir_imm_float(nb, 2.0), t))));
496 case GLSLstd450FaceForward:
498 nir_bcsel(nb, nir_flt(nb, nir_fdot(nb, src[2], src[1]),
499 nir_imm_float(nb, 0.0)),
500 src[0], nir_fneg(nb, src[0]));
503 case GLSLstd450Reflect:
504 /* I - 2 * dot(N, I) * N */
506 nir_fsub(nb, src[0], nir_fmul(nb, nir_imm_float(nb, 2.0),
507 nir_fmul(nb, nir_fdot(nb, src[0], src[1]),
511 case GLSLstd450Refract: {
512 nir_ssa_def *I = src[0];
513 nir_ssa_def *N = src[1];
514 nir_ssa_def *eta = src[2];
515 nir_ssa_def *n_dot_i = nir_fdot(nb, N, I);
516 nir_ssa_def *one = nir_imm_float(nb, 1.0);
517 nir_ssa_def *zero = nir_imm_float(nb, 0.0);
518 /* k = 1.0 - eta * eta * (1.0 - dot(N, I) * dot(N, I)) */
520 nir_fsub(nb, one, nir_fmul(nb, eta, nir_fmul(nb, eta,
521 nir_fsub(nb, one, nir_fmul(nb, n_dot_i, n_dot_i)))));
522 nir_ssa_def *result =
523 nir_fsub(nb, nir_fmul(nb, eta, I),
524 nir_fmul(nb, nir_fadd(nb, nir_fmul(nb, eta, n_dot_i),
525 nir_fsqrt(nb, k)), N));
526 /* XXX: bcsel, or if statement? */
527 val->ssa->def = nir_bcsel(nb, nir_flt(nb, k, zero), zero, result);
532 /* 0.5 * (e^x - e^(-x)) */
534 nir_fmul(nb, nir_imm_float(nb, 0.5f),
535 nir_fsub(nb, build_exp(nb, src[0]),
536 build_exp(nb, nir_fneg(nb, src[0]))));
540 /* 0.5 * (e^x + e^(-x)) */
542 nir_fmul(nb, nir_imm_float(nb, 0.5f),
543 nir_fadd(nb, build_exp(nb, src[0]),
544 build_exp(nb, nir_fneg(nb, src[0]))));
548 /* (0.5 * (e^x - e^(-x))) / (0.5 * (e^x + e^(-x))) */
550 nir_fdiv(nb, nir_fmul(nb, nir_imm_float(nb, 0.5f),
551 nir_fsub(nb, build_exp(nb, src[0]),
552 build_exp(nb, nir_fneg(nb, src[0])))),
553 nir_fmul(nb, nir_imm_float(nb, 0.5f),
554 nir_fadd(nb, build_exp(nb, src[0]),
555 build_exp(nb, nir_fneg(nb, src[0])))));
558 case GLSLstd450Asinh:
559 val->ssa->def = nir_fmul(nb, nir_fsign(nb, src[0]),
560 build_log(nb, nir_fadd(nb, nir_fabs(nb, src[0]),
561 nir_fsqrt(nb, nir_fadd(nb, nir_fmul(nb, src[0], src[0]),
562 nir_imm_float(nb, 1.0f))))));
564 case GLSLstd450Acosh:
565 val->ssa->def = build_log(nb, nir_fadd(nb, src[0],
566 nir_fsqrt(nb, nir_fsub(nb, nir_fmul(nb, src[0], src[0]),
567 nir_imm_float(nb, 1.0f)))));
569 case GLSLstd450Atanh: {
570 nir_ssa_def *one = nir_imm_float(nb, 1.0);
571 val->ssa->def = nir_fmul(nb, nir_imm_float(nb, 0.5f),
572 build_log(nb, nir_fdiv(nb, nir_fadd(nb, one, src[0]),
573 nir_fsub(nb, one, src[0]))));
577 case GLSLstd450FindILsb: op = nir_op_find_lsb; break;
578 case GLSLstd450FindSMsb: op = nir_op_ifind_msb; break;
579 case GLSLstd450FindUMsb: op = nir_op_ufind_msb; break;
582 val->ssa->def = build_asin(nb, src[0]);
586 val->ssa->def = nir_fsub(nb, nir_imm_float(nb, M_PI_2f),
587 build_asin(nb, src[0]));
591 val->ssa->def = build_atan(nb, src[0]);
594 case GLSLstd450Atan2:
595 val->ssa->def = build_atan2(nb, src[0], src[1]);
598 case GLSLstd450Frexp: {
599 nir_ssa_def *exponent;
600 val->ssa->def = build_frexp(nb, src[0], &exponent);
601 nir_store_deref_var(nb, vtn_nir_deref(b, w[6]), exponent, 0xf);
605 case GLSLstd450FrexpStruct: {
606 assert(glsl_type_is_struct(val->ssa->type));
607 val->ssa->elems[0]->def = build_frexp(nb, src[0],
608 &val->ssa->elems[1]->def);
612 case GLSLstd450PackDouble2x32:
613 case GLSLstd450UnpackDouble2x32:
615 unreachable("Unhandled opcode");
618 nir_alu_instr *instr = nir_alu_instr_create(b->shader, op);
619 nir_ssa_dest_init(&instr->instr, &instr->dest.dest,
620 glsl_get_vector_elements(val->ssa->type), val->name);
621 instr->dest.write_mask = (1 << instr->dest.dest.ssa.num_components) - 1;
622 val->ssa->def = &instr->dest.dest.ssa;
624 for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++)
625 instr->src[i].src = nir_src_for_ssa(src[i]);
627 nir_builder_instr_insert(nb, &instr->instr);
631 vtn_handle_glsl450_instruction(struct vtn_builder *b, uint32_t ext_opcode,
632 const uint32_t *w, unsigned count)
634 switch ((enum GLSLstd450)ext_opcode) {
635 case GLSLstd450Determinant: {
636 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
637 val->ssa = rzalloc(b, struct vtn_ssa_value);
638 val->ssa->type = vtn_value(b, w[1], vtn_value_type_type)->type->type;
639 val->ssa->def = build_mat_det(b, vtn_ssa_value(b, w[5]));
643 case GLSLstd450MatrixInverse: {
644 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
645 val->ssa = matrix_inverse(b, vtn_ssa_value(b, w[5]));
649 case GLSLstd450InterpolateAtCentroid:
650 case GLSLstd450InterpolateAtSample:
651 case GLSLstd450InterpolateAtOffset:
652 unreachable("Unhandled opcode");
655 handle_glsl450_alu(b, (enum GLSLstd450)ext_opcode, w, count);