/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
#include "vtn_private.h"
#include "nir/nir_vla.h"
#include "nir/nir_control_flow.h"
#include "nir/nir_constant_expressions.h"
#include "nir/nir_deref.h"
#include "spirv_info.h"

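/* Logging helpers: everything funnels through vtn_log(), which forwards the
 * message to the debug callback the driver registered in the build options
 * (if any) and echoes warnings and errors to stderr.
 */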
vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
        size_t spirv_offset, const char *message)
   if (b->options->debug.func) {
      b->options->debug.func(b->options->debug.private_data,
                             level, spirv_offset, message);

   if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
      fprintf(stderr, "%s\n", message);

vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
         size_t spirv_offset, const char *fmt, ...)
   msg = ralloc_vasprintf(NULL, fmt, args);

   vtn_log(b, level, spirv_offset, msg);

vtn_log_err(struct vtn_builder *b,
            enum nir_spirv_debug_level level, const char *prefix,
            const char *file, unsigned line,
            const char *fmt, va_list args)
   msg = ralloc_strdup(NULL, prefix);

   ralloc_asprintf_append(&msg, " In file %s:%u\n", file, line);

   ralloc_asprintf_append(&msg, " ");

   ralloc_vasprintf_append(&msg, fmt, args);

   ralloc_asprintf_append(&msg, "\n %zu bytes into the SPIR-V binary",

   ralloc_asprintf_append(&msg,
                          "\n in SPIR-V source file %s, line %d, col %d",
                          b->file, b->line, b->col);

   vtn_log(b, level, b->spirv_offset, msg);

vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
   int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
                      path, prefix, idx++);
   if (len < 0 || len >= sizeof(filename))

   FILE *f = fopen(filename, "w");

   fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);

   vtn_info("SPIR-V shader dumped to %s", filename);

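/* The _vtn_warn/_vtn_err/_vtn_fail entry points below are normally reached
 * through the vtn_warn()/vtn_err()/vtn_fail() macros, which supply the
 * __FILE__ and __LINE__ of the call site so the log can point at the exact
 * place in the compiler that flagged the problem.
 */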
_vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
               file, line, fmt, args);

_vtn_err(struct vtn_builder *b, const char *file, unsigned line,
         const char *fmt, ...)
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
               file, line, fmt, args);

_vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
          const char *fmt, ...)
   vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
               file, line, fmt, args);

   const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
      vtn_dump_shader(b, dump_path, "fail");

   longjmp(b->fail_jump, 1);

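/* Note that _vtn_fail() does not return: after logging (and optionally
 * dumping the offending binary when MESA_SPIRV_FAIL_DUMP_PATH is set) it
 * longjmps back to the setjmp(b->fail_jump) established by the top-level
 * entry point, so deeply nested handlers can bail out without threading an
 * error code through every return value. A caller sketch (illustrative
 * only, not the exact code):
 *
 *    if (setjmp(b->fail_jump)) {
 *       ralloc_free(b);
 *       return NULL;
 *    }
 */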
struct spec_constant_value {
   bool is_double;

   union {
      uint32_t data32;
      uint64_t data64;
   };
};

static struct vtn_ssa_value *
vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);

   if (glsl_type_is_vector_or_scalar(type)) {
      unsigned num_components = glsl_get_vector_elements(val->type);
      unsigned bit_size = glsl_get_bit_size(val->type);
      val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      if (glsl_type_is_matrix(type)) {
         const struct glsl_type *elem_type =
            glsl_vector_type(glsl_get_base_type(type),
                             glsl_get_vector_elements(type));
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
      } else if (glsl_type_is_array(type)) {
         const struct glsl_type *elem_type = glsl_get_array_element(type);
         for (unsigned i = 0; i < elems; i++)
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);
         for (unsigned i = 0; i < elems; i++) {
            const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
            val->elems[i] = vtn_undef_ssa_value(b, elem_type);

static struct vtn_ssa_value *
vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
                    const struct glsl_type *type)
   struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);

   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE: {
      int bit_size = glsl_get_bit_size(type);
      if (glsl_type_is_vector_or_scalar(type)) {
         unsigned num_components = glsl_get_vector_elements(val->type);
         nir_load_const_instr *load =
            nir_load_const_instr_create(b->shader, num_components, bit_size);

         load->value = constant->values[0];

         nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
         val->def = &load->def;
         assert(glsl_type_is_matrix(type));
         unsigned rows = glsl_get_vector_elements(val->type);
         unsigned columns = glsl_get_matrix_columns(val->type);
         val->elems = ralloc_array(b, struct vtn_ssa_value *, columns);

         for (unsigned i = 0; i < columns; i++) {
            struct vtn_ssa_value *col_val = rzalloc(b, struct vtn_ssa_value);
            col_val->type = glsl_get_column_type(val->type);
            nir_load_const_instr *load =
               nir_load_const_instr_create(b->shader, rows, bit_size);

            load->value = constant->values[i];

            nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
            col_val->def = &load->def;

            val->elems[i] = col_val;

   case GLSL_TYPE_ARRAY: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      const struct glsl_type *elem_type = glsl_get_array_element(val->type);
      for (unsigned i = 0; i < elems; i++)
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],

   case GLSL_TYPE_STRUCT: {
      unsigned elems = glsl_get_length(val->type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *elem_type =
            glsl_get_struct_field(val->type, i);
         val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],

      vtn_fail("bad constant type");

struct vtn_ssa_value *
vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
   struct vtn_value *val = vtn_untyped_value(b, value_id);
   switch (val->value_type) {
   case vtn_value_type_undef:
      return vtn_undef_ssa_value(b, val->type->type);

   case vtn_value_type_constant:
      return vtn_const_ssa_value(b, val->constant, val->type->type);

   case vtn_value_type_ssa:

   case vtn_value_type_pointer:
      vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
      struct vtn_ssa_value *ssa =
         vtn_create_ssa_value(b, val->pointer->ptr_type->type);
      ssa->def = vtn_pointer_to_ssa(b, val->pointer);

      vtn_fail("Invalid type for an SSA value");

vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
                   unsigned word_count, unsigned *words_used)
   char *dup = ralloc_strndup(b, (char *)words, word_count * sizeof(*words));

   /* Amount of space taken by the string (including the null) */
   unsigned len = strlen(dup) + 1;
   *words_used = DIV_ROUND_UP(len, sizeof(*words));

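/* A SPIR-V string literal is nul-terminated UTF-8 packed little-endian into
 * consecutive 32-bit words, so e.g. "main" takes two words: one holding
 * 'm','a','i','n' and one whose low byte is the terminating nul. The
 * DIV_ROUND_UP above reports that count back through *words_used so the
 * caller can keep walking the instruction's operands.
 */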
vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
                        const uint32_t *end, vtn_instruction_handler handler)
   const uint32_t *w = start;
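
   /* Every SPIR-V instruction is a run of 32-bit words: the low 16 bits of
    * the first word hold the opcode and the high 16 bits hold the total
    * word count, including the first word itself, which is what lets this
    * loop walk the binary without understanding every opcode it sees.
    */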
      SpvOp opcode = w[0] & SpvOpCodeMask;
      unsigned count = w[0] >> SpvWordCountShift;
      vtn_assert(count >= 1 && w + count <= end);

      b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;

         break; /* Do nothing */

         b->file = vtn_value(b, w[1], vtn_value_type_string)->str;

      if (!handler(b, opcode, w, count))

vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
                     const uint32_t *w, unsigned count)
   const char *ext = (const char *)&w[2];
   case SpvOpExtInstImport: {
      struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
      if (strcmp(ext, "GLSL.std.450") == 0) {
         val->ext_handler = vtn_handle_glsl450_instruction;
      } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
                 && (b->options && b->options->caps.gcn_shader)) {
         val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
      } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
                 && (b->options && b->options->caps.trinary_minmax)) {
         val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
         vtn_fail("Unsupported extension: %s", ext);

      struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
      bool handled = val->ext_handler(b, w[4], w, count);

      vtn_fail("Unhandled opcode");

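/* Decorations are stored as a singly-linked list hanging off the decorated
 * vtn_value. The dec->scope field encodes what is decorated: the special
 * values VTN_DEC_DECORATION and VTN_DEC_EXECUTION_MODE mark plain
 * decorations and execution modes, while VTN_DEC_STRUCT_MEMBER0 + n marks a
 * decoration on struct member n.
 */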
_foreach_decoration_helper(struct vtn_builder *b,
                           struct vtn_value *base_value,
                           int parent_member,
                           struct vtn_value *value,
                           vtn_decoration_foreach_cb cb, void *data)
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      int member;
      if (dec->scope == VTN_DEC_DECORATION) {
         member = parent_member;
      } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
         vtn_fail_if(value->value_type != vtn_value_type_type ||
                     value->type->base_type != vtn_base_type_struct,
                     "OpMemberDecorate and OpGroupMemberDecorate are only "
                     "allowed on OpTypeStruct");
         /* This means we haven't recursed yet */
         assert(value == base_value);

         member = dec->scope - VTN_DEC_STRUCT_MEMBER0;

         vtn_fail_if(member >= base_value->type->length,
                     "OpMemberDecorate specifies member %d but the "
                     "OpTypeStruct has only %u members",
                     member, base_value->type->length);
         /* Not a decoration */
         assert(dec->scope == VTN_DEC_EXECUTION_MODE);

         assert(dec->group->value_type == vtn_value_type_decoration_group);
         _foreach_decoration_helper(b, base_value, member, dec->group,

         cb(b, base_value, member, dec, data);

/** Iterates (recursively if needed) over all of the decorations on a value
 *
 * This function iterates over all of the decorations applied to a given
 * value. If it encounters a decoration group, it recurses into the group
 * and iterates over all of those decorations as well.
 */
vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
                       vtn_decoration_foreach_cb cb, void *data)
   _foreach_decoration_helper(b, value, -1, value, cb, data);

vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
                           vtn_execution_mode_foreach_cb cb, void *data)
   for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
      if (dec->scope != VTN_DEC_EXECUTION_MODE)

      assert(dec->group == NULL);
      cb(b, value, dec, data);

vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
                      const uint32_t *w, unsigned count)
   const uint32_t *w_end = w + count;
   const uint32_t target = w[1];

   case SpvOpDecorationGroup:
      vtn_push_value(b, target, vtn_value_type_decoration_group);

   case SpvOpMemberDecorate:
   case SpvOpDecorateStringGOOGLE:
   case SpvOpMemberDecorateStringGOOGLE:
   case SpvOpExecutionMode: {
      struct vtn_value *val = vtn_untyped_value(b, target);

      struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
      case SpvOpDecorateStringGOOGLE:
         dec->scope = VTN_DEC_DECORATION;

      case SpvOpMemberDecorate:
      case SpvOpMemberDecorateStringGOOGLE:
         dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
         vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
                     "Member argument of OpMemberDecorate too large");

      case SpvOpExecutionMode:
         dec->scope = VTN_DEC_EXECUTION_MODE;

         unreachable("Invalid decoration opcode");

      dec->decoration = *(w++);

      /* Link into the list */
      dec->next = val->decoration;
      val->decoration = dec;

   case SpvOpGroupMemberDecorate:
   case SpvOpGroupDecorate: {
      struct vtn_value *group =
         vtn_value(b, target, vtn_value_type_decoration_group);

      for (; w < w_end; w++) {
         struct vtn_value *val = vtn_untyped_value(b, *w);
         struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);

         if (opcode == SpvOpGroupDecorate) {
            dec->scope = VTN_DEC_DECORATION;
            dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
            vtn_fail_if(dec->scope < 0, /* Check for overflow */
                        "Member argument of OpGroupMemberDecorate too large");

         /* Link into the list */
         dec->next = val->decoration;
         val->decoration = dec;

      unreachable("Unhandled opcode");

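/* Context passed through vtn_foreach_decoration() while building an
 * OpTypeStruct: fields collects the glsl_struct_field for each member and
 * type is the vtn_type being assembled.
 */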
struct member_decoration_ctx {
   unsigned num_fields;
   struct glsl_struct_field *fields;
   struct vtn_type *type;
};

/** Returns true if two types are "compatible", i.e. you can do an OpLoad,
 * OpStore, or OpCopyMemory between them without breaking anything.
 * Technically, the SPIR-V rules require the exact same type ID but this lets
 * us internally be a bit looser.
 */
vtn_types_compatible(struct vtn_builder *b,
                     struct vtn_type *t1, struct vtn_type *t2)
   if (t1->id == t2->id)

   if (t1->base_type != t2->base_type)

   switch (t1->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      return t1->type == t2->type;

   case vtn_base_type_array:
      return t1->length == t2->length &&
             vtn_types_compatible(b, t1->array_element, t2->array_element);

   case vtn_base_type_pointer:
      return vtn_types_compatible(b, t1->deref, t2->deref);

   case vtn_base_type_struct:
      if (t1->length != t2->length)

      for (unsigned i = 0; i < t1->length; i++) {
         if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))

   case vtn_base_type_function:
      /* This case shouldn't get hit since you can't copy around function
       * types. Just require them to be identical.
       */

   vtn_fail("Invalid base type");

/* does a shallow copy of a vtn_type */
static struct vtn_type *
vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
   struct vtn_type *dest = ralloc(b, struct vtn_type);

   switch (src->base_type) {
   case vtn_base_type_void:
   case vtn_base_type_scalar:
   case vtn_base_type_vector:
   case vtn_base_type_matrix:
   case vtn_base_type_array:
   case vtn_base_type_pointer:
   case vtn_base_type_image:
   case vtn_base_type_sampler:
   case vtn_base_type_sampled_image:
      /* Nothing more to do */

   case vtn_base_type_struct:
      dest->members = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->members, src->members,
             src->length * sizeof(src->members[0]));

      dest->offsets = ralloc_array(b, unsigned, src->length);
      memcpy(dest->offsets, src->offsets,
             src->length * sizeof(src->offsets[0]));

   case vtn_base_type_function:
      dest->params = ralloc_array(b, struct vtn_type *, src->length);
      memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));

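/* Decorations like RowMajor and MatrixStride must not scribble on a
 * vtn_type that other SPIR-V ids may share, so before mutating a matrix
 * member we clone the member (and, for arrays of matrices, every array
 * level down to the matrix itself) and edit the copy.
 */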
static struct vtn_type *
mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   /* We may have an array of matrices.... Oh, joy! */
   while (glsl_type_is_array(type->type)) {
      type->array_element = vtn_type_copy(b, type->array_element);
      type = type->array_element;

   vtn_assert(glsl_type_is_matrix(type->type));

vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
                            int member, enum gl_access_qualifier access)
   type->members[member] = vtn_type_copy(b, type->members[member]);
   type = type->members[member];

   type->access |= access;

struct_member_decoration_cb(struct vtn_builder *b,
                            struct vtn_value *val, int member,
                            const struct vtn_decoration *dec, void *void_ctx)
   struct member_decoration_ctx *ctx = void_ctx;

   assert(member < ctx->num_fields);

   switch (dec->decoration) {
   case SpvDecorationRelaxedPrecision:
   case SpvDecorationUniform:
      break; /* FIXME: Do nothing with this for now. */
   case SpvDecorationNonWritable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
   case SpvDecorationNonReadable:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
   case SpvDecorationVolatile:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
   case SpvDecorationCoherent:
      vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
   case SpvDecorationNoPerspective:
      ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
   case SpvDecorationFlat:
      ctx->fields[member].interpolation = INTERP_MODE_FLAT;
   case SpvDecorationCentroid:
      ctx->fields[member].centroid = true;
   case SpvDecorationSample:
      ctx->fields[member].sample = true;
   case SpvDecorationStream:
      /* Vulkan only allows one GS stream */
      vtn_assert(dec->literals[0] == 0);
   case SpvDecorationLocation:
      ctx->fields[member].location = dec->literals[0];
   case SpvDecorationComponent:
      break; /* FIXME: What should we do with these? */
   case SpvDecorationBuiltIn:
      ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
      ctx->type->members[member]->is_builtin = true;
      ctx->type->members[member]->builtin = dec->literals[0];
      ctx->type->builtin_block = true;
   case SpvDecorationOffset:
      ctx->type->offsets[member] = dec->literals[0];
   case SpvDecorationMatrixStride:
      /* Handled as a second pass */
   case SpvDecorationColMajor:
      break; /* Nothing to do here. Column-major is the default. */
   case SpvDecorationRowMajor:
      mutable_matrix_member(b, ctx->type, member)->row_major = true;

   case SpvDecorationPatch:

   case SpvDecorationSpecId:
   case SpvDecorationBlock:
   case SpvDecorationBufferBlock:
   case SpvDecorationArrayStride:
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on struct members: %s",
               spirv_decoration_to_string(dec->decoration));

   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
      vtn_warn("Vulkan does not have transform feedback");

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));

   case SpvDecorationHlslSemanticGOOGLE:
      /* HLSL semantic decorations can safely be ignored by the driver. */

      vtn_fail("Unhandled decoration");

/* Matrix strides are handled as a separate pass because we need to know
 * whether the matrix is row-major or not first.
 */
struct_member_matrix_stride_cb(struct vtn_builder *b,
                               struct vtn_value *val, int member,
                               const struct vtn_decoration *dec,
                               void *void_ctx)
   if (dec->decoration != SpvDecorationMatrixStride)

   vtn_fail_if(member < 0,
               "The MatrixStride decoration is only allowed on members "

   struct member_decoration_ctx *ctx = void_ctx;

   struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
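   /* For a column-major matrix, MatrixStride is simply the stride between
    * columns. For a row-major one it is the stride between rows, so the
    * decorated stride lands on the column-vector element type and the
    * vector's previous stride becomes the matrix's column stride.
    */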
   if (mat_type->row_major) {
      mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
      mat_type->stride = mat_type->array_element->stride;
      mat_type->array_element->stride = dec->literals[0];

      vtn_assert(mat_type->array_element->stride > 0);
      mat_type->stride = dec->literals[0];

type_decoration_cb(struct vtn_builder *b,
                   struct vtn_value *val, int member,
                   const struct vtn_decoration *dec, void *ctx)
   struct vtn_type *type = val->type;

      /* This should have been handled by OpTypeStruct */
      assert(val->type->base_type == vtn_base_type_struct);
      assert(member >= 0 && member < val->type->length);

   switch (dec->decoration) {
   case SpvDecorationArrayStride:
      vtn_assert(type->base_type == vtn_base_type_matrix ||
                 type->base_type == vtn_base_type_array ||
                 type->base_type == vtn_base_type_pointer);
      type->stride = dec->literals[0];
   case SpvDecorationBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);

   case SpvDecorationBufferBlock:
      vtn_assert(type->base_type == vtn_base_type_struct);
      type->buffer_block = true;
   case SpvDecorationGLSLShared:
   case SpvDecorationGLSLPacked:
      /* Ignore these, since we get explicit offsets anyways */

   case SpvDecorationRowMajor:
   case SpvDecorationColMajor:
   case SpvDecorationMatrixStride:
   case SpvDecorationBuiltIn:
   case SpvDecorationNoPerspective:
   case SpvDecorationFlat:
   case SpvDecorationPatch:
   case SpvDecorationCentroid:
   case SpvDecorationSample:
   case SpvDecorationVolatile:
   case SpvDecorationCoherent:
   case SpvDecorationNonWritable:
   case SpvDecorationNonReadable:
   case SpvDecorationUniform:
   case SpvDecorationLocation:
   case SpvDecorationComponent:
   case SpvDecorationOffset:
   case SpvDecorationXfbBuffer:
   case SpvDecorationXfbStride:
   case SpvDecorationHlslSemanticGOOGLE:
      vtn_warn("Decoration only allowed for struct members: %s",
               spirv_decoration_to_string(dec->decoration));

   case SpvDecorationStream:
      /* We don't need to do anything here, as the stream is filled out when
       * applying the decoration to a variable; just check that, if this is
       * not on a struct member, it is applied to a struct.
       */
      vtn_assert(type->base_type == vtn_base_type_struct);

   case SpvDecorationRelaxedPrecision:
   case SpvDecorationSpecId:
   case SpvDecorationInvariant:
   case SpvDecorationRestrict:
   case SpvDecorationAliased:
   case SpvDecorationConstant:
   case SpvDecorationIndex:
   case SpvDecorationBinding:
   case SpvDecorationDescriptorSet:
   case SpvDecorationLinkageAttributes:
   case SpvDecorationNoContraction:
   case SpvDecorationInputAttachmentIndex:
      vtn_warn("Decoration not allowed on types: %s",
               spirv_decoration_to_string(dec->decoration));

   case SpvDecorationCPacked:
   case SpvDecorationSaturatedConversion:
   case SpvDecorationFuncParamAttr:
   case SpvDecorationFPRoundingMode:
   case SpvDecorationFPFastMathMode:
   case SpvDecorationAlignment:
      vtn_warn("Decoration only allowed for CL-style kernels: %s",
               spirv_decoration_to_string(dec->decoration));

      vtn_fail("Unhandled decoration");

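/* NIR (like GL) identifies image formats by GLenum, so translate the
 * SPIR-V image format enumeration into the corresponding GL format values.
 */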
translate_image_format(struct vtn_builder *b, SpvImageFormat format)
   case SpvImageFormatUnknown: return 0; /* GL_NONE */
   case SpvImageFormatRgba32f: return 0x8814; /* GL_RGBA32F */
   case SpvImageFormatRgba16f: return 0x881A; /* GL_RGBA16F */
   case SpvImageFormatR32f: return 0x822E; /* GL_R32F */
   case SpvImageFormatRgba8: return 0x8058; /* GL_RGBA8 */
   case SpvImageFormatRgba8Snorm: return 0x8F97; /* GL_RGBA8_SNORM */
   case SpvImageFormatRg32f: return 0x8230; /* GL_RG32F */
   case SpvImageFormatRg16f: return 0x822F; /* GL_RG16F */
   case SpvImageFormatR11fG11fB10f: return 0x8C3A; /* GL_R11F_G11F_B10F */
   case SpvImageFormatR16f: return 0x822D; /* GL_R16F */
   case SpvImageFormatRgba16: return 0x805B; /* GL_RGBA16 */
   case SpvImageFormatRgb10A2: return 0x8059; /* GL_RGB10_A2 */
   case SpvImageFormatRg16: return 0x822C; /* GL_RG16 */
   case SpvImageFormatRg8: return 0x822B; /* GL_RG8 */
   case SpvImageFormatR16: return 0x822A; /* GL_R16 */
   case SpvImageFormatR8: return 0x8229; /* GL_R8 */
   case SpvImageFormatRgba16Snorm: return 0x8F9B; /* GL_RGBA16_SNORM */
   case SpvImageFormatRg16Snorm: return 0x8F99; /* GL_RG16_SNORM */
   case SpvImageFormatRg8Snorm: return 0x8F95; /* GL_RG8_SNORM */
   case SpvImageFormatR16Snorm: return 0x8F98; /* GL_R16_SNORM */
   case SpvImageFormatR8Snorm: return 0x8F94; /* GL_R8_SNORM */
   case SpvImageFormatRgba32i: return 0x8D82; /* GL_RGBA32I */
   case SpvImageFormatRgba16i: return 0x8D88; /* GL_RGBA16I */
   case SpvImageFormatRgba8i: return 0x8D8E; /* GL_RGBA8I */
   case SpvImageFormatR32i: return 0x8235; /* GL_R32I */
   case SpvImageFormatRg32i: return 0x823B; /* GL_RG32I */
   case SpvImageFormatRg16i: return 0x8239; /* GL_RG16I */
   case SpvImageFormatRg8i: return 0x8237; /* GL_RG8I */
   case SpvImageFormatR16i: return 0x8233; /* GL_R16I */
   case SpvImageFormatR8i: return 0x8231; /* GL_R8I */
   case SpvImageFormatRgba32ui: return 0x8D70; /* GL_RGBA32UI */
   case SpvImageFormatRgba16ui: return 0x8D76; /* GL_RGBA16UI */
   case SpvImageFormatRgba8ui: return 0x8D7C; /* GL_RGBA8UI */
   case SpvImageFormatR32ui: return 0x8236; /* GL_R32UI */
   case SpvImageFormatRgb10a2ui: return 0x906F; /* GL_RGB10_A2UI */
   case SpvImageFormatRg32ui: return 0x823C; /* GL_RG32UI */
   case SpvImageFormatRg16ui: return 0x823A; /* GL_RG16UI */
   case SpvImageFormatRg8ui: return 0x8238; /* GL_RG8UI */
   case SpvImageFormatR16ui: return 0x8234; /* GL_R16UI */
   case SpvImageFormatR8ui: return 0x8232; /* GL_R8UI */

      vtn_fail("Invalid image format");

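/* Computes an std430 layout for a type and returns a copy of it with the
 * stride/offset fields filled in: scalars align to their own size, a vec3
 * aligns like a vec4, and an array's stride is the element size rounded up
 * to the element alignment. Under these rules a float[3] has stride 4 and
 * size 12, while a vec3[2] has stride 16 and size 32.
 */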
static struct vtn_type *
vtn_type_layout_std430(struct vtn_builder *b, struct vtn_type *type,
                       uint32_t *size_out, uint32_t *align_out)
   switch (type->base_type) {
   case vtn_base_type_scalar: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      *size_out = comp_size;
      *align_out = comp_size;

   case vtn_base_type_vector: {
      uint32_t comp_size = glsl_get_bit_size(type->type) / 8;
      unsigned align_comps = type->length == 3 ? 4 : type->length;
      *size_out = comp_size * type->length;
      *align_out = comp_size * align_comps;

   case vtn_base_type_matrix:
   case vtn_base_type_array: {
      /* We're going to add an array stride */
      type = vtn_type_copy(b, type);
      uint32_t elem_size, elem_align;
      type->array_element = vtn_type_layout_std430(b, type->array_element,
                                                   &elem_size, &elem_align);
      type->stride = vtn_align_u32(elem_size, elem_align);
      *size_out = type->stride * type->length;
      *align_out = elem_align;

   case vtn_base_type_struct: {
      /* We're going to add member offsets */
      type = vtn_type_copy(b, type);
      uint32_t offset = 0;
      uint32_t align = 0;
      for (unsigned i = 0; i < type->length; i++) {
         uint32_t mem_size, mem_align;
         type->members[i] = vtn_type_layout_std430(b, type->members[i],
                                                   &mem_size, &mem_align);
         offset = vtn_align_u32(offset, mem_align);
         type->offsets[i] = offset;

         align = MAX2(align, mem_align);

      unreachable("Invalid SPIR-V type for std430");

vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
                const uint32_t *w, unsigned count)
   struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_type);

   val->type = rzalloc(b, struct vtn_type);
   val->type->id = w[1];

      val->type->base_type = vtn_base_type_void;
      val->type->type = glsl_void_type();

      val->type->base_type = vtn_base_type_scalar;
      val->type->type = glsl_bool_type();
      val->type->length = 1;

   case SpvOpTypeInt: {
      int bit_size = w[2];
      const bool signedness = w[3];
      val->type->base_type = vtn_base_type_scalar;
         val->type->type = (signedness ? glsl_int64_t_type() : glsl_uint64_t_type());
         val->type->type = (signedness ? glsl_int_type() : glsl_uint_type());
         val->type->type = (signedness ? glsl_int16_t_type() : glsl_uint16_t_type());
         val->type->type = (signedness ? glsl_int8_t_type() : glsl_uint8_t_type());
         vtn_fail("Invalid int bit size");
      val->type->length = 1;

   case SpvOpTypeFloat: {
      int bit_size = w[2];
      val->type->base_type = vtn_base_type_scalar;
         val->type->type = glsl_float16_t_type();
         val->type->type = glsl_float_type();
         val->type->type = glsl_double_type();
         vtn_fail("Invalid float bit size");
      val->type->length = 1;

   case SpvOpTypeVector: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned elems = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_scalar,
                  "Base type for OpTypeVector must be a scalar");
      vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
                  "Invalid component count for OpTypeVector");

      val->type->base_type = vtn_base_type_vector;
      val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
      val->type->length = elems;
      val->type->stride = glsl_get_bit_size(base->type) / 8;
      val->type->array_element = base;

   case SpvOpTypeMatrix: {
      struct vtn_type *base = vtn_value(b, w[2], vtn_value_type_type)->type;
      unsigned columns = w[3];

      vtn_fail_if(base->base_type != vtn_base_type_vector,
                  "Base type for OpTypeMatrix must be a vector");
      vtn_fail_if(columns < 2 || columns > 4,
                  "Invalid column count for OpTypeMatrix");

      val->type->base_type = vtn_base_type_matrix;
      val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
                                         glsl_get_vector_elements(base->type),
      vtn_fail_if(glsl_type_is_error(val->type->type),
                  "Unsupported base type for OpTypeMatrix");
      assert(!glsl_type_is_error(val->type->type));
      val->type->length = columns;
      val->type->array_element = base;
      val->type->row_major = false;
      val->type->stride = 0;

   case SpvOpTypeRuntimeArray:
   case SpvOpTypeArray: {
      struct vtn_type *array_element =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      if (opcode == SpvOpTypeRuntimeArray) {
         /* A length of 0 is used to denote unsized arrays */
         val->type->length = 0;
            vtn_value(b, w[3], vtn_value_type_constant)->constant->values[0].u32[0];

      val->type->base_type = vtn_base_type_array;
      val->type->type = glsl_array_type(array_element->type, val->type->length);
      val->type->array_element = array_element;
      val->type->stride = 0;

   case SpvOpTypeStruct: {
      unsigned num_fields = count - 2;
      val->type->base_type = vtn_base_type_struct;
      val->type->length = num_fields;
      val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
      val->type->offsets = ralloc_array(b, unsigned, num_fields);

      NIR_VLA(struct glsl_struct_field, fields, count);
      for (unsigned i = 0; i < num_fields; i++) {
         val->type->members[i] =
            vtn_value(b, w[i + 2], vtn_value_type_type)->type;
         fields[i] = (struct glsl_struct_field) {
            .type = val->type->members[i]->type,
            .name = ralloc_asprintf(b, "field%d", i),

      struct member_decoration_ctx ctx = {
         .num_fields = num_fields,

      vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
      vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);

      const char *name = val->name ? val->name : "struct";

      val->type->type = glsl_struct_type(fields, num_fields, name);

   case SpvOpTypeFunction: {
      val->type->base_type = vtn_base_type_function;
      val->type->type = NULL;

      val->type->return_type = vtn_value(b, w[2], vtn_value_type_type)->type;

      const unsigned num_params = count - 3;
      val->type->length = num_params;
      val->type->params = ralloc_array(b, struct vtn_type *, num_params);
      for (unsigned i = 0; i < count - 3; i++) {
         val->type->params[i] =
            vtn_value(b, w[i + 3], vtn_value_type_type)->type;

   case SpvOpTypePointer: {
      SpvStorageClass storage_class = w[2];
      struct vtn_type *deref_type =
         vtn_value(b, w[3], vtn_value_type_type)->type;

      val->type->base_type = vtn_base_type_pointer;
      val->type->storage_class = storage_class;
      val->type->deref = deref_type;

      if (storage_class == SpvStorageClassUniform ||
          storage_class == SpvStorageClassStorageBuffer) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_vector_type(GLSL_TYPE_UINT, 2);

      if (storage_class == SpvStorageClassPushConstant) {
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_uint_type();

      if (storage_class == SpvStorageClassWorkgroup &&
          b->options->lower_workgroup_access_to_offsets) {
         uint32_t size, align;
         val->type->deref = vtn_type_layout_std430(b, val->type->deref,
         val->type->length = size;
         val->type->align = align;
         /* These can actually be stored to nir_variables and used as SSA
          * values so they need a real glsl_type.
          */
         val->type->type = glsl_uint_type();

   case SpvOpTypeImage: {
      val->type->base_type = vtn_base_type_image;

      const struct vtn_type *sampled_type =
         vtn_value(b, w[2], vtn_value_type_type)->type;

      vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar ||
                  glsl_get_bit_size(sampled_type->type) != 32,
                  "Sampled type of OpTypeImage must be a 32-bit scalar");

      enum glsl_sampler_dim dim;
      switch ((SpvDim)w[3]) {
      case SpvDim1D: dim = GLSL_SAMPLER_DIM_1D; break;
      case SpvDim2D: dim = GLSL_SAMPLER_DIM_2D; break;
      case SpvDim3D: dim = GLSL_SAMPLER_DIM_3D; break;
      case SpvDimCube: dim = GLSL_SAMPLER_DIM_CUBE; break;
      case SpvDimRect: dim = GLSL_SAMPLER_DIM_RECT; break;
      case SpvDimBuffer: dim = GLSL_SAMPLER_DIM_BUF; break;
      case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
         vtn_fail("Invalid SPIR-V image dimensionality");

      bool is_shadow = w[4];
      bool is_array = w[5];
      bool multisampled = w[6];
      unsigned sampled = w[7];
      SpvImageFormat format = w[8];

         val->type->access_qualifier = w[9];
         val->type->access_qualifier = SpvAccessQualifierReadWrite;

         if (dim == GLSL_SAMPLER_DIM_2D)
            dim = GLSL_SAMPLER_DIM_MS;
         else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
            dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
            vtn_fail("Unsupported multisampled image type");

      val->type->image_format = translate_image_format(b, format);

      enum glsl_base_type sampled_base_type =
         glsl_get_base_type(sampled_type->type);
         val->type->sampled = true;
         val->type->type = glsl_sampler_type(dim, is_shadow, is_array,
      } else if (sampled == 2) {
         vtn_assert(!is_shadow);
         val->type->sampled = false;
         val->type->type = glsl_image_type(dim, is_array, sampled_base_type);
         vtn_fail("We need to know if the image will be sampled");

   case SpvOpTypeSampledImage:
      val->type->base_type = vtn_base_type_sampled_image;
      val->type->image = vtn_value(b, w[2], vtn_value_type_type)->type;
      val->type->type = val->type->image->type;

   case SpvOpTypeSampler:
      /* The actual sampler type here doesn't really matter. It gets
       * thrown away the moment you combine it with an image. What really
       * matters is that it's a sampler type as opposed to an integer type
       * so the backend knows what to do.
       */
      val->type->base_type = vtn_base_type_sampler;
      val->type->type = glsl_bare_sampler_type();

   case SpvOpTypeOpaque:
   case SpvOpTypeEvent:
   case SpvOpTypeDeviceEvent:
   case SpvOpTypeReserveId:
   case SpvOpTypeQueue:
      vtn_fail("Unhandled opcode");

   vtn_foreach_decoration(b, val, type_decoration_cb, NULL);

static nir_constant *
vtn_null_constant(struct vtn_builder *b, const struct glsl_type *type)
   nir_constant *c = rzalloc(b, nir_constant);

   /* For pointers and other typeless things, we have to return something but
    * it doesn't matter what.
    */

   switch (glsl_get_base_type(type)) {
   case GLSL_TYPE_UINT:
   case GLSL_TYPE_INT16:
   case GLSL_TYPE_UINT16:
   case GLSL_TYPE_UINT8:
   case GLSL_TYPE_INT8:
   case GLSL_TYPE_INT64:
   case GLSL_TYPE_UINT64:
   case GLSL_TYPE_BOOL:
   case GLSL_TYPE_FLOAT:
   case GLSL_TYPE_FLOAT16:
   case GLSL_TYPE_DOUBLE:
      /* Nothing to do here. It's already initialized to zero */

   case GLSL_TYPE_ARRAY:
      vtn_assert(glsl_get_length(type) > 0);
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      c->elements[0] = vtn_null_constant(b, glsl_get_array_element(type));
      for (unsigned i = 1; i < c->num_elements; i++)
         c->elements[i] = c->elements[0];

   case GLSL_TYPE_STRUCT:
      c->num_elements = glsl_get_length(type);
      c->elements = ralloc_array(b, nir_constant *, c->num_elements);

      for (unsigned i = 0; i < c->num_elements; i++) {
         c->elements[i] = vtn_null_constant(b, glsl_get_struct_field(type, i));

      vtn_fail("Invalid type for null constant");

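/* Specialization constants: if a constant carries a SpecId decoration and
 * the client supplied a specialization entry with a matching id (e.g. from
 * VkSpecializationInfo at pipeline-creation time), the callback below
 * substitutes the client-provided value for the one in the shader.
 */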
spec_constant_decoration_cb(struct vtn_builder *b, struct vtn_value *v,
                            int member, const struct vtn_decoration *dec,
                            void *data)
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationSpecId)

   struct spec_constant_value *const_value = data;

   for (unsigned i = 0; i < b->num_specializations; i++) {
      if (b->specializations[i].id == dec->literals[0]) {
         if (const_value->is_double)
            const_value->data64 = b->specializations[i].data64;
            const_value->data32 = b->specializations[i].data32;

get_specialization(struct vtn_builder *b, struct vtn_value *val,
                   uint32_t const_value)
   struct spec_constant_value data;
   data.is_double = false;
   data.data32 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);

get_specialization64(struct vtn_builder *b, struct vtn_value *val,
                     uint64_t const_value)
   struct spec_constant_value data;
   data.is_double = true;
   data.data64 = const_value;
   vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &data);

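/* In SPIR-V the workgroup size may also be supplied as a constant composite
 * decorated BuiltIn WorkgroupSize (rather than an OpExecutionMode LocalSize),
 * so whenever a constant is created we check for that decoration and update
 * the shader's local size accordingly.
 */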
handle_workgroup_size_decoration_cb(struct vtn_builder *b,
                                    struct vtn_value *val,
                                    int member,
                                    const struct vtn_decoration *dec,
                                    void *data)
   vtn_assert(member == -1);
   if (dec->decoration != SpvDecorationBuiltIn ||
       dec->literals[0] != SpvBuiltInWorkgroupSize)

   vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));

   b->shader->info.cs.local_size[0] = val->constant->values[0].u32[0];
   b->shader->info.cs.local_size[1] = val->constant->values[0].u32[1];
   b->shader->info.cs.local_size[2] = val->constant->values[0].u32[2];

vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
                    const uint32_t *w, unsigned count)
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
   val->constant = rzalloc(b, nir_constant);
   case SpvOpConstantTrue:
   case SpvOpConstantFalse:
   case SpvOpSpecConstantTrue:
   case SpvOpSpecConstantFalse: {
      vtn_fail_if(val->type->type != glsl_bool_type(),
                  "Result type of %s must be OpTypeBool",
                  spirv_op_to_string(opcode));

      uint32_t int_val = (opcode == SpvOpConstantTrue ||
                          opcode == SpvOpSpecConstantTrue);

      if (opcode == SpvOpSpecConstantTrue ||
          opcode == SpvOpSpecConstantFalse)
         int_val = get_specialization(b, val, int_val);

      val->constant->values[0].u32[0] = int_val ? NIR_TRUE : NIR_FALSE;

   case SpvOpConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
         val->constant->values->u64[0] = vtn_u64_literal(&w[3]);
         val->constant->values->u32[0] = w[3];
         val->constant->values->u16[0] = w[3];
         val->constant->values->u8[0] = w[3];
         vtn_fail("Unsupported SpvOpConstant bit size");

   case SpvOpSpecConstant: {
      vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
                  "Result type of %s must be a scalar",
                  spirv_op_to_string(opcode));
      int bit_size = glsl_get_bit_size(val->type->type);
         val->constant->values[0].u64[0] =
            get_specialization64(b, val, vtn_u64_literal(&w[3]));
         val->constant->values[0].u32[0] = get_specialization(b, val, w[3]);
         val->constant->values[0].u16[0] = get_specialization(b, val, w[3]);
         val->constant->values[0].u8[0] = get_specialization(b, val, w[3]);
         vtn_fail("Unsupported SpvOpSpecConstant bit size");

   case SpvOpSpecConstantComposite:
   case SpvOpConstantComposite: {
      unsigned elem_count = count - 3;
      vtn_fail_if(elem_count != val->type->length,
                  "%s has %u constituents, expected %u",
                  spirv_op_to_string(opcode), elem_count, val->type->length);

      nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
      for (unsigned i = 0; i < elem_count; i++) {
         struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);

         if (val->value_type == vtn_value_type_constant) {
            elems[i] = val->constant;
            vtn_fail_if(val->value_type != vtn_value_type_undef,
                        "only constants or undefs allowed for "
                        "SpvOpConstantComposite");
            /* to make it easier, just insert a NULL constant for now */
            elems[i] = vtn_null_constant(b, val->type->type);

      switch (val->type->base_type) {
      case vtn_base_type_vector: {
         assert(glsl_type_is_vector(val->type->type));
         int bit_size = glsl_get_bit_size(val->type->type);
         for (unsigned i = 0; i < elem_count; i++) {
               val->constant->values[0].u64[i] = elems[i]->values[0].u64[0];
               val->constant->values[0].u32[i] = elems[i]->values[0].u32[0];
               val->constant->values[0].u16[i] = elems[i]->values[0].u16[0];
               val->constant->values[0].u8[i] = elems[i]->values[0].u8[0];
               vtn_fail("Invalid SpvOpConstantComposite bit size");

      case vtn_base_type_matrix:
         assert(glsl_type_is_matrix(val->type->type));
         for (unsigned i = 0; i < elem_count; i++)
            val->constant->values[i] = elems[i]->values[0];

      case vtn_base_type_struct:
      case vtn_base_type_array:
         ralloc_steal(val->constant, elems);
         val->constant->num_elements = elem_count;
         val->constant->elements = elems;

         vtn_fail("Result type of %s must be a composite type",
                  spirv_op_to_string(opcode));

   case SpvOpSpecConstantOp: {
      SpvOp opcode = get_specialization(b, val, w[3]);
      case SpvOpVectorShuffle: {
         struct vtn_value *v0 = &b->values[w[4]];
         struct vtn_value *v1 = &b->values[w[5]];

         vtn_assert(v0->value_type == vtn_value_type_constant ||
                    v0->value_type == vtn_value_type_undef);
         vtn_assert(v1->value_type == vtn_value_type_constant ||
                    v1->value_type == vtn_value_type_undef);

         unsigned len0 = glsl_get_vector_elements(v0->type->type);
         unsigned len1 = glsl_get_vector_elements(v1->type->type);

         vtn_assert(len0 + len1 < 16);

         unsigned bit_size = glsl_get_bit_size(val->type->type);
         unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
         unsigned bit_size1 = glsl_get_bit_size(v1->type->type);

         vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
         (void)bit_size0; (void)bit_size1;

         if (bit_size == 64) {
            uint64_t u64[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u64[i] = v0->constant->values[0].u64[i];
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u64[len0 + i] = v1->constant->values[0].u64[i];

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u64[j] = 0xdeadbeefdeadbeef;
                  val->constant->values[0].u64[j] = u64[comp];
            /* This is for both 32-bit and 16-bit values */
            uint32_t u32[8];
            if (v0->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len0; i++)
                  u32[i] = v0->constant->values[0].u32[i];
            if (v1->value_type == vtn_value_type_constant) {
               for (unsigned i = 0; i < len1; i++)
                  u32[len0 + i] = v1->constant->values[0].u32[i];

            for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
               uint32_t comp = w[i + 6];
               /* If component is not used, set the value to a known constant
                * to detect if it is wrongly used.
                */
               if (comp == (uint32_t)-1)
                  val->constant->values[0].u32[j] = 0xdeadbeef;
                  val->constant->values[0].u32[j] = u32[comp];

      case SpvOpCompositeExtract:
      case SpvOpCompositeInsert: {
         struct vtn_value *comp;
         unsigned deref_start;
         struct nir_constant **c;
         if (opcode == SpvOpCompositeExtract) {
            comp = vtn_value(b, w[4], vtn_value_type_constant);
            c = &comp->constant;
            comp = vtn_value(b, w[5], vtn_value_type_constant);
            val->constant = nir_constant_clone(comp->constant,

         int elem = -1;
         int col = 0;
         const struct vtn_type *type = comp->type;
         for (unsigned i = deref_start; i < count; i++) {
            vtn_fail_if(w[i] > type->length,
                        "%uth index of %s is %u but the type has only "
                        "%u elements", i - deref_start,
                        spirv_op_to_string(opcode), w[i], type->length);

            switch (type->base_type) {
            case vtn_base_type_vector:
               type = type->array_element;

            case vtn_base_type_matrix:
               assert(col == 0 && elem == -1);
               type = type->array_element;

            case vtn_base_type_array:
               c = &(*c)->elements[w[i]];
               type = type->array_element;

            case vtn_base_type_struct:
               c = &(*c)->elements[w[i]];
               type = type->members[w[i]];

               vtn_fail("%s must only index into composite types",
                        spirv_op_to_string(opcode));

         if (opcode == SpvOpCompositeExtract) {
               unsigned num_components = type->length;
               unsigned bit_size = glsl_get_bit_size(type->type);
               for (unsigned i = 0; i < num_components; i++)
                     val->constant->values[0].u64[i] = (*c)->values[col].u64[elem + i];
                     val->constant->values[0].u32[i] = (*c)->values[col].u32[elem + i];
                     val->constant->values[0].u16[i] = (*c)->values[col].u16[elem + i];
                     val->constant->values[0].u8[i] = (*c)->values[col].u8[elem + i];
                     vtn_fail("Invalid SpvOpCompositeExtract bit size");

            struct vtn_value *insert =
               vtn_value(b, w[4], vtn_value_type_constant);
            vtn_assert(insert->type == type);
               *c = insert->constant;
               unsigned num_components = type->length;
               unsigned bit_size = glsl_get_bit_size(type->type);
               for (unsigned i = 0; i < num_components; i++)
                     (*c)->values[col].u64[elem + i] = insert->constant->values[0].u64[i];
                     (*c)->values[col].u32[elem + i] = insert->constant->values[0].u32[i];
                     (*c)->values[col].u16[elem + i] = insert->constant->values[0].u16[i];
                     (*c)->values[col].u8[elem + i] = insert->constant->values[0].u8[i];
                     vtn_fail("Invalid SpvOpCompositeInsert bit size");

      nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
      nir_alu_type src_alu_type = dst_alu_type;
      unsigned num_components = glsl_get_vector_elements(val->type->type);
      unsigned bit_size;

      vtn_assert(count <= 7);
         /* We have a source in a conversion */
         src_alu_type =
            nir_get_nir_type_for_glsl_type(
               vtn_value(b, w[4], vtn_value_type_constant)->type->type);
         /* We use the bit size of the conversion source to evaluate the opcode later */
         bit_size = glsl_get_bit_size(
            vtn_value(b, w[4], vtn_value_type_constant)->type->type);
         bit_size = glsl_get_bit_size(val->type->type);

      bool swap;
      nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap,
                                                  nir_alu_type_get_type_size(src_alu_type),
                                                  nir_alu_type_get_type_size(dst_alu_type));
      nir_const_value src[4];

      for (unsigned i = 0; i < count - 4; i++) {
         struct vtn_value *src_val =
            vtn_value(b, w[4 + i], vtn_value_type_constant);

         /* If this is an unsized source, pull the bit size from the
          * source; otherwise, we'll use the bit size from the destination.
          */
         if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
            bit_size = glsl_get_bit_size(src_val->type->type);

         unsigned j = swap ? 1 - i : i;
         src[j] = src_val->constant->values[0];

      /* fix up fixed size sources */
         for (unsigned i = 0; i < num_components; ++i) {
            case 64: src[1].u32[i] = src[1].u64[i]; break;
            case 16: src[1].u32[i] = src[1].u16[i]; break;
            case 8:  src[1].u32[i] = src[1].u8[i];  break;

      val->constant->values[0] =
         nir_eval_const_opcode(op, num_components, bit_size, src);

   case SpvOpConstantNull:
      val->constant = vtn_null_constant(b, val->type->type);

   case SpvOpConstantSampler:
      vtn_fail("OpConstantSampler requires Kernel Capability");

      vtn_fail("Unhandled opcode");

   /* Now that we have the value, update the workgroup size if needed */
   vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);

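/* Builds an empty vtn_ssa_value tree mirroring the given GLSL type: one
 * node per vector/scalar leaf, with matrices split into columns, arrays
 * into elements, and structs into members. Callers fill in the NIR defs.
 */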
struct vtn_ssa_value *
vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
   struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);

   if (!glsl_type_is_vector_or_scalar(type)) {
      unsigned elems = glsl_get_length(type);
      val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
      for (unsigned i = 0; i < elems; i++) {
         const struct glsl_type *child_type;

         switch (glsl_get_base_type(type)) {
         case GLSL_TYPE_UINT:
         case GLSL_TYPE_INT16:
         case GLSL_TYPE_UINT16:
         case GLSL_TYPE_UINT8:
         case GLSL_TYPE_INT8:
         case GLSL_TYPE_INT64:
         case GLSL_TYPE_UINT64:
         case GLSL_TYPE_BOOL:
         case GLSL_TYPE_FLOAT:
         case GLSL_TYPE_FLOAT16:
         case GLSL_TYPE_DOUBLE:
            child_type = glsl_get_column_type(type);
         case GLSL_TYPE_ARRAY:
            child_type = glsl_get_array_element(type);
         case GLSL_TYPE_STRUCT:
            child_type = glsl_get_struct_field(type, i);
            vtn_fail("unknown base type");

         val->elems[i] = vtn_create_ssa_value(b, child_type);

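/* Helper to turn the SPIR-V value with the given id into a nir_tex_src of
 * the requested type.
 */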
vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
   nir_tex_src src;
   src.src = nir_src_for_ssa(vtn_ssa_value(b, index)->def);
   src.src_type = type;

vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
                   const uint32_t *w, unsigned count)
   if (opcode == SpvOpSampledImage) {
      struct vtn_value *val =
         vtn_push_value(b, w[2], vtn_value_type_sampled_image);
      val->sampled_image = ralloc(b, struct vtn_sampled_image);
      val->sampled_image->type =
         vtn_value(b, w[1], vtn_value_type_type)->type;
      val->sampled_image->image =
         vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
      val->sampled_image->sampler =
         vtn_value(b, w[4], vtn_value_type_pointer)->pointer;
   } else if (opcode == SpvOpImage) {
      struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_pointer);
      struct vtn_value *src_val = vtn_untyped_value(b, w[3]);
      if (src_val->value_type == vtn_value_type_sampled_image) {
         val->pointer = src_val->sampled_image->image;
         vtn_assert(src_val->value_type == vtn_value_type_pointer);
         val->pointer = src_val->pointer;

   struct vtn_type *ret_type = vtn_value(b, w[1], vtn_value_type_type)->type;
   struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);

   struct vtn_sampled_image sampled;
   struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
   if (sampled_val->value_type == vtn_value_type_sampled_image) {
      sampled = *sampled_val->sampled_image;
      vtn_assert(sampled_val->value_type == vtn_value_type_pointer);
      sampled.type = sampled_val->pointer->type;
      sampled.image = NULL;
      sampled.sampler = sampled_val->pointer;

   const struct glsl_type *image_type = sampled.type->type;
   const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image_type);
   const bool is_array = glsl_sampler_type_is_array(image_type);

   /* Figure out the base texture operation */
   nir_texop texop;
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
      texop = nir_texop_tex;

   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      texop = nir_texop_txl;

   case SpvOpImageFetch:
      if (glsl_get_sampler_dim(image_type) == GLSL_SAMPLER_DIM_MS) {
         texop = nir_texop_txf_ms;
         texop = nir_texop_txf;

   case SpvOpImageGather:
   case SpvOpImageDrefGather:
      texop = nir_texop_tg4;

   case SpvOpImageQuerySizeLod:
   case SpvOpImageQuerySize:
      texop = nir_texop_txs;

   case SpvOpImageQueryLod:
      texop = nir_texop_lod;

   case SpvOpImageQueryLevels:
      texop = nir_texop_query_levels;

   case SpvOpImageQuerySamples:
      texop = nir_texop_texture_samples;

      vtn_fail("Unhandled opcode");

   nir_tex_src srcs[10]; /* 10 should be enough */
   nir_tex_src *p = srcs;

   nir_deref_instr *sampler = vtn_pointer_to_deref(b, sampled.sampler);
   nir_deref_instr *texture =
      sampled.image ? vtn_pointer_to_deref(b, sampled.image) : sampler;

   p->src = nir_src_for_ssa(&texture->dest.ssa);
   p->src_type = nir_tex_src_texture_deref;

      /* These operations require a sampler */
      p->src = nir_src_for_ssa(&sampler->dest.ssa);
      p->src_type = nir_tex_src_sampler_deref;

   case nir_texop_txf_ms:

   case nir_texop_query_levels:
   case nir_texop_texture_samples:
   case nir_texop_samples_identical:

   case nir_texop_txf_ms_mcs:
      vtn_fail("unexpected nir_texop_txf_ms_mcs");

   unsigned idx = 4;

   struct nir_ssa_def *coord;
   unsigned coord_components;
   case SpvOpImageSampleImplicitLod:
   case SpvOpImageSampleExplicitLod:
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageFetch:
   case SpvOpImageGather:
   case SpvOpImageDrefGather:
   case SpvOpImageQueryLod: {
      /* All these types have the coordinate as their first real argument */
      switch (sampler_dim) {
      case GLSL_SAMPLER_DIM_1D:
      case GLSL_SAMPLER_DIM_BUF:
         coord_components = 1;
      case GLSL_SAMPLER_DIM_2D:
      case GLSL_SAMPLER_DIM_RECT:
      case GLSL_SAMPLER_DIM_MS:
         coord_components = 2;
      case GLSL_SAMPLER_DIM_3D:
      case GLSL_SAMPLER_DIM_CUBE:
         coord_components = 3;
         vtn_fail("Invalid sampler type");

      if (is_array && texop != nir_texop_lod)

      coord = vtn_ssa_value(b, w[idx++])->def;
      p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
                                            (1 << coord_components) - 1));
      p->src_type = nir_tex_src_coord;

      coord_components = 0;

   case SpvOpImageSampleProjImplicitLod:
   case SpvOpImageSampleProjExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
      /* These have the projector as the last coordinate component */
      p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
      p->src_type = nir_tex_src_projector;

   bool is_shadow = false;
   unsigned gather_component = 0;
   case SpvOpImageSampleDrefImplicitLod:
   case SpvOpImageSampleDrefExplicitLod:
   case SpvOpImageSampleProjDrefImplicitLod:
   case SpvOpImageSampleProjDrefExplicitLod:
   case SpvOpImageDrefGather:
      /* These all have an explicit depth value as their next source */
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);

   case SpvOpImageGather:
      /* This has a component as its next source */
      gather_component =
         vtn_value(b, w[idx++], vtn_value_type_constant)->constant->values[0].u32[0];

   /* For OpImageQuerySizeLod, we always have an LOD */
   if (opcode == SpvOpImageQuerySizeLod)
      (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

   /* Now we need to handle some number of optional arguments */
   const struct vtn_ssa_value *gather_offsets = NULL;
      uint32_t operands = w[idx++];

      if (operands & SpvImageOperandsBiasMask) {
         vtn_assert(texop == nir_texop_tex);
         texop = nir_texop_txb;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_bias);

      if (operands & SpvImageOperandsLodMask) {
         vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
                    texop == nir_texop_txs);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);

      if (operands & SpvImageOperandsGradMask) {
         vtn_assert(texop == nir_texop_txl);
         texop = nir_texop_txd;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddx);
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ddy);

      if (operands & SpvImageOperandsOffsetMask ||
          operands & SpvImageOperandsConstOffsetMask)
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_offset);

      if (operands & SpvImageOperandsConstOffsetsMask) {
         nir_tex_src none = {0};
         gather_offsets = vtn_ssa_value(b, w[idx++]);

      if (operands & SpvImageOperandsSampleMask) {
         vtn_assert(texop == nir_texop_txf_ms);
         texop = nir_texop_txf_ms;
         (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);

   /* We should have now consumed exactly all of the arguments */
   vtn_assert(idx == count);

2180 nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
2183 memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
2185 instr->coord_components = coord_components;
2186 instr->sampler_dim = sampler_dim;
2187 instr->is_array = is_array;
2188 instr->is_shadow = is_shadow;
2189 instr->is_new_style_shadow =
2190 is_shadow && glsl_get_components(ret_type->type) == 1;
2191 instr->component = gather_component;
2193 switch (glsl_get_sampler_result_type(image_type)) {
2194 case GLSL_TYPE_FLOAT: instr->dest_type = nir_type_float; break;
2195 case GLSL_TYPE_INT: instr->dest_type = nir_type_int; break;
2196 case GLSL_TYPE_UINT: instr->dest_type = nir_type_uint; break;
2197 case GLSL_TYPE_BOOL: instr->dest_type = nir_type_bool; break;
2199 vtn_fail("Invalid base type for sampler result");
2202 nir_ssa_dest_init(&instr->instr, &instr->dest,
2203 nir_tex_instr_dest_size(instr), 32, NULL);
2205 vtn_assert(glsl_get_vector_elements(ret_type->type) ==
2206 nir_tex_instr_dest_size(instr));
2209 nir_instr *instruction;
2210 if (gather_offsets) {
2211 vtn_assert(glsl_get_base_type(gather_offsets->type) == GLSL_TYPE_ARRAY);
2212 vtn_assert(glsl_get_length(gather_offsets->type) == 4);
2213 nir_tex_instr *instrs[4] = {instr, NULL, NULL, NULL};
2215 /* Copy the current instruction 4x */
2216 for (uint32_t i = 1; i < 4; i++) {
2217 instrs[i] = nir_tex_instr_create(b->shader, instr->num_srcs);
2218 instrs[i]->op = instr->op;
2219 instrs[i]->coord_components = instr->coord_components;
2220 instrs[i]->sampler_dim = instr->sampler_dim;
2221 instrs[i]->is_array = instr->is_array;
2222 instrs[i]->is_shadow = instr->is_shadow;
2223 instrs[i]->is_new_style_shadow = instr->is_new_style_shadow;
2224 instrs[i]->component = instr->component;
2225 instrs[i]->dest_type = instr->dest_type;
2227 memcpy(instrs[i]->src, srcs, instr->num_srcs * sizeof(*instr->src));
2229 nir_ssa_dest_init(&instrs[i]->instr, &instrs[i]->dest,
2230 nir_tex_instr_dest_size(instr), 32, NULL);
2233 /* Fill in the last argument with the offset from the passed-in offsets
2234 * and insert the instruction into the stream.
2236 for (uint32_t i = 0; i < 4; i++) {
2238 src.src = nir_src_for_ssa(gather_offsets->elems[i]->def);
2239 src.src_type = nir_tex_src_offset;
2240 instrs[i]->src[instrs[i]->num_srcs - 1] = src;
2241 nir_builder_instr_insert(&b->nb, &instrs[i]->instr);
2244 /* Combine the results of the 4 instructions by taking their .w
2245 * components and packing them into a single vec4.
2246 */
2247 nir_alu_instr *vec4 = nir_alu_instr_create(b->shader, nir_op_vec4);
2248 nir_ssa_dest_init(&vec4->instr, &vec4->dest.dest, 4, 32, NULL);
2249 vec4->dest.write_mask = 0xf;
2250 for (uint32_t i = 0; i < 4; i++) {
2251 vec4->src[i].src = nir_src_for_ssa(&instrs[i]->dest.ssa);
2252 vec4->src[i].swizzle[0] = 3;
2254 def = &vec4->dest.dest.ssa;
2255 instruction = &vec4->instr;
2257 def = &instr->dest.ssa;
2258 instruction = &instr->instr;
2261 val->ssa = vtn_create_ssa_value(b, ret_type->type);
2262 val->ssa->def = def;
2264 nir_builder_instr_insert(&b->nb, instruction);
2268 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
2269 const uint32_t *w, nir_src *src)
2272 case SpvOpAtomicIIncrement:
2273 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, 1));
2276 case SpvOpAtomicIDecrement:
2277 src[0] = nir_src_for_ssa(nir_imm_int(&b->nb, -1));
2280 case SpvOpAtomicISub:
2282 nir_src_for_ssa(nir_ineg(&b->nb, vtn_ssa_value(b, w[6])->def));
2285 case SpvOpAtomicCompareExchange:
2286 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[8])->def);
2287 src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[7])->def);
2290 case SpvOpAtomicExchange:
2291 case SpvOpAtomicIAdd:
2292 case SpvOpAtomicSMin:
2293 case SpvOpAtomicUMin:
2294 case SpvOpAtomicSMax:
2295 case SpvOpAtomicUMax:
2296 case SpvOpAtomicAnd:
2298 case SpvOpAtomicXor:
2299 src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[6])->def);
2303 vtn_fail("Invalid SPIR-V atomic");
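/* Builds the coordinate for an image intrinsic. The image_load_store
 * intrinsics always take a vec4 coordinate, so smaller coordinates are
 * padded by replicating the last component. Illustrative example: a 2D
 * coordinate (x, y) has dim == 2, giving the swizzle {0, 1, 1, 1} and
 * the result vec4(x, y, y, y).
 */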
2307 static nir_ssa_def *
2308 get_image_coord(struct vtn_builder *b, uint32_t value)
2310 struct vtn_ssa_value *coord = vtn_ssa_value(b, value);
2312 /* The image_load_store intrinsics assume a 4-component coordinate */
2313 unsigned dim = glsl_get_vector_elements(coord->type);
2314 unsigned swizzle[4];
2315 for (unsigned i = 0; i < 4; i++)
2316 swizzle[i] = MIN2(i, dim - 1);
2318 return nir_swizzle(&b->nb, coord->def, swizzle, 4, false);
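/* Pads a value out to four components; nir_intrinsic_image_deref_store in
 * particular always consumes a vec4. Components beyond the source width
 * repeat component 0. Illustrative example: a vec2 (x, y) uses the swizzle
 * {0, 1, 0, 0} and becomes vec4(x, y, x, x).
 */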
2321 static nir_ssa_def *
2322 expand_to_vec4(nir_builder *b, nir_ssa_def *value)
2324 if (value->num_components == 4)
2328 for (unsigned i = 0; i < 4; i++)
2329 swiz[i] = i < value->num_components ? i : 0;
2330 return nir_swizzle(b, value, swiz, 4, false);
2334 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
2335 const uint32_t *w, unsigned count)
2337 /* Just get this one out of the way */
2338 if (opcode == SpvOpImageTexelPointer) {
2339 struct vtn_value *val =
2340 vtn_push_value(b, w[2], vtn_value_type_image_pointer);
2341 val->image = ralloc(b, struct vtn_image_pointer);
2343 val->image->image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2344 val->image->coord = get_image_coord(b, w[4]);
2345 val->image->sample = vtn_ssa_value(b, w[5])->def;
2349 struct vtn_image_pointer image;
2352 case SpvOpAtomicExchange:
2353 case SpvOpAtomicCompareExchange:
2354 case SpvOpAtomicCompareExchangeWeak:
2355 case SpvOpAtomicIIncrement:
2356 case SpvOpAtomicIDecrement:
2357 case SpvOpAtomicIAdd:
2358 case SpvOpAtomicISub:
2359 case SpvOpAtomicLoad:
2360 case SpvOpAtomicSMin:
2361 case SpvOpAtomicUMin:
2362 case SpvOpAtomicSMax:
2363 case SpvOpAtomicUMax:
2364 case SpvOpAtomicAnd:
2366 case SpvOpAtomicXor:
2367 image = *vtn_value(b, w[3], vtn_value_type_image_pointer)->image;
2370 case SpvOpAtomicStore:
2371 image = *vtn_value(b, w[1], vtn_value_type_image_pointer)->image;
2374 case SpvOpImageQuerySize:
2375 image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2377 image.sample = NULL;
2380 case SpvOpImageRead:
2381 image.image = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2382 image.coord = get_image_coord(b, w[4]);
2384 if (count > 5 && (w[5] & SpvImageOperandsSampleMask)) {
2385 vtn_assert(w[5] == SpvImageOperandsSampleMask);
2386 image.sample = vtn_ssa_value(b, w[6])->def;
2388 image.sample = nir_ssa_undef(&b->nb, 1, 32);
2392 case SpvOpImageWrite:
2393 image.image = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
2394 image.coord = get_image_coord(b, w[2]);
2398 if (count > 4 && (w[4] & SpvImageOperandsSampleMask)) {
2399 vtn_assert(w[4] == SpvImageOperandsSampleMask);
2400 image.sample = vtn_ssa_value(b, w[5])->def;
2402 image.sample = nir_ssa_undef(&b->nb, 1, 32);
2407 vtn_fail("Invalid image opcode");
2410 nir_intrinsic_op op;
2412 #define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
2413 OP(ImageQuerySize, size)
2415 OP(ImageWrite, store)
2416 OP(AtomicLoad, load)
2417 OP(AtomicStore, store)
2418 OP(AtomicExchange, atomic_exchange)
2419 OP(AtomicCompareExchange, atomic_comp_swap)
2420 OP(AtomicIIncrement, atomic_add)
2421 OP(AtomicIDecrement, atomic_add)
2422 OP(AtomicIAdd, atomic_add)
2423 OP(AtomicISub, atomic_add)
2424 OP(AtomicSMin, atomic_min)
2425 OP(AtomicUMin, atomic_min)
2426 OP(AtomicSMax, atomic_max)
2427 OP(AtomicUMax, atomic_max)
2428 OP(AtomicAnd, atomic_and)
2429 OP(AtomicOr, atomic_or)
2430 OP(AtomicXor, atomic_xor)
2433 vtn_fail("Invalid image opcode");
2436 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
2438 nir_deref_instr *image_deref = vtn_pointer_to_deref(b, image.image);
2439 intrin->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
2441 /* ImageQuerySize doesn't take any extra parameters */
2442 if (opcode != SpvOpImageQuerySize) {
2443 /* The image coordinate is always 4 components but we may not have that
2444 * many. Swizzle to compensate.
2446 intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
2447 intrin->src[2] = nir_src_for_ssa(image.sample);
2451 case SpvOpAtomicLoad:
2452 case SpvOpImageQuerySize:
2453 case SpvOpImageRead:
2455 case SpvOpAtomicStore:
2456 case SpvOpImageWrite: {
2457 const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
2458 nir_ssa_def *value = vtn_ssa_value(b, value_id)->def;
2459 /* nir_intrinsic_image_deref_store always takes a vec4 value */
2460 assert(op == nir_intrinsic_image_deref_store);
2461 intrin->num_components = 4;
2462 intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value));
2466 case SpvOpAtomicCompareExchange:
2467 case SpvOpAtomicIIncrement:
2468 case SpvOpAtomicIDecrement:
2469 case SpvOpAtomicExchange:
2470 case SpvOpAtomicIAdd:
2471 case SpvOpAtomicISub:
2472 case SpvOpAtomicSMin:
2473 case SpvOpAtomicUMin:
2474 case SpvOpAtomicSMax:
2475 case SpvOpAtomicUMax:
2476 case SpvOpAtomicAnd:
2478 case SpvOpAtomicXor:
2479 fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
2483 vtn_fail("Invalid image opcode");
2486 if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
2487 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2488 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
2490 unsigned dest_components = glsl_get_vector_elements(type->type);
2491 intrin->num_components = nir_intrinsic_infos[op].dest_components;
2492 if (intrin->num_components == 0)
2493 intrin->num_components = dest_components;
2495 nir_ssa_dest_init(&intrin->instr, &intrin->dest,
2496 intrin->num_components, 32, NULL);
2498 nir_builder_instr_insert(&b->nb, &intrin->instr);
2500 nir_ssa_def *result = &intrin->dest.ssa;
2501 if (intrin->num_components != dest_components)
2502 result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
2504 val->ssa = vtn_create_ssa_value(b, type->type);
2505 val->ssa->def = result;
2507 nir_builder_instr_insert(&b->nb, &intrin->instr);
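/* Note that, as in the image table above, the SSBO/shared/deref OP()
 * tables below fold AtomicIIncrement, AtomicIDecrement, and AtomicISub
 * into an atomic add; fill_common_atomic_sources() supplies the matching
 * data source (+1, -1, or the negated operand, respectively). The atomic
 * counter table is the exception: its inc/post_dec intrinsics take no
 * data source at all.
 */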
2511 static nir_intrinsic_op
2512 get_ssbo_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2515 case SpvOpAtomicLoad: return nir_intrinsic_load_ssbo;
2516 case SpvOpAtomicStore: return nir_intrinsic_store_ssbo;
2517 #define OP(S, N) case SpvOp##S: return nir_intrinsic_ssbo_##N;
2518 OP(AtomicExchange, atomic_exchange)
2519 OP(AtomicCompareExchange, atomic_comp_swap)
2520 OP(AtomicIIncrement, atomic_add)
2521 OP(AtomicIDecrement, atomic_add)
2522 OP(AtomicIAdd, atomic_add)
2523 OP(AtomicISub, atomic_add)
2524 OP(AtomicSMin, atomic_imin)
2525 OP(AtomicUMin, atomic_umin)
2526 OP(AtomicSMax, atomic_imax)
2527 OP(AtomicUMax, atomic_umax)
2528 OP(AtomicAnd, atomic_and)
2529 OP(AtomicOr, atomic_or)
2530 OP(AtomicXor, atomic_xor)
2533 vtn_fail("Invalid SSBO atomic");
2537 static nir_intrinsic_op
2538 get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2541 #define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
2542 OP(AtomicLoad, read_deref)
2543 OP(AtomicExchange, exchange)
2544 OP(AtomicCompareExchange, comp_swap)
2545 OP(AtomicIIncrement, inc_deref)
2546 OP(AtomicIDecrement, post_dec_deref)
2547 OP(AtomicIAdd, add_deref)
2548 OP(AtomicISub, add_deref)
2549 OP(AtomicUMin, min_deref)
2550 OP(AtomicUMax, max_deref)
2551 OP(AtomicAnd, and_deref)
2552 OP(AtomicOr, or_deref)
2553 OP(AtomicXor, xor_deref)
2556 /* We left the following out: AtomicStore, AtomicSMin and
2557 * AtomicSMax. Right now there are no NIR intrinsics for them. At the
2558 * moment, atomic counter support is only needed for ARB_gl_spirv, so we
2559 * only need to support GLSL atomic counters, which are uints and don't
2560 * allow direct storage.
2562 unreachable("Invalid uniform atomic");
2566 static nir_intrinsic_op
2567 get_shared_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2570 case SpvOpAtomicLoad: return nir_intrinsic_load_shared;
2571 case SpvOpAtomicStore: return nir_intrinsic_store_shared;
2572 #define OP(S, N) case SpvOp##S: return nir_intrinsic_shared_##N;
2573 OP(AtomicExchange, atomic_exchange)
2574 OP(AtomicCompareExchange, atomic_comp_swap)
2575 OP(AtomicIIncrement, atomic_add)
2576 OP(AtomicIDecrement, atomic_add)
2577 OP(AtomicIAdd, atomic_add)
2578 OP(AtomicISub, atomic_add)
2579 OP(AtomicSMin, atomic_imin)
2580 OP(AtomicUMin, atomic_umin)
2581 OP(AtomicSMax, atomic_imax)
2582 OP(AtomicUMax, atomic_umax)
2583 OP(AtomicAnd, atomic_and)
2584 OP(AtomicOr, atomic_or)
2585 OP(AtomicXor, atomic_xor)
2588 vtn_fail("Invalid shared atomic");
2592 static nir_intrinsic_op
2593 get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
2596 case SpvOpAtomicLoad: return nir_intrinsic_load_deref;
2597 case SpvOpAtomicStore: return nir_intrinsic_store_deref;
2598 #define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
2599 OP(AtomicExchange, atomic_exchange)
2600 OP(AtomicCompareExchange, atomic_comp_swap)
2601 OP(AtomicIIncrement, atomic_add)
2602 OP(AtomicIDecrement, atomic_add)
2603 OP(AtomicIAdd, atomic_add)
2604 OP(AtomicISub, atomic_add)
2605 OP(AtomicSMin, atomic_imin)
2606 OP(AtomicUMin, atomic_umin)
2607 OP(AtomicSMax, atomic_imax)
2608 OP(AtomicUMax, atomic_umax)
2609 OP(AtomicAnd, atomic_and)
2610 OP(AtomicOr, atomic_or)
2611 OP(AtomicXor, atomic_xor)
2614 vtn_fail("Invalid deref atomic");
2619 * Handles shared atomics, SSBO atomics and atomic counters.
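*
* Dispatch works as follows: pointers in uniform mode are treated as
* atomic counters, workgroup pointers (when not lowered to offsets) use
* the deref atomics, and SSBO or offset-lowered workgroup pointers use
* the explicit index/offset intrinsics.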
2622 vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
2623 const uint32_t *w, unsigned count)
2625 struct vtn_pointer *ptr;
2626 nir_intrinsic_instr *atomic;
2629 case SpvOpAtomicLoad:
2630 case SpvOpAtomicExchange:
2631 case SpvOpAtomicCompareExchange:
2632 case SpvOpAtomicCompareExchangeWeak:
2633 case SpvOpAtomicIIncrement:
2634 case SpvOpAtomicIDecrement:
2635 case SpvOpAtomicIAdd:
2636 case SpvOpAtomicISub:
2637 case SpvOpAtomicSMin:
2638 case SpvOpAtomicUMin:
2639 case SpvOpAtomicSMax:
2640 case SpvOpAtomicUMax:
2641 case SpvOpAtomicAnd:
2643 case SpvOpAtomicXor:
2644 ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
2647 case SpvOpAtomicStore:
2648 ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
2652 vtn_fail("Invalid SPIR-V atomic");
2656 SpvScope scope = w[4];
2657 SpvMemorySemanticsMask semantics = w[5];
2660 /* "uniform" mode here means an atomic counter uniform */
2661 if (ptr->mode == vtn_variable_mode_uniform) {
2662 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
2663 const struct glsl_type *deref_type = deref->type;
2664 nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
2665 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
2666 atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
2668 /* SSBO atomics need to set up an index/offset. Here we don't need to,
2669 * because that information is already stored on the ptr->var->var
2670 * nir_variable (see vtn_create_variable).
2674 case SpvOpAtomicLoad:
2675 atomic->num_components = glsl_get_vector_elements(deref_type);
2678 case SpvOpAtomicStore:
2679 atomic->num_components = glsl_get_vector_elements(deref_type);
2680 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
2683 case SpvOpAtomicExchange:
2684 case SpvOpAtomicCompareExchange:
2685 case SpvOpAtomicCompareExchangeWeak:
2686 case SpvOpAtomicIIncrement:
2687 case SpvOpAtomicIDecrement:
2688 case SpvOpAtomicIAdd:
2689 case SpvOpAtomicISub:
2690 case SpvOpAtomicSMin:
2691 case SpvOpAtomicUMin:
2692 case SpvOpAtomicSMax:
2693 case SpvOpAtomicUMax:
2694 case SpvOpAtomicAnd:
2696 case SpvOpAtomicXor:
2697 /* Nothing: we don't need to call fill_common_atomic_sources here, as
2698 * atomic counter uniforms don't have sources
2703 unreachable("Invalid SPIR-V atomic");
2706 } else if (ptr->mode == vtn_variable_mode_workgroup &&
2707 !b->options->lower_workgroup_access_to_offsets) {
2708 nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
2709 const struct glsl_type *deref_type = deref->type;
2710 nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
2711 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
2712 atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
2715 case SpvOpAtomicLoad:
2716 atomic->num_components = glsl_get_vector_elements(deref_type);
2719 case SpvOpAtomicStore:
2720 atomic->num_components = glsl_get_vector_elements(deref_type);
2721 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
2722 atomic->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
2725 case SpvOpAtomicExchange:
2726 case SpvOpAtomicCompareExchange:
2727 case SpvOpAtomicCompareExchangeWeak:
2728 case SpvOpAtomicIIncrement:
2729 case SpvOpAtomicIDecrement:
2730 case SpvOpAtomicIAdd:
2731 case SpvOpAtomicISub:
2732 case SpvOpAtomicSMin:
2733 case SpvOpAtomicUMin:
2734 case SpvOpAtomicSMax:
2735 case SpvOpAtomicUMax:
2736 case SpvOpAtomicAnd:
2738 case SpvOpAtomicXor:
2739 fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
2743 vtn_fail("Invalid SPIR-V atomic");
2747 nir_ssa_def *offset, *index;
2748 offset = vtn_pointer_to_offset(b, ptr, &index);
2750 nir_intrinsic_op op;
2751 if (ptr->mode == vtn_variable_mode_ssbo) {
2752 op = get_ssbo_nir_atomic_op(b, opcode);
2754 vtn_assert(ptr->mode == vtn_variable_mode_workgroup &&
2755 b->options->lower_workgroup_access_to_offsets);
2756 op = get_shared_nir_atomic_op(b, opcode);
2759 atomic = nir_intrinsic_instr_create(b->nb.shader, op);
2763 case SpvOpAtomicLoad:
2764 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
2765 if (ptr->mode == vtn_variable_mode_ssbo)
2766 atomic->src[src++] = nir_src_for_ssa(index);
2767 atomic->src[src++] = nir_src_for_ssa(offset);
2770 case SpvOpAtomicStore:
2771 atomic->num_components = glsl_get_vector_elements(ptr->type->type);
2772 nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
2773 atomic->src[src++] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
2774 if (ptr->mode == vtn_variable_mode_ssbo)
2775 atomic->src[src++] = nir_src_for_ssa(index);
2776 atomic->src[src++] = nir_src_for_ssa(offset);
2779 case SpvOpAtomicExchange:
2780 case SpvOpAtomicCompareExchange:
2781 case SpvOpAtomicCompareExchangeWeak:
2782 case SpvOpAtomicIIncrement:
2783 case SpvOpAtomicIDecrement:
2784 case SpvOpAtomicIAdd:
2785 case SpvOpAtomicISub:
2786 case SpvOpAtomicSMin:
2787 case SpvOpAtomicUMin:
2788 case SpvOpAtomicSMax:
2789 case SpvOpAtomicUMax:
2790 case SpvOpAtomicAnd:
2792 case SpvOpAtomicXor:
2793 if (ptr->mode == vtn_variable_mode_ssbo)
2794 atomic->src[src++] = nir_src_for_ssa(index);
2795 atomic->src[src++] = nir_src_for_ssa(offset);
2796 fill_common_atomic_sources(b, opcode, w, &atomic->src[src]);
2800 vtn_fail("Invalid SPIR-V atomic");
2804 if (opcode != SpvOpAtomicStore) {
2805 struct vtn_type *type = vtn_value(b, w[1], vtn_value_type_type)->type;
2807 nir_ssa_dest_init(&atomic->instr, &atomic->dest,
2808 glsl_get_vector_elements(type->type),
2809 glsl_get_bit_size(type->type), NULL);
2811 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
2812 val->ssa = rzalloc(b, struct vtn_ssa_value);
2813 val->ssa->def = &atomic->dest.ssa;
2814 val->ssa->type = type->type;
2817 nir_builder_instr_insert(&b->nb, &atomic->instr);
2820 static nir_alu_instr *
2821 create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
2824 switch (num_components) {
2825 case 1: op = nir_op_imov; break;
2826 case 2: op = nir_op_vec2; break;
2827 case 3: op = nir_op_vec3; break;
2828 case 4: op = nir_op_vec4; break;
2829 default: vtn_fail("bad vector size");
2832 nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
2833 nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
2835 vec->dest.write_mask = (1 << num_components) - 1;
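/* Transposes a matrix by emitting one vecN per destination column, each
 * gathering component i of every source column. The result's ->transposed
 * field points back at src, so the early-out at the top of the function
 * makes transposing the result again return the original value for free.
 */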
2840 struct vtn_ssa_value *
2841 vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
2843 if (src->transposed)
2844 return src->transposed;
2846 struct vtn_ssa_value *dest =
2847 vtn_create_ssa_value(b, glsl_transposed_type(src->type));
2849 for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
2850 nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
2851 glsl_get_bit_size(src->type));
2852 if (glsl_type_is_vector_or_scalar(src->type)) {
2853 vec->src[0].src = nir_src_for_ssa(src->def);
2854 vec->src[0].swizzle[0] = i;
2856 for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
2857 vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
2858 vec->src[j].swizzle[0] = i;
2861 nir_builder_instr_insert(&b->nb, &vec->instr);
2862 dest->elems[i]->def = &vec->dest.dest.ssa;
2865 dest->transposed = src;
2871 vtn_vector_extract(struct vtn_builder *b, nir_ssa_def *src, unsigned index)
2873 return nir_channel(&b->nb, src, index);
2877 vtn_vector_insert(struct vtn_builder *b, nir_ssa_def *src, nir_ssa_def *insert,
2880 nir_alu_instr *vec = create_vec(b, src->num_components,
2883 for (unsigned i = 0; i < src->num_components; i++) {
2885 vec->src[i].src = nir_src_for_ssa(insert);
2887 vec->src[i].src = nir_src_for_ssa(src);
2888 vec->src[i].swizzle[0] = i;
2892 nir_builder_instr_insert(&b->nb, &vec->instr);
2894 return &vec->dest.dest.ssa;
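/* The *_dynamic helpers below handle a non-constant index by lowering to a
 * chain of nir_bcsel instructions, one per component. Illustrative example:
 * extracting component `index` from a vec3 becomes roughly
 *
 *    bcsel(index == 2, src.z, bcsel(index == 1, src.y, src.x))
 *
 * so an out-of-range index simply selects component 0.
 */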
2898 vtn_vector_extract_dynamic(struct vtn_builder *b, nir_ssa_def *src,
2901 nir_ssa_def *dest = vtn_vector_extract(b, src, 0);
2902 for (unsigned i = 1; i < src->num_components; i++)
2903 dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
2904 vtn_vector_extract(b, src, i), dest);
2910 vtn_vector_insert_dynamic(struct vtn_builder *b, nir_ssa_def *src,
2911 nir_ssa_def *insert, nir_ssa_def *index)
2913 nir_ssa_def *dest = vtn_vector_insert(b, src, insert, 0);
2914 for (unsigned i = 1; i < src->num_components; i++)
2915 dest = nir_bcsel(&b->nb, nir_ieq(&b->nb, index, nir_imm_int(&b->nb, i)),
2916 vtn_vector_insert(b, src, insert, i), dest);
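/* Implements OpVectorShuffle: each entry in `indices` selects a component
 * from the virtual concatenation of src0 and src1, and the SPIR-V
 * "undefined component" index 0xffffffff becomes an SSA undef. Illustrative
 * example: with src0 = (a, b) and src1 = (c, d), indices {3, 0} produce
 * the vec2 (d, a).
 */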
2921 static nir_ssa_def *
2922 vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
2923 nir_ssa_def *src0, nir_ssa_def *src1,
2924 const uint32_t *indices)
2926 nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);
2928 for (unsigned i = 0; i < num_components; i++) {
2929 uint32_t index = indices[i];
2930 if (index == 0xffffffff) {
2932 nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
2933 } else if (index < src0->num_components) {
2934 vec->src[i].src = nir_src_for_ssa(src0);
2935 vec->src[i].swizzle[0] = index;
2937 vec->src[i].src = nir_src_for_ssa(src1);
2938 vec->src[i].swizzle[0] = index - src0->num_components;
2942 nir_builder_instr_insert(&b->nb, &vec->instr);
2944 return &vec->dest.dest.ssa;
2948 * Concatenates a number of vectors/scalars together to produce a vector
2950 static nir_ssa_def *
2951 vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
2952 unsigned num_srcs, nir_ssa_def **srcs)
2954 nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);
2956 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
2958 * "When constructing a vector, there must be at least two Constituent
2961 vtn_assert(num_srcs >= 2);
2963 unsigned dest_idx = 0;
2964 for (unsigned i = 0; i < num_srcs; i++) {
2965 nir_ssa_def *src = srcs[i];
2966 vtn_assert(dest_idx + src->num_components <= num_components);
2967 for (unsigned j = 0; j < src->num_components; j++) {
2968 vec->src[dest_idx].src = nir_src_for_ssa(src);
2969 vec->src[dest_idx].swizzle[0] = j;
2974 /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
2976 * "When constructing a vector, the total number of components in all
2977 * the operands must equal the number of components in Result Type."
2979 vtn_assert(dest_idx == num_components);
2981 nir_builder_instr_insert(&b->nb, &vec->instr);
2983 return &vec->dest.dest.ssa;
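/* Recursively clones a vtn_ssa_value. Vector/scalar leaves share the
 * underlying nir_ssa_def (SSA defs are immutable, so no copy is needed);
 * only the aggregate structure is duplicated. This gives
 * vtn_composite_insert() a tree it can rewrite without disturbing the
 * source value.
 */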
2986 static struct vtn_ssa_value *
2987 vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
2989 struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
2990 dest->type = src->type;
2992 if (glsl_type_is_vector_or_scalar(src->type)) {
2993 dest->def = src->def;
2995 unsigned elems = glsl_get_length(src->type);
2997 dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
2998 for (unsigned i = 0; i < elems; i++)
2999 dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
3005 static struct vtn_ssa_value *
3006 vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
3007 struct vtn_ssa_value *insert, const uint32_t *indices,
3008 unsigned num_indices)
3010 struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
3012 struct vtn_ssa_value *cur = dest;
3014 for (i = 0; i < num_indices - 1; i++) {
3015 cur = cur->elems[indices[i]];
3018 if (glsl_type_is_vector_or_scalar(cur->type)) {
3019 /* According to the SPIR-V spec, OpCompositeInsert may work down to
3020 * the component granularity. In that case, the last index will be
3021 * the index to insert the scalar into the vector.
3024 cur->def = vtn_vector_insert(b, cur->def, insert->def, indices[i]);
3026 cur->elems[indices[i]] = insert;
3032 static struct vtn_ssa_value *
3033 vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
3034 const uint32_t *indices, unsigned num_indices)
3036 struct vtn_ssa_value *cur = src;
3037 for (unsigned i = 0; i < num_indices; i++) {
3038 if (glsl_type_is_vector_or_scalar(cur->type)) {
3039 vtn_assert(i == num_indices - 1);
3040 /* According to the SPIR-V spec, OpCompositeExtract may work down to
3041 * the component granularity. The last index will be the index of the
3042 * vector to extract.
3045 struct vtn_ssa_value *ret = rzalloc(b, struct vtn_ssa_value);
3046 ret->type = glsl_scalar_type(glsl_get_base_type(cur->type));
3047 ret->def = vtn_vector_extract(b, cur->def, indices[i]);
3050 cur = cur->elems[indices[i]];
3058 vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
3059 const uint32_t *w, unsigned count)
3061 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_ssa);
3062 const struct glsl_type *type =
3063 vtn_value(b, w[1], vtn_value_type_type)->type->type;
3064 val->ssa = vtn_create_ssa_value(b, type);
3067 case SpvOpVectorExtractDynamic:
3068 val->ssa->def = vtn_vector_extract_dynamic(b, vtn_ssa_value(b, w[3])->def,
3069 vtn_ssa_value(b, w[4])->def);
3072 case SpvOpVectorInsertDynamic:
3073 val->ssa->def = vtn_vector_insert_dynamic(b, vtn_ssa_value(b, w[3])->def,
3074 vtn_ssa_value(b, w[4])->def,
3075 vtn_ssa_value(b, w[5])->def);
3078 case SpvOpVectorShuffle:
3079 val->ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type),
3080 vtn_ssa_value(b, w[3])->def,
3081 vtn_ssa_value(b, w[4])->def,
3085 case SpvOpCompositeConstruct: {
3086 unsigned elems = count - 3;
3088 if (glsl_type_is_vector_or_scalar(type)) {
3089 nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
3090 for (unsigned i = 0; i < elems; i++)
3091 srcs[i] = vtn_ssa_value(b, w[3 + i])->def;
3093 vtn_vector_construct(b, glsl_get_vector_elements(type),
3096 val->ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
3097 for (unsigned i = 0; i < elems; i++)
3098 val->ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
3102 case SpvOpCompositeExtract:
3103 val->ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
3107 case SpvOpCompositeInsert:
3108 val->ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
3109 vtn_ssa_value(b, w[3]),
3113 case SpvOpCopyObject:
3114 val->ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
3118 vtn_fail("unknown composite operation");
3123 vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
3125 nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
3126 nir_builder_instr_insert(&b->nb, &intrin->instr);
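/* Maps a SPIR-V scope and memory-semantics mask onto NIR barrier
 * intrinsics: workgroup scope becomes a group memory barrier, the full
 * semantics mask at invocation/device scope becomes a single
 * nir_intrinsic_memory_barrier, and anything narrower is split into the
 * matching buffer/shared/atomic-counter/image barriers.
 */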
3130 vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
3131 SpvMemorySemanticsMask semantics)
3133 static const SpvMemorySemanticsMask all_memory_semantics =
3134 SpvMemorySemanticsUniformMemoryMask |
3135 SpvMemorySemanticsWorkgroupMemoryMask |
3136 SpvMemorySemanticsAtomicCounterMemoryMask |
3137 SpvMemorySemanticsImageMemoryMask;
3139 /* If we're not actually doing a memory barrier, bail */
3140 if (!(semantics & all_memory_semantics))
3143 /* GL and Vulkan don't have these */
3144 vtn_assert(scope != SpvScopeCrossDevice);
3146 if (scope == SpvScopeSubgroup)
3147 return; /* Nothing to do here */
3149 if (scope == SpvScopeWorkgroup) {
3150 vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
3154 /* There are only two scopes left */
3155 vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
3157 if ((semantics & all_memory_semantics) == all_memory_semantics) {
3158 vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
3162 /* Issue a bunch of more specific barriers */
3163 uint32_t bits = semantics;
3165 SpvMemorySemanticsMask semantic = 1 << u_bit_scan(&bits);
3167 case SpvMemorySemanticsUniformMemoryMask:
3168 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
3170 case SpvMemorySemanticsWorkgroupMemoryMask:
3171 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
3173 case SpvMemorySemanticsAtomicCounterMemoryMask:
3174 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
3176 case SpvMemorySemanticsImageMemoryMask:
3177 vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
3186 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
3187 const uint32_t *w, unsigned count)
3190 case SpvOpEmitVertex:
3191 case SpvOpEmitStreamVertex:
3192 case SpvOpEndPrimitive:
3193 case SpvOpEndStreamPrimitive: {
3194 nir_intrinsic_op intrinsic_op;
3196 case SpvOpEmitVertex:
3197 case SpvOpEmitStreamVertex:
3198 intrinsic_op = nir_intrinsic_emit_vertex;
3200 case SpvOpEndPrimitive:
3201 case SpvOpEndStreamPrimitive:
3202 intrinsic_op = nir_intrinsic_end_primitive;
3205 unreachable("Invalid opcode");
3208 nir_intrinsic_instr *intrin =
3209 nir_intrinsic_instr_create(b->shader, intrinsic_op);
3212 case SpvOpEmitStreamVertex:
3213 case SpvOpEndStreamPrimitive: {
3214 unsigned stream = vtn_constant_value(b, w[1])->values[0].u32[0];
3215 nir_intrinsic_set_stream_id(intrin, stream);
3223 nir_builder_instr_insert(&b->nb, &intrin->instr);
3227 case SpvOpMemoryBarrier: {
3228 SpvScope scope = vtn_constant_value(b, w[1])->values[0].u32[0];
3229 SpvMemorySemanticsMask semantics =
3230 vtn_constant_value(b, w[2])->values[0].u32[0];
3231 vtn_emit_memory_barrier(b, scope, semantics);
3235 case SpvOpControlBarrier: {
3236 SpvScope execution_scope =
3237 vtn_constant_value(b, w[1])->values[0].u32[0];
3238 if (execution_scope == SpvScopeWorkgroup)
3239 vtn_emit_barrier(b, nir_intrinsic_barrier);
3241 SpvScope memory_scope =
3242 vtn_constant_value(b, w[2])->values[0].u32[0];
3243 SpvMemorySemanticsMask memory_semantics =
3244 vtn_constant_value(b, w[3])->values[0].u32[0];
3245 vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
3250 unreachable("unknown barrier instruction");
3255 gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
3256 SpvExecutionMode mode)
3259 case SpvExecutionModeInputPoints:
3260 case SpvExecutionModeOutputPoints:
3261 return 0; /* GL_POINTS */
3262 case SpvExecutionModeInputLines:
3263 return 1; /* GL_LINES */
3264 case SpvExecutionModeInputLinesAdjacency:
3265 return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
3266 case SpvExecutionModeTriangles:
3267 return 4; /* GL_TRIANGLES */
3268 case SpvExecutionModeInputTrianglesAdjacency:
3269 return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
3270 case SpvExecutionModeQuads:
3271 return 7; /* GL_QUADS */
3272 case SpvExecutionModeIsolines:
3273 return 0x8E7A; /* GL_ISOLINES */
3274 case SpvExecutionModeOutputLineStrip:
3275 return 3; /* GL_LINE_STRIP */
3276 case SpvExecutionModeOutputTriangleStrip:
3277 return 5; /* GL_TRIANGLE_STRIP */
3279 vtn_fail("Invalid primitive type");
3284 vertices_in_from_spv_execution_mode(struct vtn_builder *b,
3285 SpvExecutionMode mode)
3288 case SpvExecutionModeInputPoints:
3290 case SpvExecutionModeInputLines:
3292 case SpvExecutionModeInputLinesAdjacency:
3294 case SpvExecutionModeTriangles:
3296 case SpvExecutionModeInputTrianglesAdjacency:
3299 vtn_fail("Invalid GS input mode");
3303 static gl_shader_stage
3304 stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
3307 case SpvExecutionModelVertex:
3308 return MESA_SHADER_VERTEX;
3309 case SpvExecutionModelTessellationControl:
3310 return MESA_SHADER_TESS_CTRL;
3311 case SpvExecutionModelTessellationEvaluation:
3312 return MESA_SHADER_TESS_EVAL;
3313 case SpvExecutionModelGeometry:
3314 return MESA_SHADER_GEOMETRY;
3315 case SpvExecutionModelFragment:
3316 return MESA_SHADER_FRAGMENT;
3317 case SpvExecutionModelGLCompute:
3318 return MESA_SHADER_COMPUTE;
3320 vtn_fail("Unsupported execution model");
3324 #define spv_check_supported(name, cap) do { \
3325 if (!(b->options && b->options->caps.name)) \
3326 vtn_warn("Unsupported SPIR-V capability: %s", \
3327 spirv_capability_to_string(cap)); \
3332 vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
3335 struct vtn_value *entry_point = &b->values[w[2]];
3336 /* Record the name regardless of whether this is our entry point */
3337 unsigned name_words;
3338 entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
3340 if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
3341 stage_for_execution_model(b, w[1]) != b->entry_point_stage)
3344 vtn_assert(b->entry_point == NULL);
3345 b->entry_point = entry_point;
3349 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
3350 const uint32_t *w, unsigned count)
3357 case SpvSourceLanguageUnknown: lang = "unknown"; break;
3358 case SpvSourceLanguageESSL: lang = "ESSL"; break;
3359 case SpvSourceLanguageGLSL: lang = "GLSL"; break;
3360 case SpvSourceLanguageOpenCL_C: lang = "OpenCL C"; break;
3361 case SpvSourceLanguageOpenCL_CPP: lang = "OpenCL C++"; break;
3362 case SpvSourceLanguageHLSL: lang = "HLSL"; break;
3365 uint32_t version = w[2];
3368 (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";
3370 vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
3374 case SpvOpSourceExtension:
3375 case SpvOpSourceContinued:
3376 case SpvOpExtension:
3377 case SpvOpModuleProcessed:
3378 /* Unhandled, but these are for debug so that's ok. */
3381 case SpvOpCapability: {
3382 SpvCapability cap = w[1];
3384 case SpvCapabilityMatrix:
3385 case SpvCapabilityShader:
3386 case SpvCapabilityGeometry:
3387 case SpvCapabilityGeometryPointSize:
3388 case SpvCapabilityUniformBufferArrayDynamicIndexing:
3389 case SpvCapabilitySampledImageArrayDynamicIndexing:
3390 case SpvCapabilityStorageBufferArrayDynamicIndexing:
3391 case SpvCapabilityStorageImageArrayDynamicIndexing:
3392 case SpvCapabilityImageRect:
3393 case SpvCapabilitySampledRect:
3394 case SpvCapabilitySampled1D:
3395 case SpvCapabilityImage1D:
3396 case SpvCapabilitySampledCubeArray:
3397 case SpvCapabilityImageCubeArray:
3398 case SpvCapabilitySampledBuffer:
3399 case SpvCapabilityImageBuffer:
3400 case SpvCapabilityImageQuery:
3401 case SpvCapabilityDerivativeControl:
3402 case SpvCapabilityInterpolationFunction:
3403 case SpvCapabilityMultiViewport:
3404 case SpvCapabilitySampleRateShading:
3405 case SpvCapabilityClipDistance:
3406 case SpvCapabilityCullDistance:
3407 case SpvCapabilityInputAttachment:
3408 case SpvCapabilityImageGatherExtended:
3409 case SpvCapabilityStorageImageExtendedFormats:
3412 case SpvCapabilityLinkage:
3413 case SpvCapabilityVector16:
3414 case SpvCapabilityFloat16Buffer:
3415 case SpvCapabilityFloat16:
3416 case SpvCapabilityInt64Atomics:
3417 case SpvCapabilityStorageImageMultisample:
3418 case SpvCapabilityInt8:
3419 case SpvCapabilitySparseResidency:
3420 case SpvCapabilityMinLod:
3421 vtn_warn("Unsupported SPIR-V capability: %s",
3422 spirv_capability_to_string(cap));
3425 case SpvCapabilityAtomicStorage:
3426 spv_check_supported(atomic_storage, cap);
3429 case SpvCapabilityFloat64:
3430 spv_check_supported(float64, cap);
3432 case SpvCapabilityInt64:
3433 spv_check_supported(int64, cap);
3435 case SpvCapabilityInt16:
3436 spv_check_supported(int16, cap);
3439 case SpvCapabilityTransformFeedback:
3440 spv_check_supported(transform_feedback, cap);
3443 case SpvCapabilityGeometryStreams:
3444 spv_check_supported(geometry_streams, cap);
3447 case SpvCapabilityAddresses:
3448 case SpvCapabilityKernel:
3449 case SpvCapabilityImageBasic:
3450 case SpvCapabilityImageReadWrite:
3451 case SpvCapabilityImageMipmap:
3452 case SpvCapabilityPipes:
3453 case SpvCapabilityGroups:
3454 case SpvCapabilityDeviceEnqueue:
3455 case SpvCapabilityLiteralSampler:
3456 case SpvCapabilityGenericPointer:
3457 vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
3458 spirv_capability_to_string(cap));
3461 case SpvCapabilityImageMSArray:
3462 spv_check_supported(image_ms_array, cap);
3465 case SpvCapabilityTessellation:
3466 case SpvCapabilityTessellationPointSize:
3467 spv_check_supported(tessellation, cap);
3470 case SpvCapabilityDrawParameters:
3471 spv_check_supported(draw_parameters, cap);
3474 case SpvCapabilityStorageImageReadWithoutFormat:
3475 spv_check_supported(image_read_without_format, cap);
3478 case SpvCapabilityStorageImageWriteWithoutFormat:
3479 spv_check_supported(image_write_without_format, cap);
3482 case SpvCapabilityDeviceGroup:
3483 spv_check_supported(device_group, cap);
3486 case SpvCapabilityMultiView:
3487 spv_check_supported(multiview, cap);
3490 case SpvCapabilityGroupNonUniform:
3491 spv_check_supported(subgroup_basic, cap);
3494 case SpvCapabilityGroupNonUniformVote:
3495 spv_check_supported(subgroup_vote, cap);
3498 case SpvCapabilitySubgroupBallotKHR:
3499 case SpvCapabilityGroupNonUniformBallot:
3500 spv_check_supported(subgroup_ballot, cap);
3503 case SpvCapabilityGroupNonUniformShuffle:
3504 case SpvCapabilityGroupNonUniformShuffleRelative:
3505 spv_check_supported(subgroup_shuffle, cap);
3508 case SpvCapabilityGroupNonUniformQuad:
3509 spv_check_supported(subgroup_quad, cap);
3512 case SpvCapabilityGroupNonUniformArithmetic:
3513 case SpvCapabilityGroupNonUniformClustered:
3514 spv_check_supported(subgroup_arithmetic, cap);
3517 case SpvCapabilityVariablePointersStorageBuffer:
3518 case SpvCapabilityVariablePointers:
3519 spv_check_supported(variable_pointers, cap);
3522 case SpvCapabilityStorageUniformBufferBlock16:
3523 case SpvCapabilityStorageUniform16:
3524 case SpvCapabilityStoragePushConstant16:
3525 case SpvCapabilityStorageInputOutput16:
3526 spv_check_supported(storage_16bit, cap);
3529 case SpvCapabilityShaderViewportIndexLayerEXT:
3530 spv_check_supported(shader_viewport_index_layer, cap);
3533 case SpvCapabilityStorageBuffer8BitAccess:
3534 case SpvCapabilityUniformAndStorageBuffer8BitAccess:
3535 case SpvCapabilityStoragePushConstant8:
3536 spv_check_supported(storage_8bit, cap);
3539 case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
3540 case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
3541 case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
3542 spv_check_supported(descriptor_array_dynamic_indexing, cap);
3545 case SpvCapabilityRuntimeDescriptorArrayEXT:
3546 spv_check_supported(runtime_descriptor_array, cap);
3549 case SpvCapabilityStencilExportEXT:
3550 spv_check_supported(stencil_export, cap);
3553 case SpvCapabilitySampleMaskPostDepthCoverage:
3554 spv_check_supported(post_depth_coverage, cap);
3558 vtn_fail("Unhandled capability");
3563 case SpvOpExtInstImport:
3564 vtn_handle_extension(b, opcode, w, count);
3567 case SpvOpMemoryModel:
3568 vtn_assert(w[1] == SpvAddressingModelLogical);
3569 vtn_assert(w[2] == SpvMemoryModelSimple ||
3570 w[2] == SpvMemoryModelGLSL450);
3573 case SpvOpEntryPoint:
3574 vtn_handle_entry_point(b, w, count);
3578 vtn_push_value(b, w[1], vtn_value_type_string)->str =
3579 vtn_string_literal(b, &w[2], count - 2, NULL);
3583 b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
3586 case SpvOpMemberName:
3590 case SpvOpExecutionMode:
3591 case SpvOpDecorationGroup:
3593 case SpvOpMemberDecorate:
3594 case SpvOpGroupDecorate:
3595 case SpvOpGroupMemberDecorate:
3596 case SpvOpDecorateStringGOOGLE:
3597 case SpvOpMemberDecorateStringGOOGLE:
3598 vtn_handle_decoration(b, opcode, w, count);
3602 return false; /* End of preamble */
3609 vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
3610 const struct vtn_decoration *mode, void *data)
3612 vtn_assert(b->entry_point == entry_point);
3614 switch(mode->exec_mode) {
3615 case SpvExecutionModeOriginUpperLeft:
3616 case SpvExecutionModeOriginLowerLeft:
3617 b->origin_upper_left =
3618 (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
3621 case SpvExecutionModeEarlyFragmentTests:
3622 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3623 b->shader->info.fs.early_fragment_tests = true;
3626 case SpvExecutionModePostDepthCoverage:
3627 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3628 b->shader->info.fs.post_depth_coverage = true;
3631 case SpvExecutionModeInvocations:
3632 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3633 b->shader->info.gs.invocations = MAX2(1, mode->literals[0]);
3636 case SpvExecutionModeDepthReplacing:
3637 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3638 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
3640 case SpvExecutionModeDepthGreater:
3641 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3642 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
3644 case SpvExecutionModeDepthLess:
3645 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3646 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
3648 case SpvExecutionModeDepthUnchanged:
3649 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3650 b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
3653 case SpvExecutionModeLocalSize:
3654 vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
3655 b->shader->info.cs.local_size[0] = mode->literals[0];
3656 b->shader->info.cs.local_size[1] = mode->literals[1];
3657 b->shader->info.cs.local_size[2] = mode->literals[2];
3659 case SpvExecutionModeLocalSizeHint:
3660 break; /* Nothing to do with this */
3662 case SpvExecutionModeOutputVertices:
3663 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3664 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
3665 b->shader->info.tess.tcs_vertices_out = mode->literals[0];
3667 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3668 b->shader->info.gs.vertices_out = mode->literals[0];
3672 case SpvExecutionModeInputPoints:
3673 case SpvExecutionModeInputLines:
3674 case SpvExecutionModeInputLinesAdjacency:
3675 case SpvExecutionModeTriangles:
3676 case SpvExecutionModeInputTrianglesAdjacency:
3677 case SpvExecutionModeQuads:
3678 case SpvExecutionModeIsolines:
3679 if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3680 b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
3681 b->shader->info.tess.primitive_mode =
3682 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
3684 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3685 b->shader->info.gs.vertices_in =
3686 vertices_in_from_spv_execution_mode(b, mode->exec_mode);
3687 b->shader->info.gs.input_primitive =
3688 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
3692 case SpvExecutionModeOutputPoints:
3693 case SpvExecutionModeOutputLineStrip:
3694 case SpvExecutionModeOutputTriangleStrip:
3695 vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
3696 b->shader->info.gs.output_primitive =
3697 gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
3700 case SpvExecutionModeSpacingEqual:
3701 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3702 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3703 b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
3705 case SpvExecutionModeSpacingFractionalEven:
3706 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3707 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3708 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
3710 case SpvExecutionModeSpacingFractionalOdd:
3711 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3712 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3713 b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
3715 case SpvExecutionModeVertexOrderCw:
3716 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3717 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3718 b->shader->info.tess.ccw = false;
3720 case SpvExecutionModeVertexOrderCcw:
3721 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3722 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3723 b->shader->info.tess.ccw = true;
3725 case SpvExecutionModePointMode:
3726 vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
3727 b->shader->info.stage == MESA_SHADER_TESS_EVAL);
3728 b->shader->info.tess.point_mode = true;
3731 case SpvExecutionModePixelCenterInteger:
3732 b->pixel_center_integer = true;
3735 case SpvExecutionModeXfb:
3736 b->shader->info.has_transform_feedback_varyings = true;
3739 case SpvExecutionModeVecTypeHint:
3740 case SpvExecutionModeContractionOff:
3743 case SpvExecutionModeStencilRefReplacingEXT:
3744 vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
3748 vtn_fail("Unhandled execution mode");
3753 vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
3754 const uint32_t *w, unsigned count)
3756 vtn_set_instruction_result_type(b, opcode, w, count);
3760 case SpvOpSourceContinued:
3761 case SpvOpSourceExtension:
3762 case SpvOpExtension:
3763 case SpvOpCapability:
3764 case SpvOpExtInstImport:
3765 case SpvOpMemoryModel:
3766 case SpvOpEntryPoint:
3767 case SpvOpExecutionMode:
3770 case SpvOpMemberName:
3771 case SpvOpDecorationGroup:
3773 case SpvOpMemberDecorate:
3774 case SpvOpGroupDecorate:
3775 case SpvOpGroupMemberDecorate:
3776 case SpvOpDecorateStringGOOGLE:
3777 case SpvOpMemberDecorateStringGOOGLE:
3778 vtn_fail("Invalid opcode in types and variables section");
3784 case SpvOpTypeFloat:
3785 case SpvOpTypeVector:
3786 case SpvOpTypeMatrix:
3787 case SpvOpTypeImage:
3788 case SpvOpTypeSampler:
3789 case SpvOpTypeSampledImage:
3790 case SpvOpTypeArray:
3791 case SpvOpTypeRuntimeArray:
3792 case SpvOpTypeStruct:
3793 case SpvOpTypeOpaque:
3794 case SpvOpTypePointer:
3795 case SpvOpTypeFunction:
3796 case SpvOpTypeEvent:
3797 case SpvOpTypeDeviceEvent:
3798 case SpvOpTypeReserveId:
3799 case SpvOpTypeQueue:
3801 vtn_handle_type(b, opcode, w, count);
3804 case SpvOpConstantTrue:
3805 case SpvOpConstantFalse:
3807 case SpvOpConstantComposite:
3808 case SpvOpConstantSampler:
3809 case SpvOpConstantNull:
3810 case SpvOpSpecConstantTrue:
3811 case SpvOpSpecConstantFalse:
3812 case SpvOpSpecConstant:
3813 case SpvOpSpecConstantComposite:
3814 case SpvOpSpecConstantOp:
3815 vtn_handle_constant(b, opcode, w, count);
3820 vtn_handle_variables(b, opcode, w, count);
3824 return false; /* End of the types and variables section */
3831 vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
3832 const uint32_t *w, unsigned count)
3838 case SpvOpLoopMerge:
3839 case SpvOpSelectionMerge:
3840 /* This is handled by cfg pre-pass and walk_blocks */
3844 struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
3845 val->type = vtn_value(b, w[1], vtn_value_type_type)->type;
3850 vtn_handle_extension(b, opcode, w, count);
3856 case SpvOpCopyMemory:
3857 case SpvOpCopyMemorySized:
3858 case SpvOpAccessChain:
3859 case SpvOpPtrAccessChain:
3860 case SpvOpInBoundsAccessChain:
3861 case SpvOpArrayLength:
3862 vtn_handle_variables(b, opcode, w, count);
3865 case SpvOpFunctionCall:
3866 vtn_handle_function_call(b, opcode, w, count);
3869 case SpvOpSampledImage:
3871 case SpvOpImageSampleImplicitLod:
3872 case SpvOpImageSampleExplicitLod:
3873 case SpvOpImageSampleDrefImplicitLod:
3874 case SpvOpImageSampleDrefExplicitLod:
3875 case SpvOpImageSampleProjImplicitLod:
3876 case SpvOpImageSampleProjExplicitLod:
3877 case SpvOpImageSampleProjDrefImplicitLod:
3878 case SpvOpImageSampleProjDrefExplicitLod:
3879 case SpvOpImageFetch:
3880 case SpvOpImageGather:
3881 case SpvOpImageDrefGather:
3882 case SpvOpImageQuerySizeLod:
3883 case SpvOpImageQueryLod:
3884 case SpvOpImageQueryLevels:
3885 case SpvOpImageQuerySamples:
3886 vtn_handle_texture(b, opcode, w, count);
3889 case SpvOpImageRead:
3890 case SpvOpImageWrite:
3891 case SpvOpImageTexelPointer:
3892 vtn_handle_image(b, opcode, w, count);
3895 case SpvOpImageQuerySize: {
3896 struct vtn_pointer *image =
3897 vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
3898 if (glsl_type_is_image(image->type->type)) {
3899 vtn_handle_image(b, opcode, w, count);
3901 vtn_assert(glsl_type_is_sampler(image->type->type));
3902 vtn_handle_texture(b, opcode, w, count);
3907 case SpvOpAtomicLoad:
3908 case SpvOpAtomicExchange:
3909 case SpvOpAtomicCompareExchange:
3910 case SpvOpAtomicCompareExchangeWeak:
3911 case SpvOpAtomicIIncrement:
3912 case SpvOpAtomicIDecrement:
3913 case SpvOpAtomicIAdd:
3914 case SpvOpAtomicISub:
3915 case SpvOpAtomicSMin:
3916 case SpvOpAtomicUMin:
3917 case SpvOpAtomicSMax:
3918 case SpvOpAtomicUMax:
3919 case SpvOpAtomicAnd:
3921 case SpvOpAtomicXor: {
3922 struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
3923 if (pointer->value_type == vtn_value_type_image_pointer) {
3924 vtn_handle_image(b, opcode, w, count);
3926 vtn_assert(pointer->value_type == vtn_value_type_pointer);
3927 vtn_handle_atomics(b, opcode, w, count);
3932 case SpvOpAtomicStore: {
3933 struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
3934 if (pointer->value_type == vtn_value_type_image_pointer) {
3935 vtn_handle_image(b, opcode, w, count);
3937 vtn_assert(pointer->value_type == vtn_value_type_pointer);
3938 vtn_handle_atomics(b, opcode, w, count);
3944 /* Handle OpSelect up-front here because it needs to be able to handle
3945 * pointers and not just regular vectors and scalars.
3947 struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
3948 struct vtn_value *sel_val = vtn_untyped_value(b, w[3]);
3949 struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
3950 struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);
3952 const struct glsl_type *sel_type;
3953 switch (res_val->type->base_type) {
3954 case vtn_base_type_scalar:
3955 sel_type = glsl_bool_type();
3957 case vtn_base_type_vector:
3958 sel_type = glsl_vector_type(GLSL_TYPE_BOOL, res_val->type->length);
3960 case vtn_base_type_pointer:
3961 /* We need to have actual storage for pointer types */
3962 vtn_fail_if(res_val->type->type == NULL,
3963 "Invalid pointer result type for OpSelect");
3964 sel_type = glsl_bool_type();
3967 vtn_fail("Result type of OpSelect must be a scalar, vector, or pointer");
3970 if (unlikely(sel_val->type->type != sel_type)) {
3971 if (sel_val->type->type == glsl_bool_type()) {
3972 /* This case is illegal but some older versions of GLSLang produce
3973 * it. The GLSLang issue was fixed on March 30, 2017:
3975 * https://github.com/KhronosGroup/glslang/issues/809
3977 * Unfortunately, there are applications in the wild which are
3978 * shipping with this bug, so it isn't nice to fail on them; we
3979 * throw a warning instead. It's not actually a problem for us, as
3980 * nir_builder will just splat the condition out, which is most
3981 * likely what the client wanted anyway.
3983 vtn_warn("Condition type of OpSelect must have the same number "
3984 "of components as Result Type");
3986 vtn_fail("Condition type of OpSelect must be a scalar or vector "
3987 "of Boolean type. It must have the same number of "
3988 "components as Result Type");
3992 vtn_fail_if(obj1_val->type != res_val->type ||
3993 obj2_val->type != res_val->type,
3994 "Object types must match the result type in OpSelect");
3996 struct vtn_type *res_type = vtn_value(b, w[1], vtn_value_type_type)->type;
3997 struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, res_type->type);
3998 ssa->def = nir_bcsel(&b->nb, vtn_ssa_value(b, w[3])->def,
3999 vtn_ssa_value(b, w[4])->def,
4000 vtn_ssa_value(b, w[5])->def);
4001 vtn_push_ssa(b, w[2], res_type, ssa);
4010 case SpvOpConvertFToU:
4011 case SpvOpConvertFToS:
4012 case SpvOpConvertSToF:
4013 case SpvOpConvertUToF:
4017 case SpvOpQuantizeToF16:
4018 case SpvOpConvertPtrToU:
4019 case SpvOpConvertUToPtr:
4020 case SpvOpPtrCastToGeneric:
4021 case SpvOpGenericCastToPtr:
4027 case SpvOpSignBitSet:
4028 case SpvOpLessOrGreater:
4030 case SpvOpUnordered:
4045 case SpvOpVectorTimesScalar:
4047 case SpvOpIAddCarry:
4048 case SpvOpISubBorrow:
4049 case SpvOpUMulExtended:
4050 case SpvOpSMulExtended:
4051 case SpvOpShiftRightLogical:
4052 case SpvOpShiftRightArithmetic:
4053 case SpvOpShiftLeftLogical:
4054 case SpvOpLogicalEqual:
4055 case SpvOpLogicalNotEqual:
4056 case SpvOpLogicalOr:
4057 case SpvOpLogicalAnd:
4058 case SpvOpLogicalNot:
4059 case SpvOpBitwiseOr:
4060 case SpvOpBitwiseXor:
4061 case SpvOpBitwiseAnd:
4063 case SpvOpFOrdEqual:
4064 case SpvOpFUnordEqual:
4065 case SpvOpINotEqual:
4066 case SpvOpFOrdNotEqual:
4067 case SpvOpFUnordNotEqual:
4068 case SpvOpULessThan:
4069 case SpvOpSLessThan:
4070 case SpvOpFOrdLessThan:
4071 case SpvOpFUnordLessThan:
4072 case SpvOpUGreaterThan:
4073 case SpvOpSGreaterThan:
4074 case SpvOpFOrdGreaterThan:
4075 case SpvOpFUnordGreaterThan:
4076 case SpvOpULessThanEqual:
4077 case SpvOpSLessThanEqual:
4078 case SpvOpFOrdLessThanEqual:
4079 case SpvOpFUnordLessThanEqual:
4080 case SpvOpUGreaterThanEqual:
4081 case SpvOpSGreaterThanEqual:
4082 case SpvOpFOrdGreaterThanEqual:
4083 case SpvOpFUnordGreaterThanEqual:
4089 case SpvOpFwidthFine:
4090 case SpvOpDPdxCoarse:
4091 case SpvOpDPdyCoarse:
4092 case SpvOpFwidthCoarse:
4093 case SpvOpBitFieldInsert:
4094 case SpvOpBitFieldSExtract:
4095 case SpvOpBitFieldUExtract:
4096 case SpvOpBitReverse:
4098 case SpvOpTranspose:
4099 case SpvOpOuterProduct:
4100 case SpvOpMatrixTimesScalar:
4101 case SpvOpVectorTimesMatrix:
4102 case SpvOpMatrixTimesVector:
4103 case SpvOpMatrixTimesMatrix:
4104 vtn_handle_alu(b, opcode, w, count);
4107 case SpvOpVectorExtractDynamic:
4108 case SpvOpVectorInsertDynamic:
4109 case SpvOpVectorShuffle:
4110 case SpvOpCompositeConstruct:
4111 case SpvOpCompositeExtract:
4112 case SpvOpCompositeInsert:
4113 case SpvOpCopyObject:
4114 vtn_handle_composite(b, opcode, w, count);
4117 case SpvOpEmitVertex:
4118 case SpvOpEndPrimitive:
4119 case SpvOpEmitStreamVertex:
4120 case SpvOpEndStreamPrimitive:
4121 case SpvOpControlBarrier:
4122 case SpvOpMemoryBarrier:
4123 vtn_handle_barrier(b, opcode, w, count);
4126 case SpvOpGroupNonUniformElect:
4127 case SpvOpGroupNonUniformAll:
4128 case SpvOpGroupNonUniformAny:
4129 case SpvOpGroupNonUniformAllEqual:
4130 case SpvOpGroupNonUniformBroadcast:
4131 case SpvOpGroupNonUniformBroadcastFirst:
4132 case SpvOpGroupNonUniformBallot:
4133 case SpvOpGroupNonUniformInverseBallot:
4134 case SpvOpGroupNonUniformBallotBitExtract:
4135 case SpvOpGroupNonUniformBallotBitCount:
4136 case SpvOpGroupNonUniformBallotFindLSB:
4137 case SpvOpGroupNonUniformBallotFindMSB:
4138 case SpvOpGroupNonUniformShuffle:
4139 case SpvOpGroupNonUniformShuffleXor:
4140 case SpvOpGroupNonUniformShuffleUp:
4141 case SpvOpGroupNonUniformShuffleDown:
4142 case SpvOpGroupNonUniformIAdd:
4143 case SpvOpGroupNonUniformFAdd:
4144 case SpvOpGroupNonUniformIMul:
4145 case SpvOpGroupNonUniformFMul:
4146 case SpvOpGroupNonUniformSMin:
4147 case SpvOpGroupNonUniformUMin:
4148 case SpvOpGroupNonUniformFMin:
4149 case SpvOpGroupNonUniformSMax:
4150 case SpvOpGroupNonUniformUMax:
4151 case SpvOpGroupNonUniformFMax:
4152 case SpvOpGroupNonUniformBitwiseAnd:
4153 case SpvOpGroupNonUniformBitwiseOr:
4154 case SpvOpGroupNonUniformBitwiseXor:
4155 case SpvOpGroupNonUniformLogicalAnd:
4156 case SpvOpGroupNonUniformLogicalOr:
4157 case SpvOpGroupNonUniformLogicalXor:
4158 case SpvOpGroupNonUniformQuadBroadcast:
4159 case SpvOpGroupNonUniformQuadSwap:
4160 vtn_handle_subgroup(b, opcode, w, count);
4164 vtn_fail("Unhandled opcode");
4171 vtn_create_builder(const uint32_t *words, size_t word_count,
4172 gl_shader_stage stage, const char *entry_point_name,
4173 const struct spirv_to_nir_options *options)
4175 /* Initialize the vtn_builder object */
4176 struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
4178 b->spirv_word_count = word_count;
4182 exec_list_make_empty(&b->functions);
4183 b->entry_point_stage = stage;
4184 b->entry_point_name = entry_point_name;
4185 b->options = options;
4188 * Handle the SPIR-V header (first 5 dwords).
4189 * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
4191 if (word_count <= 5)
4194 if (words[0] != SpvMagicNumber) {
4195 vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
4198 if (words[1] < 0x10000) {
4199 vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
4203 /* words[2] == generator magic */
4204 unsigned value_id_bound = words[3];
4205 if (words[4] != 0) {
4206 vtn_err("words[4] was %u, want 0", words[4]);
4210 b->value_id_bound = value_id_bound;
4211 b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
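/* Top-level entry point: translates a SPIR-V binary into a nir_shader.
 * The walk is staged: preamble instructions first, then variables, types,
 * and constants, then CFG construction, and finally the body of every
 * function reachable from the entry point, followed by a dead-code pass
 * to clean up any bogus derefs we generated along the way.
 */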
4220 spirv_to_nir(const uint32_t *words, size_t word_count,
4221 struct nir_spirv_specialization *spec, unsigned num_spec,
4222 gl_shader_stage stage, const char *entry_point_name,
4223 const struct spirv_to_nir_options *options,
4224 const nir_shader_compiler_options *nir_options)
4227 const uint32_t *word_end = words + word_count;
4229 struct vtn_builder *b = vtn_create_builder(words, word_count,
4230 stage, entry_point_name,
4236 /* See also _vtn_fail() */
4237 if (setjmp(b->fail_jump)) {
4242 /* Skip the SPIR-V header, already handled in vtn_create_builder */
4245 /* Handle all the preamble instructions */
4246 words = vtn_foreach_instruction(b, words, word_end,
4247 vtn_handle_preamble_instruction);
4249 if (b->entry_point == NULL) {
4250 vtn_fail("Entry point not found");
4255 b->shader = nir_shader_create(b, stage, nir_options, NULL);
4257 /* Set shader info defaults */
4258 b->shader->info.gs.invocations = 1;
4260 /* Parse execution modes */
4261 vtn_foreach_execution_mode(b, b->entry_point,
4262 vtn_handle_execution_mode, NULL);
4264 b->specializations = spec;
4265 b->num_specializations = num_spec;
4267 /* Handle all variable, type, and constant instructions */
4268 words = vtn_foreach_instruction(b, words, word_end,
4269 vtn_handle_variable_or_type_instruction);
4271 /* Set types on all vtn_values */
4272 vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);
4274 vtn_build_cfg(b, words, word_end);
4276 assert(b->entry_point->value_type == vtn_value_type_function);
4277 b->entry_point->func->referenced = true;
4282 foreach_list_typed(struct vtn_function, func, node, &b->functions) {
4283 if (func->referenced && !func->emitted) {
4284 b->const_table = _mesa_hash_table_create(b, _mesa_hash_pointer,
4285 _mesa_key_pointer_equal);
4287 vtn_function_emit(b, func, vtn_handle_body_instruction);
4293 /* We sometimes generate bogus derefs that, while never used, give the
4294 * validator a bit of heartburn. Run a dead-code pass to get rid of them.
4296 nir_opt_dce(b->shader);
4298 vtn_assert(b->entry_point->value_type == vtn_value_type_function);
4299 nir_function *entry_point = b->entry_point->func->impl->function;
4300 vtn_assert(entry_point);
4302 /* Unparent the shader from the vtn_builder before we delete the builder */
4303 ralloc_steal(NULL, b->shader);