/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Connor Abbott (cwabbott0@gmail.com)
 *    Jason Ekstrand (jason@jlekstrand.net)
 */
/*
 * This lowering pass replaces loads/stores of input/output variables with
 * the actual input/output intrinsics.
 */
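/*
 * For example (a sketch, since the exact printed form of NIR varies between
 * versions, and "in_color" is a made-up variable name), a load of a shader
 * input
 *
 *    vec4 ssa_2 = intrinsic load_var (in_color) ()
 *
 * becomes a load_input intrinsic whose constant "base" index is the
 * variable's driver_location and whose source is the computed offset:
 *
 *    vec4 ssa_2 = intrinsic load_input (ssa_1) (0)
 */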
#include "nir.h"
#include "nir_builder.h"
struct lower_io_state {
   nir_builder builder;
   void *mem_ctx;
   int (*type_size)(const struct glsl_type *type);
   nir_variable_mode mode;
};
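/**
 * Assigns a consecutive driver_location to each variable in \p var_list,
 * advancing by type_size(var->type) per variable, and reports the total in
 * \p size.  Variables belonging to an interface block (UBO/SSBO) are skipped.
 */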
void
nir_assign_var_locations(struct exec_list *var_list, unsigned *size,
                         int (*type_size)(const struct glsl_type *))
{
   unsigned location = 0;

   nir_foreach_variable(var, var_list) {
      /* UBOs have their own address spaces, so don't count them towards the
       * number of global uniforms.
       */
      if ((var->data.mode == nir_var_uniform ||
           var->data.mode == nir_var_shader_storage) &&
          var->interface_type != NULL)
         continue;

      var->data.driver_location = location;
      location += type_size(var->type);
   }

   *size = location;
}
/**
 * Returns true if we're processing a stage whose inputs are arrays indexed
 * by a vertex number (such as geometry shader inputs).
 */
static bool
is_per_vertex_input(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;

   return var->data.mode == nir_var_shader_in && !var->data.patch &&
          (stage == MESA_SHADER_TESS_CTRL ||
           stage == MESA_SHADER_TESS_EVAL ||
           stage == MESA_SHADER_GEOMETRY);
}
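/**
 * Returns true if we're processing a stage whose non-patch outputs are
 * arrays indexed by a vertex number (i.e. tessellation control shader
 * outputs).
 */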
static bool
is_per_vertex_output(struct lower_io_state *state, nir_variable *var)
{
   gl_shader_stage stage = state->builder.shader->stage;
   return var->data.mode == nir_var_shader_out && !var->data.patch &&
          stage == MESA_SHADER_TESS_CTRL;
}
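/**
 * Walks the deref chain for \p deref and builds an SSA value holding the
 * offset of the dereference from the start of the variable, in the units
 * defined by \p type_size.  If \p vertex_index is non-NULL, the outermost
 * array index (the vertex index) is returned through it instead of being
 * folded into the offset.
 */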
static nir_ssa_def *
get_io_offset(nir_builder *b, nir_deref_var *deref,
              nir_ssa_def **vertex_index,
              int (*type_size)(const struct glsl_type *))
{
   nir_deref *tail = &deref->deref;

   /* For per-vertex input arrays (i.e. geometry shader inputs), keep the
    * outermost array index separate.  Process the rest normally.
    */
   if (vertex_index != NULL) {
      tail = tail->child;
      assert(tail->deref_type == nir_deref_type_array);
      nir_deref_array *deref_array = nir_deref_as_array(tail);

      nir_ssa_def *vtx = nir_imm_int(b, deref_array->base_offset);
      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         vtx = nir_iadd(b, vtx, nir_ssa_for_src(b, deref_array->indirect, 1));
      }
      *vertex_index = vtx;
   }

   /* Just emit code and let constant-folding go to town */
   nir_ssa_def *offset = nir_imm_int(b, 0);

   while (tail->child != NULL) {
      const struct glsl_type *parent_type = tail->type;
      tail = tail->child;

      if (tail->deref_type == nir_deref_type_array) {
         nir_deref_array *deref_array = nir_deref_as_array(tail);
         unsigned size = type_size(tail->type);

         offset = nir_iadd(b, offset,
                           nir_imm_int(b, size * deref_array->base_offset));

         if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
            nir_ssa_def *mul =
               nir_imul(b, nir_imm_int(b, size),
                        nir_ssa_for_src(b, deref_array->indirect, 1));

            offset = nir_iadd(b, offset, mul);
         }
      } else if (tail->deref_type == nir_deref_type_struct) {
         nir_deref_struct *deref_struct = nir_deref_as_struct(tail);

         unsigned field_offset = 0;
         for (unsigned i = 0; i < deref_struct->index; i++) {
            field_offset += type_size(glsl_get_struct_field(parent_type, i));
         }
         offset = nir_iadd(b, offset, nir_imm_int(b, field_offset));
      }
   }

   return offset;
}
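/**
 * Returns the load intrinsic corresponding to a variable load in the given
 * mode, selecting the per-vertex variant when the access is indexed by a
 * vertex number.
 */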
static nir_intrinsic_op
load_op(struct lower_io_state *state,
        nir_variable_mode mode, bool per_vertex)
{
   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
      op = per_vertex ? nir_intrinsic_load_per_vertex_input :
                        nir_intrinsic_load_input;
      break;
   case nir_var_shader_out:
      op = per_vertex ? nir_intrinsic_load_per_vertex_output :
                        nir_intrinsic_load_output;
      break;
   case nir_var_uniform:
      op = nir_intrinsic_load_uniform;
      break;
   case nir_var_shared:
      op = nir_intrinsic_load_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }
   return op;
}
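/**
 * Returns the store intrinsic corresponding to a variable store in the
 * given mode, selecting the per-vertex variant when the access is indexed
 * by a vertex number.
 */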
static nir_intrinsic_op
store_op(struct lower_io_state *state,
         nir_variable_mode mode, bool per_vertex)
{
   nir_intrinsic_op op;
   switch (mode) {
   case nir_var_shader_in:
   case nir_var_shader_out:
      op = per_vertex ? nir_intrinsic_store_per_vertex_output :
                        nir_intrinsic_store_output;
      break;
   case nir_var_shared:
      op = nir_intrinsic_store_shared;
      break;
   default:
      unreachable("Unknown variable mode");
   }
   return op;
}
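/**
 * Maps a variable atomic intrinsic (nir_intrinsic_var_atomic_*) to its
 * shared-memory counterpart (nir_intrinsic_shared_atomic_*).
 */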
static nir_intrinsic_op
atomic_op(nir_intrinsic_op opcode)
{
   switch (opcode) {
#define OP(O) case nir_intrinsic_var_##O: return nir_intrinsic_shared_##O;
   OP(atomic_exchange)
   OP(atomic_comp_swap)
   OP(atomic_add)
   OP(atomic_imin)
   OP(atomic_umin)
   OP(atomic_imax)
   OP(atomic_umax)
   OP(atomic_and)
   OP(atomic_or)
   OP(atomic_xor)
#undef OP
   default:
      unreachable("Invalid atomic");
   }
}
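/**
 * Callback for nir_foreach_block(): lowers every load_var, store_var, and
 * var_atomic_* intrinsic in \p block whose variable mode matches
 * state->mode into the equivalent offset-based intrinsic.
 */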
static bool
nir_lower_io_block(nir_block *block, void *void_state)
{
   struct lower_io_state *state = void_state;
   nir_builder *b = &state->builder;

   nir_foreach_instr_safe(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var:
      case nir_intrinsic_store_var:
      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap:
         /* We can lower the IO for this NIR intrinsic */
         break;
      default:
         /* We can't lower the IO for this NIR intrinsic, so skip it */
         continue;
      }

      nir_variable_mode mode = intrin->variables[0]->var->data.mode;

      if (state->mode != nir_var_all && state->mode != mode)
         continue;

      if (mode != nir_var_shader_in &&
          mode != nir_var_shader_out &&
          mode != nir_var_shared &&
          mode != nir_var_uniform)
         continue;

      b->cursor = nir_before_instr(instr);
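      /* Each case computes the offset (and, for per-vertex accesses, the
       * vertex index) up front; they differ only in which intrinsic they
       * build to replace the variable access.
       */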
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_var: {
         bool per_vertex =
            is_per_vertex_input(state, intrin->variables[0]->var) ||
            is_per_vertex_output(state, intrin->variables[0]->var);

         nir_ssa_def *offset;
         nir_ssa_def *vertex_index;

         offset = get_io_offset(b, intrin->variables[0],
                                per_vertex ? &vertex_index : NULL,
                                state->type_size);

         nir_intrinsic_instr *load =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       load_op(state, mode, per_vertex));
         load->num_components = intrin->num_components;

         nir_intrinsic_set_base(load,
            intrin->variables[0]->var->data.driver_location);

         if (load->intrinsic == nir_intrinsic_load_uniform) {
            load->const_index[1] =
               state->type_size(intrin->variables[0]->var->type);
         }

         if (per_vertex)
            load->src[0] = nir_src_for_ssa(vertex_index);

         load->src[per_vertex ? 1 : 0] = nir_src_for_ssa(offset);

         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&load->instr, &load->dest,
                              intrin->num_components, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&load->dest.ssa));
         } else {
            nir_dest_copy(&load->dest, &intrin->dest, state->mem_ctx);
         }

         nir_instr_insert_before(&intrin->instr, &load->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }
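      /* store_var becomes store_output, store_per_vertex_output, or
       * store_shared; the value being written stays in src[0].
       */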
      case nir_intrinsic_store_var: {
         assert(mode == nir_var_shader_out || mode == nir_var_shared);

         nir_ssa_def *offset;
         nir_ssa_def *vertex_index;

         bool per_vertex =
            is_per_vertex_output(state, intrin->variables[0]->var);

         offset = get_io_offset(b, intrin->variables[0],
                                per_vertex ? &vertex_index : NULL,
                                state->type_size);

         nir_intrinsic_instr *store =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       store_op(state, mode, per_vertex));
         store->num_components = intrin->num_components;

         nir_src_copy(&store->src[0], &intrin->src[0], store);

         nir_intrinsic_set_base(store,
            intrin->variables[0]->var->data.driver_location);
         nir_intrinsic_set_write_mask(store, nir_intrinsic_write_mask(intrin));

         if (per_vertex)
            store->src[1] = nir_src_for_ssa(vertex_index);

         store->src[per_vertex ? 2 : 1] = nir_src_for_ssa(offset);

         nir_instr_insert_before(&intrin->instr, &store->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }
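      /* Shared-variable atomics map one-to-one onto shared atomics: the
       * offset becomes src[0] and the original sources follow it.
       */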
      case nir_intrinsic_var_atomic_add:
      case nir_intrinsic_var_atomic_imin:
      case nir_intrinsic_var_atomic_umin:
      case nir_intrinsic_var_atomic_imax:
      case nir_intrinsic_var_atomic_umax:
      case nir_intrinsic_var_atomic_and:
      case nir_intrinsic_var_atomic_or:
      case nir_intrinsic_var_atomic_xor:
      case nir_intrinsic_var_atomic_exchange:
      case nir_intrinsic_var_atomic_comp_swap: {
         assert(mode == nir_var_shared);

         nir_ssa_def *offset;

         offset = get_io_offset(b, intrin->variables[0],
                                NULL, state->type_size);

         nir_intrinsic_instr *atomic =
            nir_intrinsic_instr_create(state->mem_ctx,
                                       atomic_op(intrin->intrinsic));

         atomic->src[0] = nir_src_for_ssa(offset);

         atomic->const_index[0] =
            intrin->variables[0]->var->data.driver_location;

         for (unsigned i = 0;
              i < nir_intrinsic_infos[intrin->intrinsic].num_srcs;
              i++) {
            nir_src_copy(&atomic->src[i+1], &intrin->src[i], atomic);
         }

         if (intrin->dest.is_ssa) {
            nir_ssa_dest_init(&atomic->instr, &atomic->dest,
                              intrin->dest.ssa.num_components, NULL);
            nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                     nir_src_for_ssa(&atomic->dest.ssa));
         } else {
            nir_dest_copy(&atomic->dest, &intrin->dest, state->mem_ctx);
         }

         nir_instr_insert_before(&intrin->instr, &atomic->instr);
         nir_instr_remove(&intrin->instr);
         break;
      }

      default:
         break;
      }
   }

   return true;
}
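/**
 * Per-function-implementation wrapper: sets up the lower_io_state and runs
 * nir_lower_io_block() over every block in \p impl.
 */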
static void
nir_lower_io_impl(nir_function_impl *impl,
                  nir_variable_mode mode,
                  int (*type_size)(const struct glsl_type *))
{
   struct lower_io_state state;

   nir_builder_init(&state.builder, impl);
   state.mem_ctx = ralloc_parent(impl);
   state.mode = mode;
   state.type_size = type_size;

   nir_foreach_block(impl, nir_lower_io_block, &state);

   nir_metadata_preserve(impl, nir_metadata_block_index |
                               nir_metadata_dominance);
}
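/**
 * Lowers variable accesses of the given mode (or of all lowerable modes, if
 * \p mode is nir_var_all) to offset-based IO intrinsics across the whole
 * shader.
 */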
void
nir_lower_io(nir_shader *shader, nir_variable_mode mode,
             int (*type_size)(const struct glsl_type *))
{
   nir_foreach_function(shader, function) {
      if (function->impl)
         nir_lower_io_impl(function->impl, mode, type_size);
   }
}
/**
 * Return the offset source for a load/store intrinsic.
 */
nir_src *
nir_get_io_offset_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_input:
   case nir_intrinsic_load_output:
   case nir_intrinsic_load_uniform:
      return &instr->src[0];
   case nir_intrinsic_load_ubo:
   case nir_intrinsic_load_ssbo:
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
   case nir_intrinsic_store_output:
      return &instr->src[1];
   case nir_intrinsic_store_ssbo:
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[2];
   default:
      return NULL;
   }
}
/**
 * Return the vertex index source for a load/store per_vertex intrinsic.
 */
nir_src *
nir_get_io_vertex_index_src(nir_intrinsic_instr *instr)
{
   switch (instr->intrinsic) {
   case nir_intrinsic_load_per_vertex_input:
   case nir_intrinsic_load_per_vertex_output:
      return &instr->src[0];
   case nir_intrinsic_store_per_vertex_output:
      return &instr->src[1];
   default:
      return NULL;
   }
}