/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "glsl/ir.h"
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "main/shaderimage.h"
#include "program/prog_to_nir.h"
#include "brw_fs.h"
#include "brw_fs_surface_builder.h"
#include "brw_nir.h"

using namespace brw;
void
fs_visitor::emit_nir_code()
{
   nir_shader *nir = prog->nir;

   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */
   nir_setup_inputs(nir);
   nir_setup_outputs(nir);
   nir_setup_uniforms(nir);
   nir_emit_system_values(nir);

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }
}
void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, shader->num_inputs);

   foreach_list_typed(nir_variable, var, node, &shader->inputs) {
      enum brw_reg_type type = brw_type_for_base_type(var->type);
      fs_reg input = offset(nir_inputs, bld, var->data.driver_location);

      fs_reg reg;
      switch (stage) {
      case MESA_SHADER_VERTEX: {
         /* Our ATTR file is indexed by VERT_ATTRIB_*, which is the value
          * stored in nir_variable::location.
          *
          * However, NIR's load_input intrinsics use a different index - an
          * offset into a single contiguous array containing all inputs.
          * This index corresponds to the nir_variable::driver_location field.
          *
          * So, we need to copy from fs_reg(ATTR, var->location) to
          * offset(nir_inputs, var->data.driver_location).
          */
         const glsl_type *const t = var->type->without_array();
         const unsigned components = t->components();
         const unsigned cols = t->matrix_columns;
         const unsigned elts = t->vector_elements;
         unsigned array_length = var->type->is_array() ? var->type->length : 1;
         for (unsigned i = 0; i < array_length; i++) {
            for (unsigned j = 0; j < cols; j++) {
               for (unsigned k = 0; k < elts; k++) {
                  bld.MOV(offset(retype(input, type), bld,
                                 components * i + elts * j + k),
                          offset(fs_reg(ATTR, var->data.location + i, type),
                                 bld, 4 * j + k));
               }
            }
         }
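         /* Illustrative note (added; not in the original source): for a
          * `vec3 v[2]` input, t = vec3, so components = 3, cols = 1,
          * elts = 3 and array_length = 2.  The loop nest above therefore
          * emits six scalar MOVs, packing the NIR-side array tightly
          * (stride `components` per element) while each array element is
          * read from its own vec4-aligned ATTR slot.
          */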
         break;
      }
      case MESA_SHADER_GEOMETRY:
      case MESA_SHADER_COMPUTE:
      case MESA_SHADER_TESS_CTRL:
      case MESA_SHADER_TESS_EVAL:
         unreachable("fs_visitor not used for these stages yet.");
         break;
      case MESA_SHADER_FRAGMENT:
         if (var->data.location == VARYING_SLOT_POS) {
            reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                                var->data.origin_upper_left);
            emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                                      input, reg), 0xF);
         } else {
            int location = var->data.location;
            emit_general_interpolation(&input, var->name, var->type,
                                       (glsl_interp_qualifier) var->data.interpolation,
                                       &location, var->data.centroid,
                                       var->data.sample);
         }
         break;
      }
   }
}
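/* Descriptive note (added; not in the original source): recursively walk a
 * varying's type, assigning one vec4-sized slot of `reg` to each scalar,
 * vector, matrix column or (flattened) struct field, and advance `location`
 * by one slot per leaf.  A mat3, for instance, consumes three consecutive
 * locations, one per column.
 */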
void
fs_visitor::nir_setup_single_output_varying(fs_reg &reg,
                                            const glsl_type *type,
                                            unsigned &location)
{
   if (type->is_array() || type->is_matrix()) {
      const struct glsl_type *elem_type = glsl_get_array_element(type);
      const unsigned length = glsl_get_length(type);

      for (unsigned i = 0; i < length; i++) {
         nir_setup_single_output_varying(reg, elem_type, location);
      }
   } else if (type->is_record()) {
      for (unsigned i = 0; i < type->length; i++) {
         const struct glsl_type *field_type = type->fields.structure[i].type;
         nir_setup_single_output_varying(reg, field_type, location);
      }
   } else {
      assert(type->is_scalar() || type->is_vector());
      this->outputs[location] = reg;
      this->output_components[location] = type->vector_elements;
      reg = offset(reg, bld, 4);
      location++;
   }
}
void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, shader->num_outputs);

   foreach_list_typed(nir_variable, var, node, &shader->outputs) {
      fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);

      int vector_elements =
         var->type->is_array() ? var->type->fields.array->vector_elements
                               : var->type->vector_elements;

      switch (stage) {
      case MESA_SHADER_VERTEX: {
         unsigned location = var->data.location;
         nir_setup_single_output_varying(reg, var->type, location);
         break;
      }
      case MESA_SHADER_FRAGMENT:
         if (var->data.index > 0) {
            assert(var->data.location == FRAG_RESULT_DATA0);
            assert(var->data.index == 1);
            this->dual_src_output = reg;
            this->do_dual_src = true;
         } else if (var->data.location == FRAG_RESULT_COLOR) {
            /* Writing gl_FragColor outputs to all color regions. */
            for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
               this->outputs[i] = reg;
               this->output_components[i] = 4;
            }
         } else if (var->data.location == FRAG_RESULT_DEPTH) {
            this->frag_depth = reg;
         } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
            this->sample_mask = reg;
         } else {
            /* gl_FragData or a user-defined FS output */
            assert(var->data.location >= FRAG_RESULT_DATA0 &&
                   var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

            /* General color output. */
            for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
               int output = var->data.location - FRAG_RESULT_DATA0 + i;
               this->outputs[output] = offset(reg, bld, vector_elements * i);
               this->output_components[output] = vector_elements;
            }
         }
         break;
      default:
         unreachable("unhandled shader stage");
      }
   }
}
void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   num_direct_uniforms = shader->num_direct_uniforms;

   if (dispatch_width != 8)
      return;

   /* We split the uniform register file in half. The first half is
    * entirely direct uniforms. The second half is indirect.
    */
   if (num_direct_uniforms > 0)
      param_size[0] = num_direct_uniforms;
   if (shader->num_uniforms > num_direct_uniforms)
      param_size[num_direct_uniforms] = shader->num_uniforms - num_direct_uniforms;
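   /* Illustrative note (added; not in the original source): with 16 uniform
    * floats of which the first 6 are directly addressed, param_size[0] = 6
    * and param_size[6] = 10 -- the file is described as two contiguous
    * blocks rather than one entry per uniform.
    */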
   uniforms = shader->num_uniforms;

   if (shader_prog) {
      foreach_list_typed(nir_variable, var, node, &shader->uniforms) {
         /* UBO's and atomics don't take up space in the uniform file */
         if (var->interface_type != NULL || var->type->contains_atomic())
            continue;

         if (strncmp(var->name, "gl_", 3) == 0)
            nir_setup_builtin_uniform(var);
         else
            nir_setup_uniform(var);
      }
   } else {
      /* prog_to_nir doesn't create uniform variables; set param up directly. */
      for (unsigned p = 0; p < prog->Parameters->NumParameters; p++) {
         for (unsigned int i = 0; i < 4; i++) {
            stage_prog_data->param[4 * p + i] =
               &prog->Parameters->ParameterValues[p][i];
         }
      }
   }
}
void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name. We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
   unsigned index = var->data.driver_location;
   for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (storage->builtin)
         continue;

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }
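      /* Illustrative note (added; not in the original source): for
       * `uniform struct { vec4 a; float b; } s;` the storage list has
       * entries named "s.a" and "s.b"; both pass the prefix test above
       * because the character after "s" is '.', while an unrelated uniform
       * such as "s2" is rejected.
       */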
      if (storage->type->is_image()) {
         /* Images don't get a valid location assigned by nir_lower_io()
          * because their size is driver-specific, so we need to allocate
          * space for them here at the end of the parameter array.
          */
         var->data.driver_location = uniforms;
         param_size[uniforms] =
            BRW_IMAGE_PARAM_SIZE * MAX2(storage->array_elements, 1);

         setup_image_uniform_values(storage);
      } else {
         unsigned slots = storage->type->component_slots();
         if (storage->array_elements)
            slots *= storage->array_elements;

         for (unsigned i = 0; i < slots; i++) {
            stage_prog_data->param[index++] = &storage->storage[i];
         }
      }
   }
}
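/* Descriptive note (added; not in the original source): built-in uniforms
 * ("gl_*") are backed by GL state slots rather than user-set storage.  Each
 * state slot references a vec4 of driver state, and the swizzle dedup below
 * adds one param per unique swizzle component: a slot swizzled XXXX
 * contributes a single parameter, while XYZW contributes four.
 */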
void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}
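/* Descriptive note (added; not in the original source): this pre-pass walks
 * every block before any code is emitted so that each system value (vertex
 * ID, sample position, ...) is generated at most once, up front, no matter
 * how many intrinsics reference it later.
 */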
static bool
emit_system_values_block(nir_block *block, void *void_visitor)
{
   fs_visitor *v = (fs_visitor *)void_visitor;
   fs_reg *reg;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_vertex_id:
         unreachable("should be lowered by lower_vertex_id().");

      case nir_intrinsic_load_vertex_id_zero_base:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
         break;

      case nir_intrinsic_load_base_vertex:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
         break;

      case nir_intrinsic_load_instance_id:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
         break;

      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->devinfo->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
                                 BRW_REGISTER_TYPE_D));
         break;

      default:
         break;
      }
   }

   return true;
}
void
fs_visitor::nir_emit_system_values(nir_shader *shader)
{
   nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
   nir_foreach_overload(shader, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_foreach_block(overload->impl, emit_system_values_block, this);
   }
}
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
   }

   nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
                             impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}
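/* Descriptive note (added; not in the original source): NIR control flow is
 * a tree of ifs, loops and basic blocks.  The emitters below walk that tree
 * recursively and lower it onto the EU's structured control-flow opcodes:
 * IF/ELSE/ENDIF for nir_if, DO/WHILE for nir_loop, and BREAK/CONTINUE for
 * the jump instructions inside loop bodies.
 */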
void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* first, put the condition into f0 */
   fs_inst *inst = bld.MOV(bld.null_reg_d(),
                           retype(get_nir_src(if_stmt->condition),
                                  BRW_REGISTER_TYPE_D));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   bld.IF(BRW_PREDICATE_NORMAL);

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   bld.emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   bld.emit(BRW_OPCODE_ENDIF);

   try_replace_with_sel();
}
void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   bld.emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   bld.emit(BRW_OPCODE_WHILE);
}
void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   const fs_builder abld = bld.annotate(NULL, instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(abld, nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(abld, nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(abld, nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(abld, nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(abld, nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}
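/* Descriptive note (added; not in the original source): IEEE 754 encodes
 * 1.0f as 0x3f800000 and -1.0f as 0xbf800000; they differ only in bit 31.
 * So (gl_FrontFacing ? 1.0 : -1.0) needs no compare-and-select: the code
 * below ORs the hardware's facing bit into bit 31 of the constant 1.0 and
 * masks the result to produce the correctly signed value directly.
 */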
bool
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
                                         const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *src0 =
      nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);

   if (src0->intrinsic != nir_intrinsic_load_front_face)
      return false;

   nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
   if (!value1 || fabsf(value1->f[0]) != 1.0f)
      return false;

   nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
   if (!value2 || fabsf(value2->f[0]) != 1.0f)
      return false;

   fs_reg tmp = vgrf(glsl_type::int_type);

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES and 3-bit negation will not change it.
       */
      if (value1->f[0] == -1.0f) {
         g0.negate = true;
      }

      tmp.type = BRW_REGISTER_TYPE_W;
      tmp.subreg_offset = 2;
      tmp.stride = 2;

      fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
      or_inst->src[1].type = BRW_REGISTER_TYPE_UW;

      tmp.type = BRW_REGISTER_TYPE_D;
      tmp.subreg_offset = 0;
      tmp.stride = 1;
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp<1>D  g1.6<0,1,0>D  0x3f800000D
       *    and(8) dst<1>D  tmp<8,8,1>D   0xbf800000D
       *
       * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES and 3-bit negation will not change it.
       */
      if (value1->f[0] == -1.0f) {
         g1_6.negate = true;
      }

      bld.OR(tmp, g1_6, fs_reg(0x3f800000));
   }
   bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));

   return true;
}
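/* Descriptive note (added; not in the original source): by this point NIR
 * has been scalarized for the FS backend, so apart from the imov/fmov/vecN
 * special case below, every ALU instruction writes exactly one channel of
 * its destination (a single bit in write_mask) and reads one swizzled
 * channel of each source.
 */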
void
fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
   fs_inst *inst;

   fs_reg result = get_nir_dest(instr->dest.dest);
   result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

   fs_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   /* We get a bunch of mov's out of the from_ssa pass and they may still
    * be vectorized. We'll handle them as a special-case. We'll also
    * handle vecN here because it's basically the same thing.
    */
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      fs_reg temp = result;
      bool need_extra_copy = false;
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         if (!instr->src[i].src.is_ssa &&
             instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
            need_extra_copy = true;
            temp = bld.vgrf(result.type, 4);
            break;
         }
      }

      for (unsigned i = 0; i < 4; i++) {
         if (!(instr->dest.write_mask & (1 << i)))
            continue;

         if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[0], bld, instr->src[0].swizzle[i]));
         } else {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[i], bld, instr->src[i].swizzle[0]));
         }
         inst->saturate = instr->dest.saturate;
      }

      /* In this case the source and destination registers were the same,
       * so we need to insert an extra set of moves in order to deal with
       * any swizzling.
       */
      if (need_extra_copy) {
         for (unsigned i = 0; i < 4; i++) {
            if (!(instr->dest.write_mask & (1 << i)))
               continue;

            bld.MOV(offset(result, bld, i), offset(temp, bld, i));
         }
      }
      return;
   }
   default:
      break;
   }

   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel. Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(_mesa_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, bld, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
   }

   switch (instr->op) {
   case nir_op_i2f:
   case nir_op_u2f:
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      bld.MOV(result, op[0]);
      break;

   case nir_op_fsign: {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);

      fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      bld.AND(result_int, op[0], fs_reg(0x80000000u));

      inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
      inst->predicate = BRW_PREDICATE_NORMAL;
      if (instr->dest.saturate) {
         inst = bld.MOV(result, result);
         inst->saturate = true;
      }
      break;
   }

   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
      bld.ASR(result, op[0], fs_reg(31));
      inst = bld.OR(result, result, fs_reg(1));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_frcp:
      inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      } else {
         inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_fine:
      inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_coarse:
      inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
                         fs_reg(fs_key->render_to_fbo));
      } else {
         inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
                         fs_reg(fs_key->render_to_fbo));
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_fine:
      inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
                      fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_coarse:
      inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
                      fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fadd:
   case nir_op_iadd:
      inst = bld.ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = bld.MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul:
      bld.MUL(result, op[0], op[1]);
      break;

   case nir_op_imul_high:
   case nir_op_umul_high:
      bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
      break;

   case nir_op_uadd_carry:
      unreachable("Should have been lowered by carry_to_arith().");

   case nir_op_usub_borrow:
      unreachable("Should have been lowered by borrow_to_arith().");

   case nir_op_umod:
      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
      break;

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
      break;

   case nir_op_feq:
   case nir_op_ieq:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
      break;

   case nir_op_fne:
   case nir_op_ine:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
      break;

   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      bld.NOT(result, op[0]);
      break;
   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.XOR(result, op[0], op[1]);
      break;
   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.OR(result, op[0], op[1]);
      break;
   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.AND(result, op[0], op[1]);
      break;

   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4:
   case nir_op_bany2:
   case nir_op_bany3:
   case nir_op_bany4:
   case nir_op_ball2:
   case nir_op_ball3:
   case nir_op_ball4:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      unreachable("Lowered by nir_lower_alu_reductions");

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      bld.MOV(result, negate(op[0]));
      break;

   case nir_op_f2b:
      bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
      break;
   case nir_op_i2b:
      bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
      break;

   case nir_op_ftrunc:
      inst = bld.RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = vgrf(glsl_type::float_type);
      bld.RNDD(temp, op[0]);
      temp.negate = true;
      inst = bld.MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = bld.RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_ffract:
      inst = bld.FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fround_even:
      inst = bld.RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (devinfo->gen >= 6) {
         inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
         inst = bld.SEL(result, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (devinfo->gen >= 6) {
         inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
         inst = bld.SEL(result, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_unpack_half_2x16_split_y:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bitfield_reverse:
      bld.BFREV(result, op[0]);
      break;

   case nir_op_bit_count:
      bld.CBIT(result, op[0]);
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */
      bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);

      fs_reg neg_result(result);
      neg_result.negate = true;
      inst = bld.ADD(result, neg_result, fs_reg(31));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_find_lsb:
      bld.FBL(result, op[0]);
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      bld.BFE(result, op[2], op[1], op[0]);
      break;
   case nir_op_bfm:
      bld.BFI1(result, op[0], op[1]);
      break;
   case nir_op_bfi:
      bld.BFI2(result, op[0], op[1], op[2]);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_ishl:
      bld.SHL(result, op[0], op[1]);
      break;
   case nir_op_ishr:
      bld.ASR(result, op[0], op[1]);
      break;
   case nir_op_ushr:
      bld.SHR(result, op[0], op[1]);
      break;

   case nir_op_pack_half_2x16_split:
      bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
      break;

   case nir_op_ffma:
      inst = bld.MAD(result, op[2], op[1], op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = bld.LRP(result, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      if (optimize_frontfacing_ternary(instr, result))
         return;

      bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
      inst = bld.SEL(result, op[1], op[2]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   default:
      unreachable("unhandled instruction");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      fs_reg masked = vgrf(glsl_type::int_type);
      bld.AND(masked, result, fs_reg(1));
      masked.negate = true;
      bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
   }
}
void
fs_visitor::nir_emit_load_const(const fs_builder &bld,
                                nir_load_const_instr *instr)
{
   fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i]));

   nir_ssa_values[instr->def.index] = reg;
}
void
fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = bld.vgrf(BRW_REGISTER_TYPE_D,
                                               instr->def.num_components);
}
static fs_reg
fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg,
                   unsigned base_offset, nir_src *indirect)
{
   fs_reg reg;

   assert(!nir_reg->is_global);

   reg = v->nir_locals[nir_reg->index];

   reg = offset(reg, v->bld, base_offset * nir_reg->num_components);
   if (indirect) {
      int multiplier = nir_reg->num_components * (v->dispatch_width / 8);

      reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
      v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
                 fs_reg(multiplier));
   }

   return reg;
}
fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   fs_reg reg;
   if (src.is_ssa) {
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = fs_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                               src.reg.indirect);
   }

   /* to avoid floating-point denorm flushing problems, set the type by
    * default to D - instructions that need floating point semantics will set
    * this to F if they need to
    */
   return retype(reg, BRW_REGISTER_TYPE_D);
}
fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
                                                dest.ssa.num_components);
      return nir_ssa_values[dest.ssa.index];
   }

   return fs_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                             dest.reg.indirect);
}
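/* Descriptive note (added; not in the original source): image uniforms live
 * in the UNIFORM file as fixed-size blocks of BRW_IMAGE_PARAM_SIZE dwords
 * holding the surface index plus layout metadata.  Indexing into an image
 * array therefore means stepping through the file in
 * BRW_IMAGE_PARAM_SIZE-sized strides, with a reladdr term when the index is
 * not constant.
 */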
fs_reg
fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
{
   fs_reg image(UNIFORM, deref->var->data.driver_location,
                BRW_REGISTER_TYPE_UD);

   if (deref->deref.child) {
      const nir_deref_array *deref_array =
         nir_deref_as_array(deref->deref.child);
      assert(deref->deref.child->deref_type == nir_deref_type_array &&
             deref_array->deref.child == NULL);
      const unsigned size = glsl_get_length(deref->var->type);
      const unsigned base = MIN2(deref_array->base_offset, size - 1);

      image = offset(image, bld, base * BRW_IMAGE_PARAM_SIZE);

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         fs_reg *tmp = new(mem_ctx) fs_reg(vgrf(glsl_type::int_type));

         if (devinfo->gen == 7 && !devinfo->is_haswell) {
            /* IVB hangs when trying to access an invalid surface index with
             * the dataport. According to the spec "if the index used to
             * select an individual element is negative or greater than or
             * equal to the size of the array, the results of the operation
             * are undefined but may not lead to termination" -- which is one
             * of the possible outcomes of the hang. Clamp the index to
             * prevent access outside of the array bounds.
             */
            bld.emit_minmax(*tmp, retype(get_nir_src(deref_array->indirect),
                                         BRW_REGISTER_TYPE_UD),
                            fs_reg(size - base - 1), BRW_CONDITIONAL_L);
         } else {
            bld.MOV(*tmp, get_nir_src(deref_array->indirect));
         }

         bld.MUL(*tmp, *tmp, fs_reg(BRW_IMAGE_PARAM_SIZE));
         image.reladdr = tmp;
      }
   }

   return image;
}
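/* Descriptive note (added; not in the original source): clone a template
 * instruction once per enabled bit of wr_mask, bumping the destination and
 * every GRF source by one component each iteration -- a small helper for
 * replicating a scalar operation across up to four channels.
 */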
void
fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
                         unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
      new_inst->dst = offset(new_inst->dst, bld, i);
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j] = offset(new_inst->src[j], bld, i);

      bld.emit(new_inst);
   }
}
/**
 * Get the matching channel register datatype for an image intrinsic of the
 * specified GLSL image type.
 */
static brw_reg_type
get_image_base_type(const glsl_type *type)
{
   switch ((glsl_base_type)type->sampler_type) {
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_INT:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("Not reached.");
   }
}
/**
 * Get the appropriate atomic op for an image atomic intrinsic.
 */
static unsigned
get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
{
   switch (op) {
   case nir_intrinsic_image_atomic_add:
      return BRW_AOP_ADD;
   case nir_intrinsic_image_atomic_min:
      return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
              BRW_AOP_IMIN : BRW_AOP_UMIN);
   case nir_intrinsic_image_atomic_max:
      return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
              BRW_AOP_IMAX : BRW_AOP_UMAX);
   case nir_intrinsic_image_atomic_and:
      return BRW_AOP_AND;
   case nir_intrinsic_image_atomic_or:
      return BRW_AOP_OR;
   case nir_intrinsic_image_atomic_xor:
      return BRW_AOP_XOR;
   case nir_intrinsic_image_atomic_exchange:
      return BRW_AOP_MOV;
   case nir_intrinsic_image_atomic_comp_swap:
      return BRW_AOP_CMPWR;
   default:
      unreachable("Not reachable.");
   }
}
void
fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   bool has_indirect = false;

   switch (instr->intrinsic) {
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if: {
      /* We track our discarded pixels in f0.1. By predicating on it, we can
       * update just the flag bits that aren't yet discarded. If there's no
       * condition, we emit a CMP of g0 != g0, so all currently executing
       * channels will get turned off.
       */
      fs_inst *cmp;
      if (instr->intrinsic == nir_intrinsic_discard_if) {
         cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
                       fs_reg(0), BRW_CONDITIONAL_Z);
      } else {
         fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                         BRW_REGISTER_TYPE_UW));
         cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
      }
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;

      if (devinfo->gen >= 6) {
         emit_discard_jump();
      }
      break;
   }
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      using namespace surface_access;

      /* Get the arguments of the atomic intrinsic. */
      const fs_reg offset = get_nir_src(instr->src[0]);
      const unsigned surface = (stage_prog_data->binding_table.abo_start +
                                instr->const_index[0]);
      fs_reg tmp;

      /* Emit a surface read or atomic op. */
      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_read:
         tmp = emit_untyped_read(bld, fs_reg(surface), offset, 1, 1);
         break;

      case nir_intrinsic_atomic_counter_inc:
         tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
                                   fs_reg(), 1, 1, BRW_AOP_INC);
         break;

      case nir_intrinsic_atomic_counter_dec:
         tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
                                   fs_reg(), 1, 1, BRW_AOP_PREDEC);
         break;

      default:
         unreachable("Unreachable");
      }

      /* Assign the result. */
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);

      /* Mark the surface as used. */
      brw_mark_surface_used(stage_prog_data, surface);
      break;
   }
   case nir_intrinsic_image_load:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_min:
   case nir_intrinsic_image_atomic_max:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_atomic_comp_swap: {
      using namespace image_access;

      /* Get the referenced image variable and type. */
      const nir_variable *var = instr->variables[0]->var;
      const glsl_type *type = var->type->without_array();
      const brw_reg_type base_type = get_image_base_type(type);

      /* Get some metadata from the image intrinsic. */
      const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
      const unsigned arr_dims = type->sampler_array ? 1 : 0;
      const unsigned surf_dims = type->coordinate_components() - arr_dims;
      const mesa_format format =
         (var->data.image.write_only ? MESA_FORMAT_NONE :
          _mesa_get_shader_image_format(var->data.image.format));

      /* Get the arguments of the image intrinsic. */
      const fs_reg image = get_nir_image_deref(instr->variables[0]);
      const fs_reg addr = retype(get_nir_src(instr->src[0]),
                                 BRW_REGISTER_TYPE_UD);
      const fs_reg src0 = (info->num_srcs >= 3 ?
                           retype(get_nir_src(instr->src[2]), base_type) :
                           fs_reg());
      const fs_reg src1 = (info->num_srcs >= 4 ?
                           retype(get_nir_src(instr->src[3]), base_type) :
                           fs_reg());
      fs_reg tmp;

      /* Emit an image load, store or atomic op. */
      if (instr->intrinsic == nir_intrinsic_image_load)
         tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);

      else if (instr->intrinsic == nir_intrinsic_image_store)
         emit_image_store(bld, image, addr, src0, surf_dims, arr_dims, format);

      else
         tmp = emit_image_atomic(bld, image, addr, src0, src1,
                                 surf_dims, arr_dims, info->dest_components,
                                 get_image_atomic_op(instr->intrinsic, type));

      /* Assign the result. */
      for (unsigned c = 0; c < info->dest_components; ++c)
         bld.MOV(offset(retype(dest, base_type), bld, c),
                 offset(tmp, bld, c));
      break;
   }
   case nir_intrinsic_memory_barrier: {
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 16 / dispatch_width);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         ->regs_written = 2;
      break;
   }
   case nir_intrinsic_image_size: {
      /* Get the referenced image variable and type. */
      const nir_variable *var = instr->variables[0]->var;
      const glsl_type *type = var->type->without_array();

      /* Get the size of the image. */
      const fs_reg image = get_nir_image_deref(instr->variables[0]);
      const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);

      /* For 1DArray image types, the array index is stored in the Z component.
       * Fix this by swizzling the Z component to the Y component.
       */
      const bool is_1d_array_image =
         type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
         type->sampler_array;

      /* For CubeArray images, we should count the number of cubes instead
       * of the number of faces. Fix it by dividing the (Z component) by 6.
       */
      const bool is_cube_array_image =
         type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
         type->sampler_array;

      /* Copy all the components. */
      const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
      for (unsigned c = 0; c < info->dest_components; ++c) {
         if ((int)c >= type->coordinate_components()) {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    fs_reg(1));
         } else if (c == 1 && is_1d_array_image) {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    offset(size, bld, 2));
         } else if (c == 2 && is_cube_array_image) {
            bld.emit(SHADER_OPCODE_INT_QUOTIENT,
                     offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                     offset(size, bld, c), fs_reg(6));
         } else {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    offset(size, bld, c));
         }
      }

      break;
   }
   case nir_intrinsic_load_front_face:
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
              *emit_frontfacing_interpolation());
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base: {
      fs_reg vertex_id = nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      assert(vertex_id.file != BAD_FILE);
      dest.type = vertex_id.type;
      bld.MOV(dest, vertex_id);
      break;
   }

   case nir_intrinsic_load_base_vertex: {
      fs_reg base_vertex = nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      assert(base_vertex.file != BAD_FILE);
      dest.type = base_vertex.type;
      bld.MOV(dest, base_vertex);
      break;
   }

   case nir_intrinsic_load_instance_id: {
      fs_reg instance_id = nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      assert(instance_id.file != BAD_FILE);
      dest.type = instance_id.type;
      bld.MOV(dest, instance_id);
      break;
   }

   case nir_intrinsic_load_sample_mask_in: {
      fs_reg sample_mask_in = nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
      assert(sample_mask_in.file != BAD_FILE);
      dest.type = sample_mask_in.type;
      bld.MOV(dest, sample_mask_in);
      break;
   }

   case nir_intrinsic_load_sample_pos: {
      fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
      assert(sample_pos.file != BAD_FILE);
      dest.type = sample_pos.type;
      bld.MOV(dest, sample_pos);
      bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
      break;
   }

   case nir_intrinsic_load_sample_id: {
      fs_reg sample_id = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
      assert(sample_id.file != BAD_FILE);
      dest.type = sample_id.type;
      bld.MOV(dest, sample_id);
      break;
   }
   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_uniform: {
      unsigned index = instr->const_index[0];

      fs_reg uniform_reg;
      if (index < num_direct_uniforms) {
         uniform_reg = fs_reg(UNIFORM, 0);
      } else {
         uniform_reg = fs_reg(UNIFORM, num_direct_uniforms);
         index -= num_direct_uniforms;
      }

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = offset(retype(uniform_reg, dest.type), bld, index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
         index++;

         bld.MOV(dest, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }
   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
      fs_reg surf_index;

      if (const_index) {
         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
                             const_index->u[0]);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = vgrf(glsl_type::uint_type);
         bld.ADD(surf_index, get_nir_src(instr->src[0]),
                 fs_reg(stage_prog_data->binding_table.ubo_start));
         surf_index = bld.emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumUniformBlocks - 1);
      }

      if (has_indirect) {
         fs_reg base_offset = retype(get_nir_src(instr->src[1]),
                                     BRW_REGISTER_TYPE_D);

         unsigned vec4_offset = instr->const_index[0];
         for (int i = 0; i < instr->num_components; i++)
            VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
                                       base_offset, vec4_offset + i * 4);
      } else {
         fs_reg packed_consts = vgrf(glsl_type::float_type);
         packed_consts.type = dest.type;

         fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
         bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
                  surf_index, const_offset_reg);

         for (unsigned i = 0; i < instr->num_components; i++) {
            packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);

            /* The std140 packing rules don't allow vectors to cross 16-byte
             * boundaries, and a reg is 32 bytes.
             */
            assert(packed_consts.subreg_offset < 32);

            bld.MOV(dest, packed_consts);
            dest = offset(dest, bld, 1);
         }
      }
      break;
   }
   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_input: {
      unsigned index = 0;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = offset(retype(nir_inputs, dest.type), bld,
                             instr->const_index[0] + index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
         index++;

         bld.MOV(dest, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }
   /* Handle ARB_gpu_shader5 interpolation intrinsics
    *
    * It's worth a quick word of explanation as to why we handle the full
    * variable-based interpolation intrinsic rather than a lowered version
    * like we do for other inputs. We have to do that because the way we
    * set up inputs doesn't allow us to use the already-set-up inputs for
    * interpolation. At the beginning of the shader, we go through all of
    * the input variables and do the initial interpolation and put it in
    * the nir_inputs array based on its location as determined in
    * nir_lower_io. If the input isn't used, dead code cleans up and
    * everything works fine. However, when we get to the ARB_gpu_shader5
    * interpolation intrinsics, we need to reinterpolate the input
    * differently. If we used an intrinsic that just had an index it would
    * only give us the offset into the nir_inputs array. However, this is
    * useless because that value is post-interpolation and we need
    * pre-interpolation. In order to get the actual location of the bits
    * we get from the vertex fetching hardware, we need the variable.
    */
   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset: {
      assert(stage == MESA_SHADER_FRAGMENT);

      ((struct brw_wm_prog_data *) prog_data)->pulls_bary = true;

      fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);

      /* For most messages, we need one reg of ignored data; the hardware
       * requires mlen==1 even when there is no payload. In the per-slot
       * offset case, we'll replace this with the proper source data.
       */
      fs_reg src = vgrf(glsl_type::float_type);
      int mlen = 1; /* one reg unless overridden */
      fs_inst *inst;

      switch (instr->intrinsic) {
      case nir_intrinsic_interp_var_at_centroid:
         inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_CENTROID,
                         dst_xy, src, fs_reg(0u));
         break;

      case nir_intrinsic_interp_var_at_sample: {
         /* XXX: We should probably handle non-constant sample id's */
         nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
         assert(const_sample);
         unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
         inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src,
                         fs_reg(msg_data));
         break;
      }

      case nir_intrinsic_interp_var_at_offset: {
         nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

         if (const_offset) {
            unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
            unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;

            inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
                            fs_reg(off_x | (off_y << 4)));
         } else {
            src = vgrf(glsl_type::ivec2_type);
            fs_reg offset_src = retype(get_nir_src(instr->src[0]),
                                       BRW_REGISTER_TYPE_F);
            for (int i = 0; i < 2; i++) {
               fs_reg temp = vgrf(glsl_type::float_type);
               bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f));
               fs_reg itemp = vgrf(glsl_type::int_type);
               bld.MOV(itemp, temp); /* float to int */

               /* Clamp the upper end of the range to +7/16.
                * ARB_gpu_shader5 requires that we support a maximum offset
                * of +0.5, which isn't representable in a S0.4 value -- if
                * we didn't clamp it, we'd end up with -8/16, which is the
                * opposite of what the shader author wanted.
                *
                * This is legal due to ARB_gpu_shader5's quantization
                * rules:
                *
                * "Not all values of <offset> may be supported; x and y
                * offsets may be rounded to fixed-point values with the
                * number of fraction bits given by the
                * implementation-dependent constant
                * FRAGMENT_INTERPOLATION_OFFSET_BITS"
                */
               set_condmod(BRW_CONDITIONAL_L,
                           bld.SEL(offset(src, bld, i), itemp, fs_reg(7)));
            }

            mlen = 2 * dispatch_width / 8;
            inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
                            fs_reg(0u));
         }
         break;
      }

      default:
         unreachable("Invalid intrinsic");
      }

      inst->mlen = mlen;
      /* 2 floats per slot returned */
      inst->regs_written = 2 * dispatch_width / 8;
      inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
                               INTERP_QUALIFIER_NOPERSPECTIVE;

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
         src.type = dest.type;

         bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }
   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_output: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
                                  instr->const_index[0] + index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
         index++;

         bld.MOV(new_dest, src);
         src = offset(src, bld, 1);
      }
      break;
   }

   case nir_intrinsic_barrier:
      emit_barrier();
      break;

   default:
      unreachable("unknown intrinsic");
   }
}
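/* Descriptive note (added; not in the original source): translate a NIR
 * texture instruction by gathering its sources (coordinate, LOD, gradients,
 * shadow comparator, offsets, ...) into typed fs_regs, mapping the
 * nir_texop to the IR-level ir_texture_opcode, and handing everything to
 * the shared emit_texture() helper, which builds the sampler message.
 */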
void
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
{
   unsigned sampler = instr->sampler_index;
   fs_reg sampler_reg(sampler);

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated. This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components = 0;
   int UNUSED offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i].src);
      switch (instr->src[i].src_type) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         tex_offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");

      case nir_tex_src_sampler_offset: {
         /* Figure out the highest possible sampler index and mark it as used */
         uint32_t max_used = sampler + instr->sampler_array_size - 1;
         if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
            max_used += stage_prog_data->binding_table.gather_texture_start;
         } else {
            max_used += stage_prog_data->binding_table.texture_start;
         }
         brw_mark_surface_used(prog_data, max_used);

         /* Emit code to evaluate the actual indexing expression */
         sampler_reg = vgrf(glsl_type::uint_type);
         bld.ADD(sampler_reg, src, fs_reg(sampler));
         sampler_reg = bld.emit_uniformize(sampler_reg);
         break;
      }

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms) {
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
         mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
      } else {
         mcs = fs_reg(0u);
      }
   }

   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3));
         break;
      }
   }

   enum glsl_base_type dest_base_type =
      brw_glsl_base_type_for_nir_type(instr->dest_type);

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);

   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   default:
      unreachable("unknown texture opcode");
   }

   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                tex_offset, mcs, gather_component,
                is_cube_array, is_rect, sampler, sampler_reg, texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                             dest, this->result),
                (1 << num_components) - 1);
}
void
fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld.emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      bld.emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}