/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "glsl/ir.h"
#include "glsl/ir_optimization.h"
#include "glsl/nir/glsl_to_nir.h"
#include "main/shaderimage.h"
#include "program/prog_to_nir.h"
#include "brw_fs.h"
#include "brw_fs_surface_builder.h"
#include "brw_nir.h"

using namespace brw;
using namespace brw::surface_access;

void
fs_visitor::emit_nir_code()
{
   nir_shader *nir = prog->nir;

   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */
   nir_setup_inputs(nir);
   nir_setup_outputs(nir);
   nir_setup_uniforms(nir);
   nir_emit_system_values(nir);

   /* get the main function and emit it */
   nir_foreach_overload(nir, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_emit_impl(overload->impl);
   }
}

void
fs_visitor::nir_setup_inputs(nir_shader *shader)
{
   nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, shader->num_inputs);

   foreach_list_typed(nir_variable, var, node, &shader->inputs) {
      enum brw_reg_type type = brw_type_for_base_type(var->type);
      fs_reg input = offset(nir_inputs, bld, var->data.driver_location);

      fs_reg reg;
      switch (stage) {
      case MESA_SHADER_VERTEX: {
         /* Our ATTR file is indexed by VERT_ATTRIB_*, which is the value
          * stored in nir_variable::location.
          *
          * However, NIR's load_input intrinsics use a different index - an
          * offset into a single contiguous array containing all inputs.
          * This index corresponds to the nir_variable::driver_location field.
          *
          * So, we need to copy from fs_reg(ATTR, var->location) to
          * offset(nir_inputs, var->data.driver_location).
          */
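         /* For example (hypothetical numbers): a vec4 input living at
          * VERT_ATTRIB_GENERIC0 (location 16) whose driver_location is 0
          * would be copied from ATTR slot 16 into the first four components
          * of nir_inputs.
          */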
         const glsl_type *const t = var->type->without_array();
         const unsigned components = t->components();
         const unsigned cols = t->matrix_columns;
         const unsigned elts = t->vector_elements;
         unsigned array_length = var->type->is_array() ? var->type->length : 1;
         for (unsigned i = 0; i < array_length; i++) {
            for (unsigned j = 0; j < cols; j++) {
               for (unsigned k = 0; k < elts; k++) {
                  bld.MOV(offset(retype(input, type), bld,
                                 components * i + elts * j + k),
                          offset(fs_reg(ATTR, var->data.location + i, type),
                                 bld, 4 * j + k));
               }
            }
         }
         break;
      }
      case MESA_SHADER_GEOMETRY:
      case MESA_SHADER_COMPUTE:
      case MESA_SHADER_TESS_CTRL:
      case MESA_SHADER_TESS_EVAL:
         unreachable("fs_visitor not used for these stages yet.");
         break;
      case MESA_SHADER_FRAGMENT:
         if (var->data.location == VARYING_SLOT_POS) {
            reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
                                                var->data.origin_upper_left);
            emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                                      input, reg), 0xF);
         } else {
            emit_general_interpolation(input, var->name, var->type,
                                       (glsl_interp_qualifier) var->data.interpolation,
                                       var->data.location, var->data.centroid,
                                       var->data.sample);
         }
         break;
      }
   }
}

void
fs_visitor::nir_setup_outputs(nir_shader *shader)
{
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, shader->num_outputs);

   foreach_list_typed(nir_variable, var, node, &shader->outputs) {
      fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);

      int vector_elements =
         var->type->is_array() ? var->type->fields.array->vector_elements
                               : var->type->vector_elements;

      switch (stage) {
      case MESA_SHADER_VERTEX:
         for (unsigned int i = 0; i < ALIGN(type_size_scalar(var->type), 4) / 4; i++) {
            int output = var->data.location + i;
            this->outputs[output] = offset(reg, bld, 4 * i);
            this->output_components[output] = vector_elements;
         }
         break;
      case MESA_SHADER_FRAGMENT:
         if (var->data.index > 0) {
            assert(var->data.location == FRAG_RESULT_DATA0);
            assert(var->data.index == 1);
            this->dual_src_output = reg;
            this->do_dual_src = true;
         } else if (var->data.location == FRAG_RESULT_COLOR) {
            /* Writing gl_FragColor outputs to all color regions. */
            for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
               this->outputs[i] = reg;
               this->output_components[i] = 4;
            }
         } else if (var->data.location == FRAG_RESULT_DEPTH) {
            this->frag_depth = reg;
         } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
            this->sample_mask = reg;
         } else {
            /* gl_FragData or a user-defined FS output */
            assert(var->data.location >= FRAG_RESULT_DATA0 &&
                   var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

            /* General color output. */
            for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
               int output = var->data.location - FRAG_RESULT_DATA0 + i;
               this->outputs[output] = offset(reg, bld, vector_elements * i);
               this->output_components[output] = vector_elements;
            }
         }
         break;
      default:
         unreachable("unhandled shader stage");
      }
   }
}

void
fs_visitor::nir_setup_uniforms(nir_shader *shader)
{
   if (dispatch_width != 8)
      return;

   uniforms = shader->num_uniforms;

   if (shader_prog) {
      foreach_list_typed(nir_variable, var, node, &shader->uniforms) {
         /* UBOs and atomics don't take up space in the uniform file */
         if (var->interface_type != NULL || var->type->contains_atomic())
            continue;

         if (strncmp(var->name, "gl_", 3) == 0)
            nir_setup_builtin_uniform(var);
         else
            nir_setup_uniform(var);

         if (type_size_scalar(var->type) > 0)
            param_size[var->data.driver_location] = type_size_scalar(var->type);
      }
   } else {
      /* prog_to_nir only creates a single giant uniform variable so we can
       * just set param up directly. */
      for (unsigned p = 0; p < prog->Parameters->NumParameters; p++) {
         for (unsigned int i = 0; i < 4; i++) {
            stage_prog_data->param[4 * p + i] =
               &prog->Parameters->ParameterValues[p][i];
         }
      }

      if (prog->Parameters->NumParameters > 0)
         param_size[0] = prog->Parameters->NumParameters * 4;
   }
}

void
fs_visitor::nir_setup_uniform(nir_variable *var)
{
   int namelen = strlen(var->name);

   /* The data for our (non-builtin) uniforms is stored in a series of
    * gl_uniform_driver_storage structs for each subcomponent that
    * glGetUniformLocation() could name.  We know it's been set up in the
    * same order we'd walk the type, so walk the list of storage and find
    * anything with our name, or the prefix of a component that starts with
    * our name.
    */
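   /* For example, a variable named "arr" matches storage entries such as
    * "arr[0]" and "arr[1]", and "s" matches "s.field"; "color" does not
    * match "colorMap", because the character following the prefix must be
    * '\0', '.' or '['.
    */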
   unsigned index = var->data.driver_location;
   for (unsigned u = 0; u < shader_prog->NumUniformStorage; u++) {
      struct gl_uniform_storage *storage = &shader_prog->UniformStorage[u];

      if (storage->builtin)
         continue;

      if (strncmp(var->name, storage->name, namelen) != 0 ||
          (storage->name[namelen] != 0 &&
           storage->name[namelen] != '.' &&
           storage->name[namelen] != '[')) {
         continue;
      }

      if (storage->type->is_image()) {
         setup_image_uniform_values(index, storage);
      } else {
         unsigned slots = storage->type->component_slots();
         if (storage->array_elements)
            slots *= storage->array_elements;

         for (unsigned i = 0; i < slots; i++) {
            stage_prog_data->param[index++] = &storage->storage[i];
         }
      }
   }
}

void
fs_visitor::nir_setup_builtin_uniform(nir_variable *var)
{
   const nir_state_slot *const slots = var->state_slots;
   assert(var->state_slots != NULL);

   unsigned uniform_index = var->data.driver_location;
   for (unsigned int i = 0; i < var->num_state_slots; i++) {
      /* This state reference has already been setup by ir_to_mesa, but we'll
       * get the same index back here.
       */
      int index = _mesa_add_state_reference(this->prog->Parameters,
                                            (gl_state_index *)slots[i].tokens);

      /* Add each of the unique swizzles of the element as a parameter.
       * This'll end up matching the expected layout of the
       * array/matrix/structure we're trying to fill in.
       */
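      /* E.g. a scalar built-in stores the same channel in all four swizzle
       * slots, so the loop below adds a single param and stops as soon as a
       * swizzle repeats; a vec4 element contributes all four channels.
       */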
      int last_swiz = -1;
      for (unsigned int j = 0; j < 4; j++) {
         int swiz = GET_SWZ(slots[i].swizzle, j);
         if (swiz == last_swiz)
            break;
         last_swiz = swiz;

         stage_prog_data->param[uniform_index++] =
            &prog->Parameters->ParameterValues[index][swiz];
      }
   }
}

static bool
emit_system_values_block(nir_block *block, void *void_visitor)
{
   fs_visitor *v = (fs_visitor *)void_visitor;
   fs_reg *reg;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_vertex_id:
         unreachable("should be lowered by lower_vertex_id().");

      case nir_intrinsic_load_vertex_id_zero_base:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
         break;

      case nir_intrinsic_load_base_vertex:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
         break;

      case nir_intrinsic_load_instance_id:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
         break;

      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->devinfo->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
                                 BRW_REGISTER_TYPE_D));
         break;

      case nir_intrinsic_load_local_invocation_id:
         assert(v->stage == MESA_SHADER_COMPUTE);
         reg = &v->nir_system_values[SYSTEM_VALUE_LOCAL_INVOCATION_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_cs_local_invocation_id_setup();
         break;

      case nir_intrinsic_load_work_group_id:
         assert(v->stage == MESA_SHADER_COMPUTE);
         reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_cs_work_group_id_setup();
         break;

      default:
         break;
      }
   }

   return true;
}

void
fs_visitor::nir_emit_system_values(nir_shader *shader)
{
   nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
   nir_foreach_overload(shader, overload) {
      assert(strcmp(overload->function->name, "main") == 0);
      assert(overload->impl);
      nir_foreach_block(overload->impl, emit_system_values_block, this);
   }
}

void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = reralloc(mem_ctx, nir_locals, fs_reg, impl->reg_alloc);
   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
   }

   nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
                             impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* first, put the condition into f0 */
   fs_inst *inst = bld.MOV(bld.null_reg_d(),
                           retype(get_nir_src(if_stmt->condition),
                                  BRW_REGISTER_TYPE_D));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   bld.IF(BRW_PREDICATE_NORMAL);

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   bld.emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   bld.emit(BRW_OPCODE_ENDIF);
}

void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   bld.emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   bld.emit(BRW_OPCODE_WHILE);
}

void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}

void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   const fs_builder abld = bld.annotate(NULL, instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(abld, nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(abld, nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(abld, nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(abld, nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(abld, nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}

bool
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
                                         const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *src0 =
      nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);

   if (src0->intrinsic != nir_intrinsic_load_front_face)
      return false;

   nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
   if (!value1 || fabsf(value1->f[0]) != 1.0f)
      return false;

   nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
   if (!value2 || fabsf(value2->f[0]) != 1.0f)
      return false;

   fs_reg tmp = vgrf(glsl_type::int_type);

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */
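      /* Concretely: OR'ing 0x3f80 into the word leaves bits 7-13 set and
       * bit 15 equal to the front-facing bit, so the high word of each
       * dword becomes 0x3f8x (front) or 0xbf8x (back).  The final AND with
       * 0xbf800000 masks off the leftover payload bits, leaving exactly
       * 0x3f800000 (1.0f) or 0xbf800000 (-1.0f).
       */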

      if (value1->f[0] == -1.0f) {
         g0.negate = true;
      }

      tmp.type = BRW_REGISTER_TYPE_W;
      tmp.subreg_offset = 2;
      tmp.stride = 2;

      fs_inst *or_inst = bld.OR(tmp, g0, fs_reg(0x3f80));
      or_inst->src[1].type = BRW_REGISTER_TYPE_UW;

      tmp.type = BRW_REGISTER_TYPE_D;
      tmp.subreg_offset = 0;
      tmp.stride = 1;
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp<1>D  g1.6<0,1,0>D  0x3f800000D
       *    and(8) dst<1>D  tmp<8,8,1>D   0xbf800000D
       *
       * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */

      if (value1->f[0] == -1.0f) {
         g1_6.negate = true;
      }

      bld.OR(tmp, g1_6, fs_reg(0x3f800000));
   }
   bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, fs_reg(0xbf800000));

   return true;
}

void
fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
   fs_inst *inst;

   fs_reg result = get_nir_dest(instr->dest.dest);
   result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);

   fs_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   /* We get a bunch of MOVs out of the from_ssa pass and they may still
    * be vectorized.  We'll handle them as a special-case.  We'll also
    * handle vecN here because it's basically the same thing.
    */
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      fs_reg temp = result;
      bool need_extra_copy = false;
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         if (!instr->src[i].src.is_ssa &&
             instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
            need_extra_copy = true;
            temp = bld.vgrf(result.type, 4);
            break;
         }
      }

      for (unsigned i = 0; i < 4; i++) {
         if (!(instr->dest.write_mask & (1 << i)))
            continue;

         if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[0], bld, instr->src[0].swizzle[i]));
         } else {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[i], bld, instr->src[i].swizzle[0]));
         }
         inst->saturate = instr->dest.saturate;
      }

      /* In this case the source and destination registers were the same,
       * so we need to insert an extra set of moves in order to deal with
       * any swizzling.
       */
      if (need_extra_copy) {
         for (unsigned i = 0; i < 4; i++) {
            if (!(instr->dest.write_mask & (1 << i)))
               continue;

            bld.MOV(offset(result, bld, i), offset(temp, bld, i));
         }
      }
      return;
   }
   default:
      break;
   }

   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel.  Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(_mesa_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, bld, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
   }

   switch (instr->op) {
   case nir_op_i2f:
   case nir_op_u2f:
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      bld.MOV(result, op[0]);
      break;

   case nir_op_fsign: {
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
      bld.CMP(bld.null_reg_f(), op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);

      fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
      op[0].type = BRW_REGISTER_TYPE_UD;
      result.type = BRW_REGISTER_TYPE_UD;
      bld.AND(result_int, op[0], fs_reg(0x80000000u));

      inst = bld.OR(result_int, result_int, fs_reg(0x3f800000u));
      inst->predicate = BRW_PREDICATE_NORMAL;
      if (instr->dest.saturate) {
         inst = bld.MOV(result, result);
         inst->saturate = true;
      }
      break;
   }
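
   /* E.g. op[0] = -2.5f (0xc0200000): the AND leaves 0x80000000, and the
    * OR (predicated on -2.5f != 0) folds in 0x3f800000, producing
    * 0xbf800000 == -1.0f.  For op[0] == 0.0f the OR is skipped and the
    * result stays 0x00000000 == 0.0f.
    */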

   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_G);
      bld.ASR(result, op[0], fs_reg(31));
      inst = bld.OR(result, result, fs_reg(1));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_frcp:
      inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      } else {
         inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_fine:
      inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_coarse:
      inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
                         fs_reg(fs_key->render_to_fbo));
      } else {
         inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
                         fs_reg(fs_key->render_to_fbo));
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_fine:
      inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
                      fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_coarse:
      inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
                      fs_reg(fs_key->render_to_fbo));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_iadd:
   case nir_op_fadd:
      inst = bld.ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = bld.MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul:
      bld.MUL(result, op[0], op[1]);
      break;

   case nir_op_imul_high:
   case nir_op_umul_high:
      bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
      break;

   case nir_op_uadd_carry:
      unreachable("Should have been lowered by carry_to_arith().");

   case nir_op_usub_borrow:
      unreachable("Should have been lowered by borrow_to_arith().");

   case nir_op_umod:
      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
      break;

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
      break;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
      break;

   case nir_op_feq:
   case nir_op_ieq:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
      break;

   case nir_op_fne:
   case nir_op_ine:
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
      break;

   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      bld.NOT(result, op[0]);
      break;
   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.XOR(result, op[0], op[1]);
      break;
   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.OR(result, op[0], op[1]);
      break;
   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.AND(result, op[0], op[1]);
      break;

   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      unreachable("Lowered by nir_lower_alu_reductions");

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      bld.MOV(result, negate(op[0]));
      break;

   case nir_op_f2b:
      bld.CMP(result, op[0], fs_reg(0.0f), BRW_CONDITIONAL_NZ);
      break;
   case nir_op_i2b:
      bld.CMP(result, op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
      break;

   case nir_op_ftrunc:
      inst = bld.RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = vgrf(glsl_type::float_type);
      bld.RNDD(temp, op[0]);
      temp.negate = true;
      inst = bld.MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = bld.RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_ffract:
      inst = bld.FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fround_even:
      inst = bld.RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      if (devinfo->gen >= 6) {
         inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_L;
      } else {
         bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
         inst = bld.SEL(result, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      if (devinfo->gen >= 6) {
         inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
         inst->conditional_mod = BRW_CONDITIONAL_GE;
      } else {
         bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
         inst = bld.SEL(result, op[0], op[1]);
         inst->predicate = BRW_PREDICATE_NORMAL;
      }
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_unpack_half_2x16_split_y:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bitfield_reverse:
      bld.BFREV(result, op[0]);
      break;

   case nir_op_bit_count:
      bld.CBIT(result, op[0]);
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side.  If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */
      bld.CMP(bld.null_reg_d(), result, fs_reg(-1), BRW_CONDITIONAL_NZ);

      fs_reg neg_result(result);
      neg_result.negate = true;
      inst = bld.ADD(result, neg_result, fs_reg(31));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }
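
   /* E.g. findMSB(1): FBH returns 31 (the set bit is 31 positions down
    * from the MSB), and the predicated 31 - 31 yields 0.  For an input of
    * 0, FBH returns 0xFFFFFFFF, the predicate fails, and the result stays
    * -1, as GLSL requires.
    */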

   case nir_op_find_lsb:
      bld.FBL(result, op[0]);
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      bld.BFE(result, op[2], op[1], op[0]);
      break;
   case nir_op_bfm:
      bld.BFI1(result, op[0], op[1]);
      break;
   case nir_op_bfi:
      bld.BFI2(result, op[0], op[1], op[2]);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should be handled by "
                  "lower_instructions::bitfield_insert_to_bfm_bfi");

   case nir_op_ishl:
      bld.SHL(result, op[0], op[1]);
      break;
   case nir_op_ishr:
      bld.ASR(result, op[0], op[1]);
      break;
   case nir_op_ushr:
      bld.SHR(result, op[0], op[1]);
      break;

   case nir_op_pack_half_2x16_split:
      bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
      break;

   case nir_op_ffma:
      inst = bld.MAD(result, op[2], op[1], op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = bld.LRP(result, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      if (optimize_frontfacing_ternary(instr, result))
         return;

      bld.CMP(bld.null_reg_d(), op[0], fs_reg(0), BRW_CONDITIONAL_NZ);
      inst = bld.SEL(result, op[1], op[2]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   default:
      unreachable("unhandled instruction");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0.
    */
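   /* E.g. if the resolved value's low bit is 1, (x & 1) == 1 and the
    * negated MOV writes -1 == 0xffffffff; if it is 0 the result is 0.
    * This matches the 0/~0 boolean convention produced natively on later
    * gens.
    */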
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      fs_reg masked = vgrf(glsl_type::int_type);
      bld.AND(masked, result, fs_reg(1));
      masked.negate = true;
      bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
   }
}

void
fs_visitor::nir_emit_load_const(const fs_builder &bld,
                                nir_load_const_instr *instr)
{
   fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);

   for (unsigned i = 0; i < instr->def.num_components; i++)
      bld.MOV(offset(reg, bld, i), fs_reg(instr->value.i[i]));

   nir_ssa_values[instr->def.index] = reg;
}

void
fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = bld.vgrf(BRW_REGISTER_TYPE_D,
                                               instr->def.num_components);
}

static fs_reg
fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg,
                   unsigned base_offset, nir_src *indirect)
{
   fs_reg reg;

   assert(!nir_reg->is_global);

   reg = v->nir_locals[nir_reg->index];

   reg = offset(reg, v->bld, base_offset * nir_reg->num_components);
   if (indirect) {
      int multiplier = nir_reg->num_components * (v->dispatch_width / 8);

      reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
      v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
                 fs_reg(multiplier));
   }

   return reg;
}

fs_reg
fs_visitor::get_nir_src(nir_src src)
{
   fs_reg reg;
   if (src.is_ssa) {
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = fs_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                               src.reg.indirect);
   }

   /* to avoid floating-point denorm flushing problems, set the type by
    * default to D - instructions that need floating point semantics will set
    * this to F if they need to
    */
   return retype(reg, BRW_REGISTER_TYPE_D);
}

fs_reg
fs_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
                                                dest.ssa.num_components);
      return nir_ssa_values[dest.ssa.index];
   }

   return fs_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                             dest.reg.indirect);
}

fs_reg
fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
{
   fs_reg image(UNIFORM, deref->var->data.driver_location,
                BRW_REGISTER_TYPE_UD);

   if (deref->deref.child) {
      const nir_deref_array *deref_array =
         nir_deref_as_array(deref->deref.child);
      assert(deref->deref.child->deref_type == nir_deref_type_array &&
             deref_array->deref.child == NULL);
      const unsigned size = glsl_get_length(deref->var->type);
      const unsigned base = MIN2(deref_array->base_offset, size - 1);

      image = offset(image, bld, base * BRW_IMAGE_PARAM_SIZE);

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         fs_reg *tmp = new(mem_ctx) fs_reg(vgrf(glsl_type::int_type));

         if (devinfo->gen == 7 && !devinfo->is_haswell) {
            /* IVB hangs when trying to access an invalid surface index with
             * the dataport.  According to the spec "if the index used to
             * select an individual element is negative or greater than or
             * equal to the size of the array, the results of the operation
             * are undefined but may not lead to termination" -- which is one
             * of the possible outcomes of the hang.  Clamp the index to
             * prevent access outside of the array bounds.
             */
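            /* E.g. for a four-element image array with base 0, emit_minmax
             * with BRW_CONDITIONAL_L computes min(index, 3), keeping the
             * dataport access inside the array.
             */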
            bld.emit_minmax(*tmp, retype(get_nir_src(deref_array->indirect),
                                         BRW_REGISTER_TYPE_UD),
                            fs_reg(size - base - 1), BRW_CONDITIONAL_L);
         } else {
            bld.MOV(*tmp, get_nir_src(deref_array->indirect));
         }

         bld.MUL(*tmp, *tmp, fs_reg(BRW_IMAGE_PARAM_SIZE));
         image.reladdr = tmp;
      }
   }

   return image;
}

void
fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
                         unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
      new_inst->dst = offset(new_inst->dst, bld, i);
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == GRF)
            new_inst->src[j] = offset(new_inst->src[j], bld, i);

      bld.emit(new_inst);
   }
}

/**
 * Get the matching channel register datatype for an image intrinsic of the
 * specified GLSL image type.
 */
static brw_reg_type
get_image_base_type(const glsl_type *type)
{
   switch ((glsl_base_type)type->sampler_type) {
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_INT:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("Not reached.");
   }
}

/**
 * Get the appropriate atomic op for an image atomic intrinsic.
 */
static unsigned
get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
{
   switch (op) {
   case nir_intrinsic_image_atomic_add:
      return BRW_AOP_ADD;
   case nir_intrinsic_image_atomic_min:
      return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
              BRW_AOP_IMIN : BRW_AOP_UMIN);
   case nir_intrinsic_image_atomic_max:
      return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
              BRW_AOP_IMAX : BRW_AOP_UMAX);
   case nir_intrinsic_image_atomic_and:
      return BRW_AOP_AND;
   case nir_intrinsic_image_atomic_or:
      return BRW_AOP_OR;
   case nir_intrinsic_image_atomic_xor:
      return BRW_AOP_XOR;
   case nir_intrinsic_image_atomic_exchange:
      return BRW_AOP_MOV;
   case nir_intrinsic_image_atomic_comp_swap:
      return BRW_AOP_CMPWR;
   default:
      unreachable("Not reachable.");
   }
}

void
fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   bool has_indirect = false;

   switch (instr->intrinsic) {
   case nir_intrinsic_discard:
   case nir_intrinsic_discard_if: {
      /* We track our discarded pixels in f0.1.  By predicating on it, we can
       * update just the flag bits that aren't yet discarded.  If there's no
       * condition, we emit a CMP of g0 != g0, so all currently executing
       * channels will get turned off.
       */
      fs_inst *cmp;
      if (instr->intrinsic == nir_intrinsic_discard_if) {
         cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
                       fs_reg(0), BRW_CONDITIONAL_Z);
      } else {
         fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
                                         BRW_REGISTER_TYPE_UW));
         cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
      }
      cmp->predicate = BRW_PREDICATE_NORMAL;
      cmp->flag_subreg = 1;

      if (devinfo->gen >= 6) {
         emit_discard_jump();
      }
      break;
   }

   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec:
   case nir_intrinsic_atomic_counter_read: {
      using namespace surface_access;

      /* Get the arguments of the atomic intrinsic. */
      const fs_reg offset = get_nir_src(instr->src[0]);
      const unsigned surface = (stage_prog_data->binding_table.abo_start +
                                instr->const_index[0]);
      fs_reg tmp;

      /* Emit a surface read or atomic op. */
      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_read:
         tmp = emit_untyped_read(bld, fs_reg(surface), offset, 1, 1);
         break;

      case nir_intrinsic_atomic_counter_inc:
         tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
                                   fs_reg(), 1, 1, BRW_AOP_INC);
         break;

      case nir_intrinsic_atomic_counter_dec:
         tmp = emit_untyped_atomic(bld, fs_reg(surface), offset, fs_reg(),
                                   fs_reg(), 1, 1, BRW_AOP_PREDEC);
         break;

      default:
         unreachable("Unreachable");
      }

      /* Assign the result. */
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);

      /* Mark the surface as used. */
      brw_mark_surface_used(stage_prog_data, surface);
      break;
   }

   case nir_intrinsic_image_load:
   case nir_intrinsic_image_store:
   case nir_intrinsic_image_atomic_add:
   case nir_intrinsic_image_atomic_min:
   case nir_intrinsic_image_atomic_max:
   case nir_intrinsic_image_atomic_and:
   case nir_intrinsic_image_atomic_or:
   case nir_intrinsic_image_atomic_xor:
   case nir_intrinsic_image_atomic_exchange:
   case nir_intrinsic_image_atomic_comp_swap: {
      using namespace image_access;

      /* Get the referenced image variable and type. */
      const nir_variable *var = instr->variables[0]->var;
      const glsl_type *type = var->type->without_array();
      const brw_reg_type base_type = get_image_base_type(type);

      /* Get some metadata from the image intrinsic. */
      const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
      const unsigned arr_dims = type->sampler_array ? 1 : 0;
      const unsigned surf_dims = type->coordinate_components() - arr_dims;
      const mesa_format format =
         (var->data.image.write_only ? MESA_FORMAT_NONE :
          _mesa_get_shader_image_format(var->data.image.format));

      /* Get the arguments of the image intrinsic. */
      const fs_reg image = get_nir_image_deref(instr->variables[0]);
      const fs_reg addr = retype(get_nir_src(instr->src[0]),
                                 BRW_REGISTER_TYPE_UD);
      const fs_reg src0 = (info->num_srcs >= 3 ?
                           retype(get_nir_src(instr->src[2]), base_type) :
                           fs_reg());
      const fs_reg src1 = (info->num_srcs >= 4 ?
                           retype(get_nir_src(instr->src[3]), base_type) :
                           fs_reg());
      fs_reg tmp;

      /* Emit an image load, store or atomic op. */
      if (instr->intrinsic == nir_intrinsic_image_load)
         tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);

      else if (instr->intrinsic == nir_intrinsic_image_store)
         emit_image_store(bld, image, addr, src0, surf_dims, arr_dims, format);

      else
         tmp = emit_image_atomic(bld, image, addr, src0, src1,
                                 surf_dims, arr_dims, info->dest_components,
                                 get_image_atomic_op(instr->intrinsic, type));

      /* Assign the result. */
      for (unsigned c = 0; c < info->dest_components; ++c)
         bld.MOV(offset(retype(dest, base_type), bld, c),
                 offset(tmp, bld, c));
      break;
   }

   case nir_intrinsic_memory_barrier: {
      const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 16 / dispatch_width);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         ->regs_written = 2;
      break;
   }

   case nir_intrinsic_image_size: {
      /* Get the referenced image variable and type. */
      const nir_variable *var = instr->variables[0]->var;
      const glsl_type *type = var->type->without_array();

      /* Get the size of the image. */
      const fs_reg image = get_nir_image_deref(instr->variables[0]);
      const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);

      /* For 1DArray image types, the array index is stored in the Z component.
       * Fix this by swizzling the Z component to the Y component.
       */
      const bool is_1d_array_image =
         type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
         type->sampler_array;

      /* For CubeArray images, we should count the number of cubes instead
       * of the number of faces.  Fix it by dividing the (Z component) by 6.
       */
      const bool is_cube_array_image =
         type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
         type->sampler_array;

      /* Copy all the components. */
      const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
      for (unsigned c = 0; c < info->dest_components; ++c) {
         if ((int)c >= type->coordinate_components()) {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    fs_reg(1));
         } else if (c == 1 && is_1d_array_image) {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    offset(size, bld, 2));
         } else if (c == 2 && is_cube_array_image) {
            bld.emit(SHADER_OPCODE_INT_QUOTIENT,
                     offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                     offset(size, bld, c), fs_reg(6));
         } else {
            bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
                    offset(size, bld, c));
         }
      }

      break;
   }

   case nir_intrinsic_image_samples:
      /* The driver does not support multi-sampled images. */
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), fs_reg(1));
      break;

   case nir_intrinsic_load_front_face:
      bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
              *emit_frontfacing_interpolation());
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_sample_mask_in:
   case nir_intrinsic_load_sample_id: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      fs_reg val = nir_system_values[sv];
      assert(val.file != BAD_FILE);
      dest.type = val.type;
      bld.MOV(dest, val);
      break;
   }

   case nir_intrinsic_load_sample_pos: {
      fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
      assert(sample_pos.file != BAD_FILE);
      dest.type = sample_pos.type;
      bld.MOV(dest, sample_pos);
      bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
      break;
   }

   case nir_intrinsic_load_uniform_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_uniform: {
      fs_reg uniform_reg(UNIFORM, instr->const_index[0]);
      uniform_reg.reg_offset = instr->const_index[1];

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = offset(retype(uniform_reg, dest.type), bld, j);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));

         bld.MOV(dest, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }

   case nir_intrinsic_load_ubo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ubo: {
      nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
      fs_reg surf_index;

      if (const_index) {
         surf_index = fs_reg(stage_prog_data->binding_table.ubo_start +
                             const_index->u[0]);
      } else {
         /* The block index is not a constant.  Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = vgrf(glsl_type::uint_type);
         bld.ADD(surf_index, get_nir_src(instr->src[0]),
                 fs_reg(stage_prog_data->binding_table.ubo_start));
         surf_index = bld.emit_uniformize(surf_index);

         /* Assume this may touch any UBO.  It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumBufferInterfaceBlocks - 1);
      }

      if (has_indirect) {
         /* Turn the byte offset into a dword offset. */
         fs_reg base_offset = vgrf(glsl_type::int_type);
         bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
                                     BRW_REGISTER_TYPE_D),
                 fs_reg(2));

         unsigned vec4_offset = instr->const_index[0] / 4;
         for (int i = 0; i < instr->num_components; i++)
            VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
                                       base_offset, vec4_offset + i);
      } else {
         fs_reg packed_consts = vgrf(glsl_type::float_type);
         packed_consts.type = dest.type;

         fs_reg const_offset_reg((unsigned) instr->const_index[0] & ~15);
         bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
                  surf_index, const_offset_reg);

         for (unsigned i = 0; i < instr->num_components; i++) {
            packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);

            /* The std140 packing rules don't allow vectors to cross 16-byte
             * boundaries, and a reg is 32 bytes.
             */
            assert(packed_consts.subreg_offset < 32);
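
            /* E.g. const_index[0] == 20 (a value at byte 20): the pull load
             * fetches the 16-byte-aligned block at byte 16, and set_smear()
             * selects dword 1 of it for the first component, dword 2 for
             * the second, and so on.
             */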
            bld.MOV(dest, packed_consts);
            dest = offset(dest, bld, 1);
         }
      }
      break;
   }

   case nir_intrinsic_load_ssbo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[0]);

      fs_reg surf_index;
      if (const_uniform_block) {
         unsigned index = stage_prog_data->binding_table.ubo_start +
                          const_uniform_block->u[0];
         surf_index = fs_reg(index);
         brw_mark_surface_used(prog_data, index);
      } else {
         surf_index = vgrf(glsl_type::uint_type);
         bld.ADD(surf_index, get_nir_src(instr->src[0]),
                 fs_reg(stage_prog_data->binding_table.ubo_start));
         surf_index = bld.emit_uniformize(surf_index);

         /* Assume this may touch any UBO.  It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumBufferInterfaceBlocks - 1);
      }

      /* Get the offset to read from */
      fs_reg offset_reg = vgrf(glsl_type::uint_type);
      unsigned const_offset_bytes = 0;
      if (has_indirect) {
         bld.MOV(offset_reg, get_nir_src(instr->src[1]));
      } else {
         const_offset_bytes = instr->const_index[0];
         bld.MOV(offset_reg, fs_reg(const_offset_bytes));
      }

      /* Read the vector */
      for (int i = 0; i < instr->num_components; i++) {
         fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                                1 /* dims */, 1 /* size */,
                                                BRW_PREDICATE_NONE);
         read_result.type = dest.type;
         bld.MOV(dest, read_result);
         dest = offset(dest, bld, 1);

         /* Vector components are stored contiguously in memory */
         if (i < instr->num_components) {
            if (!has_indirect) {
               const_offset_bytes += 4;
               bld.MOV(offset_reg, fs_reg(const_offset_bytes));
            } else {
               bld.ADD(offset_reg, offset_reg, brw_imm_ud(4));
            }
         }
      }

      break;
   }

   case nir_intrinsic_load_input_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_load_input: {
      unsigned index = 0;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = offset(retype(nir_inputs, dest.type), bld,
                             instr->const_index[0] + index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
         index++;

         bld.MOV(dest, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }

   /* Handle ARB_gpu_shader5 interpolation intrinsics
    *
    * It's worth a quick word of explanation as to why we handle the full
    * variable-based interpolation intrinsic rather than a lowered version
    * like we do for other inputs.  We have to do that because the way we
    * set up inputs doesn't allow us to use the already setup inputs for
    * interpolation.  At the beginning of the shader, we go through all of
    * the input variables and do the initial interpolation and put it in
    * the nir_inputs array based on its location as determined in
    * nir_lower_io.  If the input isn't used, dead code cleans up and
    * everything works fine.  However, when we get to the ARB_gpu_shader5
    * interpolation intrinsics, we need to reinterpolate the input
    * differently.  If we used an intrinsic that just had an index it would
    * only give us the offset into the nir_inputs array.  However, this is
    * useless because that value is post-interpolation and we need
    * pre-interpolation.  In order to get the actual location of the bits
    * we get from the vertex fetching hardware, we need the variable.
    */
   case nir_intrinsic_interp_var_at_centroid:
   case nir_intrinsic_interp_var_at_sample:
   case nir_intrinsic_interp_var_at_offset: {
      assert(stage == MESA_SHADER_FRAGMENT);

      ((struct brw_wm_prog_data *) prog_data)->pulls_bary = true;

      fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);

      /* For most messages, we need one reg of ignored data; the hardware
       * requires mlen==1 even when there is no payload.  In the per-slot
       * offset case, we'll replace this with the proper source data.
       */
      fs_reg src = vgrf(glsl_type::float_type);
      int mlen = 1; /* one reg unless overridden */

      fs_inst *inst;
      switch (instr->intrinsic) {
      case nir_intrinsic_interp_var_at_centroid:
         inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_CENTROID,
                         dst_xy, src, fs_reg(0u));
         break;

      case nir_intrinsic_interp_var_at_sample: {
         /* XXX: We should probably handle non-constant sample IDs */
         nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
         assert(const_sample);
         unsigned msg_data = const_sample ? const_sample->i[0] << 4 : 0;
         inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SAMPLE, dst_xy, src,
                         fs_reg(msg_data));
         break;
      }

      case nir_intrinsic_interp_var_at_offset: {
         nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

         if (const_offset) {
            unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
            unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;

            inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET, dst_xy, src,
                            fs_reg(off_x | (off_y << 4)));
         } else {
            src = vgrf(glsl_type::ivec2_type);
            fs_reg offset_src = retype(get_nir_src(instr->src[0]),
                                       BRW_REGISTER_TYPE_F);
            for (int i = 0; i < 2; i++) {
               fs_reg temp = vgrf(glsl_type::float_type);
               bld.MUL(temp, offset(offset_src, bld, i), fs_reg(16.0f));
               fs_reg itemp = vgrf(glsl_type::int_type);
               bld.MOV(itemp, temp); /* float to int */

               /* Clamp the upper end of the range to +7/16.
                * ARB_gpu_shader5 requires that we support a maximum offset
                * of +0.5, which isn't representable in a S0.4 value -- if
                * we didn't clamp it, we'd end up with -8/16, which is the
                * opposite of what the shader author wanted.
                *
                * This is legal due to ARB_gpu_shader5's quantization
                * rules:
                *
                * "Not all values of <offset> may be supported; x and y
                * offsets may be rounded to fixed-point values with the
                * number of fraction bits given by the
                * implementation-dependent constant
                * FRAGMENT_INTERPOLATION_OFFSET_BITS"
                */
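               /* E.g. an offset of +0.5 scales to 8, which an S0.4
                * immediate would wrap to -8/16; the SEL below (conditional
                * L acts as a minimum) clamps it to 7, i.e. +7/16.
                */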
               set_condmod(BRW_CONDITIONAL_L,
                           bld.SEL(offset(src, bld, i), itemp, fs_reg(7)));
            }

            mlen = 2 * dispatch_width / 8;
            inst = bld.emit(FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET, dst_xy, src,
                            fs_reg(0u));
         }
         break;
      }

      default:
         unreachable("Invalid intrinsic");
      }

      inst->mlen = mlen;
      /* 2 floats per slot returned */
      inst->regs_written = 2 * dispatch_width / 8;
      inst->pi_noperspective = instr->variables[0]->var->data.interpolation ==
                               INTERP_QUALIFIER_NOPERSPECTIVE;

      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
         src.type = dest.type;

         bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
         dest = offset(dest, bld, 1);
      }
      break;
   }

   case nir_intrinsic_store_ssbo_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* Block index */
      fs_reg surf_index;
      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[1]);
      if (const_uniform_block) {
         unsigned index = stage_prog_data->binding_table.ubo_start +
                          const_uniform_block->u[0];
         surf_index = fs_reg(index);
         brw_mark_surface_used(prog_data, index);
      } else {
         surf_index = vgrf(glsl_type::uint_type);
         bld.ADD(surf_index, get_nir_src(instr->src[1]),
                 fs_reg(stage_prog_data->binding_table.ubo_start));
         surf_index = bld.emit_uniformize(surf_index);

         brw_mark_surface_used(prog_data,
                               stage_prog_data->binding_table.ubo_start +
                               shader_prog->NumBufferInterfaceBlocks - 1);
      }

      /* Offset */
      fs_reg offset_reg = vgrf(glsl_type::uint_type);
      unsigned const_offset_bytes = 0;
      if (has_indirect) {
         bld.MOV(offset_reg, get_nir_src(instr->src[2]));
      } else {
         const_offset_bytes = instr->const_index[0];
         bld.MOV(offset_reg, fs_reg(const_offset_bytes));
      }

      /* Value */
      fs_reg val_reg = get_nir_src(instr->src[0]);

      /* Writemask */
      unsigned writemask = instr->const_index[1];

      /* Write each component present in the writemask */
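      /* E.g. writemask 0x5 (.xz) on a vec4: x is written at the base
       * offset, y is skipped, and by the time z comes up skipped_channels
       * is 2, so the offset register is advanced by 8 bytes before the
       * second untyped write.
       */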
      unsigned skipped_channels = 0;
      for (int i = 0; i < instr->num_components; i++) {
         int component_mask = 1 << i;
         if (writemask & component_mask) {
            if (skipped_channels) {
               if (!has_indirect) {
                  const_offset_bytes += 4 * skipped_channels;
                  bld.MOV(offset_reg, fs_reg(const_offset_bytes));
               } else {
                  bld.ADD(offset_reg, offset_reg,
                          brw_imm_ud(4 * skipped_channels));
               }
               skipped_channels = 0;
            }

            emit_untyped_write(bld, surf_index, offset_reg,
                               offset(val_reg, bld, i),
                               1 /* dims */, 1 /* size */,
                               BRW_PREDICATE_NONE);
         }

         skipped_channels++;
      }
      break;
   }

   case nir_intrinsic_store_output_indirect:
      has_indirect = true;
      /* fallthrough */
   case nir_intrinsic_store_output: {
      fs_reg src = get_nir_src(instr->src[0]);
      unsigned index = 0;
      for (unsigned j = 0; j < instr->num_components; j++) {
         fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
                                  instr->const_index[0] + index);
         if (has_indirect)
            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
         index++;
         bld.MOV(new_dest, src);
         src = offset(src, bld, 1);
      }
      break;
   }

   case nir_intrinsic_barrier:
      emit_barrier();
      if (stage == MESA_SHADER_COMPUTE)
         ((struct brw_cs_prog_data *) prog_data)->uses_barrier = true;
      break;

   case nir_intrinsic_load_local_invocation_id:
   case nir_intrinsic_load_work_group_id: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      fs_reg val = nir_system_values[sv];
      assert(val.file != BAD_FILE);
      dest.type = val.type;
      for (unsigned i = 0; i < 3; i++)
         bld.MOV(offset(dest, bld, i), offset(val, bld, i));
      break;
   }

   case nir_intrinsic_ssbo_atomic_add:
      nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
      break;
   case nir_intrinsic_ssbo_atomic_min:
      if (dest.type == BRW_REGISTER_TYPE_D)
         nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
      else
         nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_max:
      if (dest.type == BRW_REGISTER_TYPE_D)
         nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
      else
         nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
      break;

   case nir_intrinsic_get_buffer_size: {
      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
      unsigned ubo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
      int reg_width = dispatch_width / 8;

      assert(shader->base.UniformBlocks[ubo_index].IsShaderStorage);

      /* Set LOD = 0 */
      fs_reg source = fs_reg(0);

      int mlen = 1 * reg_width;
      fs_reg src_payload = fs_reg(GRF, alloc.allocate(mlen),
                                  BRW_REGISTER_TYPE_UD);
      bld.LOAD_PAYLOAD(src_payload, &source, 1, 0);

      fs_reg surf_index = fs_reg(prog_data->binding_table.ubo_start + ubo_index);
      fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, dest,
                               src_payload, surf_index);
      inst->header_size = 0;
      inst->mlen = mlen;
      break;
   }

   case nir_intrinsic_load_num_work_groups: {
      assert(devinfo->gen >= 7);
      assert(stage == MESA_SHADER_COMPUTE);

      struct brw_cs_prog_data *cs_prog_data =
         (struct brw_cs_prog_data *) prog_data;
      const unsigned surface =
         cs_prog_data->binding_table.work_groups_start;

      cs_prog_data->uses_num_work_groups = true;

      fs_reg surf_index = fs_reg(surface);
      brw_mark_surface_used(prog_data, surface);

      /* Read the 3 GLuint components of gl_NumWorkGroups */
      for (unsigned i = 0; i < 3; i++) {
         fs_reg read_result =
            emit_untyped_read(bld, surf_index,
                              fs_reg(i << 2),
                              1 /* dims */, 1 /* size */,
                              BRW_PREDICATE_NONE);
         read_result.type = dest.type;
         bld.MOV(dest, read_result);
         dest = offset(dest, bld, 1);
      }
      break;
   }

   default:
      unreachable("unknown intrinsic");
   }
}

void
fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
                                 int op, nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   fs_reg surface;
   nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
   if (const_surface) {
      unsigned surf_index = stage_prog_data->binding_table.ubo_start +
                            const_surface->u[0];
      surface = fs_reg(surf_index);
      brw_mark_surface_used(prog_data, surf_index);
   } else {
      surface = vgrf(glsl_type::uint_type);
      bld.ADD(surface, get_nir_src(instr->src[0]),
              fs_reg(stage_prog_data->binding_table.ubo_start));

      /* Assume this may touch any UBO.  This is the same we do for other
       * UBO/SSBO accesses with non-constant surface.
       */
      brw_mark_surface_used(prog_data,
                            stage_prog_data->binding_table.ubo_start +
                            shader_prog->NumBufferInterfaceBlocks - 1);
   }

   fs_reg offset = get_nir_src(instr->src[1]);
   fs_reg data1 = get_nir_src(instr->src[2]);
   fs_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3]);

   /* Emit the actual atomic operation */
   fs_reg atomic_result =
      surface_access::emit_untyped_atomic(bld, surface, offset,
                                          data1, data2,
                                          1 /* dims */, 1 /* rsize */,
                                          op,
                                          BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

void
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
{
   unsigned sampler = instr->sampler_index;
   fs_reg sampler_reg(sampler);

   /* FINISHME: We're failing to recompile our programs when the sampler is
    * updated.  This only matters for the texture rectangle scale parameters
    * (pre-gen6, or gen6+ with GL_CLAMP).
    */
   int texunit = prog->SamplerUnits[sampler];

   int gather_component = instr->component;

   bool is_rect = instr->sampler_dim == GLSL_SAMPLER_DIM_RECT;

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   int lod_components = 0;
   int UNUSED offset_components = 0;

   fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i].src);
      switch (instr->src[i].src_type) {
      case nir_tex_src_bias:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
            coordinate = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            coordinate = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         lod = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         lod2 = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            lod = retype(src, BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            lod = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            lod = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         sample_index = retype(src, BRW_REGISTER_TYPE_UD);
         break;
      case nir_tex_src_offset:
         tex_offset = retype(src, BRW_REGISTER_TYPE_D);
         if (instr->is_array)
            offset_components = instr->coord_components - 1;
         else
            offset_components = instr->coord_components;
         break;
      case nir_tex_src_projector:
         unreachable("should be lowered");

      case nir_tex_src_sampler_offset: {
         /* Figure out the highest possible sampler index and mark it as used */
         uint32_t max_used = sampler + instr->sampler_array_size - 1;
         if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
            max_used += stage_prog_data->binding_table.gather_texture_start;
         } else {
            max_used += stage_prog_data->binding_table.texture_start;
         }
         brw_mark_surface_used(prog_data, max_used);

         /* Emit code to evaluate the actual indexing expression */
         sampler_reg = vgrf(glsl_type::uint_type);
         bld.ADD(sampler_reg, src, fs_reg(sampler));
         sampler_reg = bld.emit_uniformize(sampler_reg);
         break;
      }

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms) {
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << sampler)) {
         mcs = emit_mcs_fetch(coordinate, instr->coord_components, sampler_reg);
      } else {
         mcs = fs_reg(0u);
      }
   }

   for (unsigned i = 0; i < 3; i++) {
      if (instr->const_offset[i] != 0) {
         assert(offset_components == 0);
         tex_offset = fs_reg(brw_texture_offset(instr->const_offset, 3));
         break;
      }
   }

   enum glsl_base_type dest_base_type =
      brw_glsl_base_type_for_nir_type(instr->dest_type);

   const glsl_type *dest_type =
      glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
                              1);

   ir_texture_opcode op;
   switch (instr->op) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_texture_samples: {
      fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
      fs_inst *inst = bld.emit(SHADER_OPCODE_SAMPLEINFO, dst,
                               bld.vgrf(BRW_REGISTER_TYPE_D, 1),
                               sampler_reg);
      inst->mlen = 1;
      inst->header_size = 1;
      inst->base_mrf = -1;
      return;
   }
   default:
      unreachable("unknown texture opcode");
   }

   emit_texture(op, dest_type, coordinate, instr->coord_components,
                shadow_comparitor, lod, lod2, lod_components, sample_index,
                tex_offset, mcs, gather_component,
                is_cube_array, is_rect, sampler, sampler_reg, texunit);

   fs_reg dest = get_nir_dest(instr->dest);
   dest.type = this->result.type;
   unsigned num_components = nir_tex_instr_dest_size(instr);
   emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                             dest, this->result),
                (1 << num_components) - 1);
}

void
fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld.emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      bld.emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}