/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "compiler/glsl/ir.h"
#include "brw_fs.h"
#include "brw_fs_surface_builder.h"
#include "brw_nir.h"
#include "brw_program.h"

using namespace brw;
using namespace brw::surface_access;

void
fs_visitor::emit_nir_code()
{
   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */
   nir_setup_inputs();
   nir_setup_outputs();
   nir_setup_uniforms();
   nir_emit_system_values();

   /* get the main function and emit it */
   nir_foreach_function(function, nir) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_emit_impl(function->impl);
   }
}

void
fs_visitor::nir_setup_inputs()
{
   if (stage != MESA_SHADER_FRAGMENT)
      return;

   nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);

   nir_foreach_variable(var, &nir->inputs) {
      fs_reg input = offset(nir_inputs, bld, var->data.driver_location);

      fs_reg reg;
      if (var->data.location == VARYING_SLOT_POS) {
         reg = *emit_fragcoord_interpolation();
         emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
                                   input, reg), 0xF);
      } else if (var->data.location == VARYING_SLOT_LAYER) {
         struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_LAYER, 1), 3);
         reg.type = BRW_REGISTER_TYPE_D;
         bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
      } else if (var->data.location == VARYING_SLOT_VIEWPORT) {
         struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_VIEWPORT, 2), 3);
         reg.type = BRW_REGISTER_TYPE_D;
         bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
      } else {
         int location = var->data.location;
         emit_general_interpolation(&input, var->name, var->type,
                                    (glsl_interp_qualifier) var->data.interpolation,
                                    &location, var->data.centroid,
                                    var->data.sample);
      }
   }
}

void
fs_visitor::nir_setup_single_output_varying(fs_reg *reg,
                                            const glsl_type *type,
                                            unsigned *location)
{
   if (type->is_array() || type->is_matrix()) {
      const struct glsl_type *elem_type = glsl_get_array_element(type);
      const unsigned length = glsl_get_length(type);

      for (unsigned i = 0; i < length; i++) {
         nir_setup_single_output_varying(reg, elem_type, location);
      }
   } else if (type->is_record()) {
      for (unsigned i = 0; i < type->length; i++) {
         const struct glsl_type *field_type = type->fields.structure[i].type;
         nir_setup_single_output_varying(reg, field_type, location);
      }
   } else {
      assert(type->is_scalar() || type->is_vector());
      unsigned num_elements = type->vector_elements;
      if (type->is_double())
         num_elements *= 2;
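      /* For example (an illustrative note, not from the original source):
       * a dvec3 output has vector_elements == 3, doubled to 6 32-bit
       * elements here, so the loop below assigns it two consecutive slots
       * with 4 and 2 components respectively.
       */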
      for (unsigned count = 0; count < num_elements; count += 4) {
         this->outputs[*location] = *reg;
         this->output_components[*location] = MIN2(4, num_elements - count);
         *reg = offset(*reg, bld, 4);
         (*location)++;
      }
   }
}

void
fs_visitor::nir_setup_outputs()
{
   if (stage == MESA_SHADER_TESS_CTRL)
      return;

   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);

   nir_foreach_variable(var, &nir->outputs) {
      fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);

      switch (stage) {
      case MESA_SHADER_VERTEX:
      case MESA_SHADER_TESS_EVAL:
      case MESA_SHADER_GEOMETRY: {
         unsigned location = var->data.location;
         nir_setup_single_output_varying(&reg, var->type, &location);
         break;
      }
      case MESA_SHADER_FRAGMENT:
         if (key->force_dual_color_blend &&
             var->data.location == FRAG_RESULT_DATA1) {
            this->dual_src_output = reg;
            this->do_dual_src = true;
         } else if (var->data.index > 0) {
            assert(var->data.location == FRAG_RESULT_DATA0);
            assert(var->data.index == 1);
            this->dual_src_output = reg;
            this->do_dual_src = true;
         } else if (var->data.location == FRAG_RESULT_COLOR) {
            /* Writing gl_FragColor outputs to all color regions. */
            for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
               this->outputs[i] = reg;
               this->output_components[i] = 4;
            }
         } else if (var->data.location == FRAG_RESULT_DEPTH) {
            this->frag_depth = reg;
         } else if (var->data.location == FRAG_RESULT_STENCIL) {
            this->frag_stencil = reg;
         } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
            this->sample_mask = reg;
         } else {
            int vector_elements = var->type->without_array()->vector_elements;

            /* gl_FragData or a user-defined FS output */
            assert(var->data.location >= FRAG_RESULT_DATA0 &&
                   var->data.location < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS);

            /* General color output. */
            for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
               int output = var->data.location - FRAG_RESULT_DATA0 + i;
               this->outputs[output] = offset(reg, bld, vector_elements * i);
               this->output_components[output] = vector_elements;
            }
         }
         break;
      default:
         unreachable("unhandled shader stage");
      }
   }
}

void
fs_visitor::nir_setup_uniforms()
{
   if (dispatch_width != min_dispatch_width)
      return;

   uniforms = nir->num_uniforms / 4;
}

static bool
emit_system_values_block(nir_block *block, fs_visitor *v)
{
   fs_reg *reg;

   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_vertex_id:
         unreachable("should be lowered by lower_vertex_id().");

      case nir_intrinsic_load_vertex_id_zero_base:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
         break;

      case nir_intrinsic_load_base_vertex:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
         break;

      case nir_intrinsic_load_instance_id:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
         break;

      case nir_intrinsic_load_base_instance:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_BASE_INSTANCE];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_INSTANCE);
         break;

      case nir_intrinsic_load_draw_id:
         assert(v->stage == MESA_SHADER_VERTEX);
         reg = &v->nir_system_values[SYSTEM_VALUE_DRAW_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_vs_system_value(SYSTEM_VALUE_DRAW_ID);
         break;

      case nir_intrinsic_load_invocation_id:
         if (v->stage == MESA_SHADER_TESS_CTRL)
            break;
         assert(v->stage == MESA_SHADER_GEOMETRY);
         reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
         if (reg->file == BAD_FILE) {
            const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
            fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
            fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
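            /* Added note: the SHR below assumes the GS payload keeps the
             * invocation ID in the top five bits (31:27) of g1.0; e.g. for
             * invocation 3 that DWord reads 0x18000000 and shifting right
             * by 27 yields 3.
             */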
            abld.SHR(iid, g1, brw_imm_ud(27u));
            *reg = iid;
         }
         break;

      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->devinfo->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplemaskin_setup();
         break;

      case nir_intrinsic_load_work_group_id:
         assert(v->stage == MESA_SHADER_COMPUTE);
         reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_cs_work_group_id_setup();
         break;

      case nir_intrinsic_load_helper_invocation:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_HELPER_INVOCATION];
         if (reg->file == BAD_FILE) {
            const fs_builder abld =
               v->bld.annotate("gl_HelperInvocation", NULL);

            /* On Gen6+ (gl_HelperInvocation is only exposed on Gen7+) the
             * pixel mask is in g1.7 of the thread payload.
             *
             * We move the per-channel pixel enable bit to the low bit of each
             * channel by shifting the byte containing the pixel mask by the
             * vector immediate 0x76543210UV.
             *
             * The region of <1,8,0> reads only 1 byte (the pixel masks for
             * subspans 0 and 1) in SIMD8 and an additional byte (the pixel
             * masks for 2 and 3) in SIMD16.
             */
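            /* Worked example (illustrative): the UV immediate 0x76543210
             * supplies per-channel shift counts 0..7, so with a pixel mask
             * byte of 0xA5 (0b10100101) channel 0 shifts by 0 and keeps
             * bit 0 (1) in its low bit, channel 1 shifts by 1 and keeps
             * bit 1 (0), and so on; only the low bit of each channel is
             * meaningful afterwards.
             */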
            fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);
            abld.SHR(shifted,
                     stride(byte_offset(retype(brw_vec1_grf(1, 0),
                                               BRW_REGISTER_TYPE_UB), 28),
                            1, 8, 0),
                     brw_imm_v(0x76543210));

            /* A set bit in the pixel mask means the channel is enabled, but
             * that is the opposite of gl_HelperInvocation so we need to invert
             * the mask.
             *
             * The negate source-modifier bit of logical instructions on Gen8+
             * performs 1's complement negation, so we can use that instead of
             * a NOT instruction.
             */
            fs_reg inverted = negate(shifted);
            if (v->devinfo->gen < 8) {
               inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
               abld.NOT(inverted, shifted);
            }

            /* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
             * with 1 and negating.
             */
            fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
            abld.AND(anded, inverted, brw_imm_uw(1));

            fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
            abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
            *reg = dst;
         }
         break;

      default:
         break;
      }
   }

   return true;
}

void
fs_visitor::nir_emit_system_values()
{
   nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
   for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
      nir_system_values[i] = fs_reg();
   }

   nir_foreach_function(function, nir) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_foreach_block(block, function->impl) {
         emit_system_values_block(block, this);
      }
   }
}

void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = fs_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      const brw_reg_type reg_type =
         reg->bit_size == 32 ? BRW_REGISTER_TYPE_F : BRW_REGISTER_TYPE_DF;
      nir_locals[reg->index] = bld.vgrf(reg_type, size);
   }

   nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
                             impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* first, put the condition into f0 */
   fs_inst *inst = bld.MOV(bld.null_reg_d(),
                           retype(get_nir_src(if_stmt->condition),
                                  BRW_REGISTER_TYPE_D));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   bld.IF(BRW_PREDICATE_NORMAL);

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   bld.emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   bld.emit(BRW_OPCODE_ENDIF);
}

void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   bld.emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   bld.emit(BRW_OPCODE_WHILE);
}

void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      nir_emit_instr(instr);
   }
}

void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   const fs_builder abld = bld.annotate(NULL, instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(abld, nir_instr_as_alu(instr));
      break;

   case nir_instr_type_intrinsic:
      switch (stage) {
      case MESA_SHADER_VERTEX:
         nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_TESS_CTRL:
         nir_emit_tcs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_TESS_EVAL:
         nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_GEOMETRY:
         nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_FRAGMENT:
         nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_COMPUTE:
         nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      default:
         unreachable("unsupported shader stage");
      }
      break;

   case nir_instr_type_tex:
      nir_emit_texture(abld, nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(abld, nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(abld, nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}

/**
 * Recognizes a parent instruction of nir_op_extract_* and changes the type to
 * match the parent instruction.
 */
bool
fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
                                      const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       !instr->src[0].src.ssa->parent_instr)
      return false;

   if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *src0 =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   if (src0->op != nir_op_extract_u8 && src0->op != nir_op_extract_u16 &&
       src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
      return false;

   nir_const_value *element = nir_src_as_const_value(src0->src[1].src);
   assert(element != NULL);

   /* Element type to extract. */
   const brw_reg_type type = brw_int_type(
      src0->op == nir_op_extract_u16 || src0->op == nir_op_extract_i16 ? 2 : 1,
      src0->op == nir_op_extract_i16 || src0->op == nir_op_extract_i8);

   fs_reg op0 = get_nir_src(src0->src[0].src);
   op0.type = brw_type_for_nir_type(
      (nir_alu_type)(nir_op_infos[src0->op].input_types[0] |
                     nir_src_bit_size(src0->src[0].src)));
   op0 = offset(op0, bld, src0->src[0].swizzle[0]);

   set_saturate(instr->dest.saturate,
                bld.MOV(result, subscript(op0, type, element->u32[0])));
   return true;
}

bool
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
                                         const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *src0 =
      nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);

   if (src0->intrinsic != nir_intrinsic_load_front_face)
      return false;

   nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
   if (!value1 || fabsf(value1->f32[0]) != 1.0f)
      return false;

   nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
   if (!value2 || fabsf(value2->f32[0]) != 1.0f)
      return false;

   fs_reg tmp = vgrf(glsl_type::int_type);

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */
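      /* Worked example (illustrative): for a front-facing polygon bit 15 of
       * g0.0 is 0, the OR writes 0x3f80 into the high word of tmp, and the
       * final AND with 0xbf800000 leaves 0x3f800000 (+1.0f); for a
       * back-facing polygon bit 15 is 1, the high word becomes 0xbf80, and
       * the AND yields 0xbf800000 (-1.0f).
       */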
      if (value1->f32[0] == -1.0f) {
         g0.negate = true;
      }

      tmp.type = BRW_REGISTER_TYPE_W;
      tmp.subreg_offset = 2;
      tmp.stride = 2;

      bld.OR(tmp, g0, brw_imm_uw(0x3f80));

      tmp.type = BRW_REGISTER_TYPE_D;
      tmp.subreg_offset = 0;
      tmp.stride = 1;
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp<1>D  g1.6<0,1,0>D  0x3f800000D
       *    and(8) dst<1>D  tmp<8,8,1>D   0xbf800000D
       *
       * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES
       */

      if (value1->f32[0] == -1.0f) {
         g1_6.negate = true;
      }

      bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
   }
   bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));

   return true;
}

void
fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
   fs_inst *inst;

   fs_reg result = get_nir_dest(instr->dest.dest);
   result.type = brw_type_for_nir_type(
      (nir_alu_type)(nir_op_infos[instr->op].output_type |
                     nir_dest_bit_size(instr->dest.dest)));

   fs_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(
         (nir_alu_type)(nir_op_infos[instr->op].input_types[i] |
                        nir_src_bit_size(instr->src[i].src)));
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   /* We get a bunch of mov's out of the from_ssa pass and they may still
    * be vectorized.  We'll handle them as a special-case.  We'll also
    * handle vecN here because it's basically the same thing.
    */
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      fs_reg temp = result;
      bool need_extra_copy = false;
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         if (!instr->src[i].src.is_ssa &&
             instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
            need_extra_copy = true;
            temp = bld.vgrf(result.type, 4);
            break;
         }
      }

      for (unsigned i = 0; i < 4; i++) {
         if (!(instr->dest.write_mask & (1 << i)))
            continue;

         if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[0], bld, instr->src[0].swizzle[i]));
         } else {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[i], bld, instr->src[i].swizzle[0]));
         }
         inst->saturate = instr->dest.saturate;
      }

      /* In this case the source and destination registers were the same,
       * so we need to insert an extra set of moves in order to deal with
       * any swizzling.
       */
      if (need_extra_copy) {
         for (unsigned i = 0; i < 4; i++) {
            if (!(instr->dest.write_mask & (1 << i)))
               continue;

            bld.MOV(offset(result, bld, i), offset(temp, bld, i));
         }
      }
      return;
   }
   default:
      break;
   }

   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel.  Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(_mesa_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, bld, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
   }

   switch (instr->op) {
   case nir_op_i2f:
   case nir_op_u2f:
      if (optimize_extract_to_float(instr, result))
         return;
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2d:
   case nir_op_i2d:
   case nir_op_u2d:
      /* CHV PRM, vol07, 3D Media GPGPU Engine, Register Region Restrictions:
       *
       *    "When source or destination is 64b (...), regioning in Align1
       *     must follow these rules:
       *
       *     1. Source and destination horizontal stride must be aligned to
       *        the same qword.
       *     ..."
       *
       * This means that 32-bit to 64-bit conversions need to have the 32-bit
       * data elements aligned to 64-bit.  This restriction does not apply to
       * BDW and later.
       */
      if (devinfo->is_cherryview || devinfo->is_broxton) {
         fs_reg tmp = bld.vgrf(result.type, 1);
         tmp = subscript(tmp, op[0].type, 0);
         inst = bld.MOV(tmp, op[0]);
         inst = bld.MOV(result, tmp);
         inst->saturate = instr->dest.saturate;
         break;
      }
      /* fallthrough */
   case nir_op_d2f:
   case nir_op_d2i:
   case nir_op_d2u:
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      bld.MOV(result, op[0]);
      break;

   case nir_op_fsign: {
      if (type_sz(op[0].type) < 8) {
         /* AND(val, 0x80000000) gives the sign bit.
          *
          * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
          * zero.
          */
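         /* Worked example (illustrative): fsign(-2.5f): -2.5f is 0xC0200000,
          * AND with 0x80000000 leaves 0x80000000, and since -2.5f != 0.0f
          * the predicated OR with 0x3f800000 produces 0xBF800000, i.e.
          * -1.0f.
          */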
         bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);

         fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
         op[0].type = BRW_REGISTER_TYPE_UD;
         result.type = BRW_REGISTER_TYPE_UD;
         bld.AND(result_int, op[0], brw_imm_ud(0x80000000u));

         inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u));
         inst->predicate = BRW_PREDICATE_NORMAL;
         if (instr->dest.saturate) {
            inst = bld.MOV(result, result);
            inst->saturate = true;
         }
      } else {
         /* For doubles we do the same but we need to consider:
          *
          * - 2-src instructions can't operate with 64-bit immediates
          * - The sign is encoded in the high 32-bit of each DF
          * - CMP with DF requires special handling in SIMD16
          * - We need to produce a DF result.
          */

         /* 2-src instructions can't have 64-bit immediates, so put 0.0 in
          * a register and compare with that.
          */
         fs_reg tmp = vgrf(glsl_type::double_type);
         bld.MOV(tmp, brw_imm_df(0.0));

         /* A direct DF CMP using the flag register (null dst) won't work in
          * SIMD16 because the CMP will be split in two by lower_simd_width,
          * resulting in two CMP instructions with the same dst (NULL),
          * leading to dead code elimination of the first one.  In SIMD8,
          * however, there is no need to split the CMP and we can save some
          * instructions.
          */
         fs_reg dst_tmp = vgrf(glsl_type::double_type);
         bld.CMP(dst_tmp, op[0], tmp, BRW_CONDITIONAL_NZ);

         /* In SIMD16 we want to avoid using a NULL dst register with DF CMP,
          * so we store the result of the comparison in a vgrf instead and
          * then we generate a UD comparison from that that won't have to
          * be split by lower_simd_width.  This is what NIR does to handle
          * double comparisons in the general case.
          */
         if (bld.dispatch_width() == 16) {
            fs_reg dst_tmp_ud = retype(dst_tmp, BRW_REGISTER_TYPE_UD);
            bld.MOV(dst_tmp_ud, subscript(dst_tmp, BRW_REGISTER_TYPE_UD, 0));
            bld.CMP(bld.null_reg_ud(),
                    dst_tmp_ud, brw_imm_ud(0), BRW_CONDITIONAL_NZ);
         }

         /* Get the high 32-bit of each double component where the sign is */
         fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
         bld.MOV(result_int, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));

         /* Get the sign bit */
         bld.AND(result_int, result_int, brw_imm_ud(0x80000000u));

         /* Add 1.0 to the sign, predicated to skip the case of op[0] == 0.0 */
         inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u));
         inst->predicate = BRW_PREDICATE_NORMAL;

         /* Convert from 32-bit float to 64-bit double */
         result.type = BRW_REGISTER_TYPE_DF;
         inst = bld.MOV(result, retype(result_int, BRW_REGISTER_TYPE_F));

         if (instr->dest.saturate) {
            inst = bld.MOV(result, result);
            inst->saturate = true;
         }
      }
      break;
   }

   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *              -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G);
      bld.ASR(result, op[0], brw_imm_d(31));
      inst = bld.OR(result, result, brw_imm_d(1));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_frcp:
      inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      } else {
         inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_fine:
      inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_coarse:
      inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
      } else {
         inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_fine:
      inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_coarse:
      inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_iadd:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
   case nir_op_fadd:
      inst = bld.ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = bld.MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.MUL(result, op[0], op[1]);
      break;

   case nir_op_imul_high:
   case nir_op_umul_high:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
      break;

   case nir_op_uadd_carry:
      unreachable("Should have been lowered by carry_to_arith().");

   case nir_op_usub_borrow:
      unreachable("Should have been lowered by borrow_to_arith().");

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
      break;

   case nir_op_imod: {
      /* Get a regular C-style remainder.  If a % b == 0, set the predicate. */
      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = bld.MOV(bld.null_reg_d(), result);
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different.  We can then use a conditional modifier to
       * turn that into a predicate.  This leads us to an XOR.l instruction.
       *
       * Technically, according to the PRM, you're not allowed to use .l on a
       * XOR instruction.  However, empirical experiments and Curro's reading
       * of the simulator source both indicate that it's safe.
       */
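      /* Worked example (illustrative): imod(-7, 3): INT_REMAINDER gives the
       * C-style remainder -1, which is non-zero, and the signs of -7 and 3
       * differ (their XOR has the top bit set), so the predicated ADD below
       * adds op[1] and yields 2, the GLSL modulus.
       */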
      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
      inst = bld.XOR(tmp, op[0], op[1]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = bld.ADD(result, result, op[1]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_flt:
   case nir_op_fge:
   case nir_op_feq:
   case nir_op_fne: {
      fs_reg dest = result;
      if (nir_src_bit_size(instr->src[0].src) > 32) {
         dest = bld.vgrf(BRW_REGISTER_TYPE_DF, 1);
      }
      brw_conditional_mod cond;
      switch (instr->op) {
      case nir_op_flt:
         cond = BRW_CONDITIONAL_L;
         break;
      case nir_op_fge:
         cond = BRW_CONDITIONAL_GE;
         break;
      case nir_op_feq:
         cond = BRW_CONDITIONAL_Z;
         break;
      case nir_op_fne:
         cond = BRW_CONDITIONAL_NZ;
         break;
      default:
         unreachable("bad opcode");
      }
      bld.CMP(dest, op[0], op[1], cond);
      if (nir_src_bit_size(instr->src[0].src) > 32) {
         bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
      }
      break;
   }

   case nir_op_ilt:
   case nir_op_ult:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
      break;

   case nir_op_ige:
   case nir_op_uge:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
      break;

   case nir_op_ieq:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
      break;

   case nir_op_ine:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
      break;

   case nir_op_inot:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      bld.NOT(result, op[0]);
      break;
   case nir_op_ixor:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.XOR(result, op[0], op[1]);
      break;
   case nir_op_ior:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.OR(result, op[0], op[1]);
      break;
   case nir_op_iand:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.AND(result, op[0], op[1]);
      break;

   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      unreachable("Lowered by nir_lower_alu_reductions");

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      bld.MOV(result, negate(op[0]));
      break;

   case nir_op_f2b:
      bld.CMP(result, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
      break;
   case nir_op_d2b: {
      /* two-argument instructions can't take 64-bit immediates */
      fs_reg zero = vgrf(glsl_type::double_type);
      bld.MOV(zero, brw_imm_df(0.0));
      /* A SIMD16 execution needs to be split in two instructions, so use
       * a vgrf instead of the flag register as dst so instruction splitting
       * works
       */
      fs_reg tmp = vgrf(glsl_type::double_type);
      bld.CMP(tmp, op[0], zero, BRW_CONDITIONAL_NZ);
      bld.MOV(result, subscript(tmp, BRW_REGISTER_TYPE_UD, 0));
      break;
   }
   case nir_op_i2b:
      bld.CMP(result, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
      break;

   case nir_op_ftrunc:
      inst = bld.RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      op[0].negate = !op[0].negate;
      fs_reg temp = vgrf(glsl_type::float_type);
      bld.RNDD(temp, op[0]);
      temp.negate = true;
      inst = bld.MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = bld.RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_ffract:
      inst = bld.FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fround_even:
      inst = bld.RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fquantize2f16: {
      fs_reg tmp16 = bld.vgrf(BRW_REGISTER_TYPE_D);
      fs_reg tmp32 = bld.vgrf(BRW_REGISTER_TYPE_F);
      fs_reg zero = bld.vgrf(BRW_REGISTER_TYPE_F);

      /* The destination stride must be at least as big as the source stride. */
      tmp16.type = BRW_REGISTER_TYPE_W;
      tmp16.stride = 2;

      /* Check for denormal */
      fs_reg abs_src0 = op[0];
      abs_src0.abs = true;
      bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
              BRW_CONDITIONAL_L);
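      /* Added note: 2^-14 is the smallest normal half-float magnitude, so
       * this comparison flags inputs whose F16 result would be a denormal
       * and therefore flushed; those take the signed zero computed below
       * instead.
       */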
1186 /* Get the appropriately signed zero */
1187 bld.AND(retype(zero, BRW_REGISTER_TYPE_UD),
1188 retype(op[0], BRW_REGISTER_TYPE_UD),
1189 brw_imm_ud(0x80000000));
1190 /* Do the actual F32 -> F16 -> F32 conversion */
1191 bld.emit(BRW_OPCODE_F32TO16, tmp16, op[0]);
1192 bld.emit(BRW_OPCODE_F16TO32, tmp32, tmp16);
1193 /* Select that or zero based on normal status */
1194 inst = bld.SEL(result, zero, tmp32);
1195 inst->predicate = BRW_PREDICATE_NORMAL;
1196 inst->saturate = instr->dest.saturate;
1202 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1204 inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_L);
1205 inst->saturate = instr->dest.saturate;
1210 assert(nir_dest_bit_size(instr->dest.dest) < 64);
1212 inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_GE);
1213 inst->saturate = instr->dest.saturate;
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_unpack_half_2x16_split_y:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_pack_double_2x32_split:
      /* Optimize the common case where we are re-packing a double with
       * the result of a previous double unpack. In this case we can take the
       * 32-bit value to use in the re-pack from the original double and bypass
       * the unpack operation.
       */
      for (int i = 0; i < 2; i++) {
         if (!instr->src[i].src.is_ssa)
            continue;

         const nir_instr *parent_instr = instr->src[i].src.ssa->parent_instr;
         if (parent_instr->type != nir_instr_type_alu)
            continue;

         const nir_alu_instr *alu_parent = nir_instr_as_alu(parent_instr);
         if (alu_parent->op != nir_op_unpack_double_2x32_split_x &&
             alu_parent->op != nir_op_unpack_double_2x32_split_y)
            continue;

         if (!alu_parent->src[0].src.is_ssa)
            continue;

         op[i] = get_nir_src(alu_parent->src[0].src);
         op[i] = offset(retype(op[i], BRW_REGISTER_TYPE_DF), bld,
                        alu_parent->src[0].swizzle[channel]);
         if (alu_parent->op == nir_op_unpack_double_2x32_split_y)
            op[i] = subscript(op[i], BRW_REGISTER_TYPE_UD, 1);
         else
            op[i] = subscript(op[i], BRW_REGISTER_TYPE_UD, 0);
      }
      bld.emit(FS_OPCODE_PACK, result, op[0], op[1]);
      break;

   case nir_op_unpack_double_2x32_split_x:
   case nir_op_unpack_double_2x32_split_y: {
      /* Optimize the common case where we are unpacking from a double we have
       * previously packed. In this case we can just bypass the pack operation
       * and source directly from its arguments.
       */
      unsigned index = (instr->op == nir_op_unpack_double_2x32_split_x) ? 0 : 1;
      if (instr->src[0].src.is_ssa) {
         nir_instr *parent_instr = instr->src[0].src.ssa->parent_instr;
         if (parent_instr->type == nir_instr_type_alu) {
            nir_alu_instr *alu_parent = nir_instr_as_alu(parent_instr);
            if (alu_parent->op == nir_op_pack_double_2x32_split &&
                alu_parent->src[index].src.is_ssa) {
               op[0] = retype(get_nir_src(alu_parent->src[index].src),
                              BRW_REGISTER_TYPE_UD);
               op[0] =
                  offset(op[0], bld, alu_parent->src[index].swizzle[channel]);
               bld.MOV(result, op[0]);
               break;
            }
         }
      }

      if (instr->op == nir_op_unpack_double_2x32_split_x)
         bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 0));
      else
         bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));
      break;
   }

   case nir_op_fpow:
      inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bitfield_reverse:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.BFREV(result, op[0]);
      break;

   case nir_op_bit_count:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.CBIT(result, op[0]);
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side.  If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */
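      /* Worked example (illustrative): findMSB(0x10) should be 4; FBH counts
       * from bit 31 and returns 27, and the predicated 31 - result below
       * recovers 4.  An all-zero input returns 0xFFFFFFFF and is left alone.
       */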
      bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
      inst = bld.ADD(result, result, brw_imm_d(31));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->src[0].negate = true;
      break;
   }

   case nir_op_find_lsb:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.FBL(result, op[0]);
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");
   case nir_op_ubfe:
   case nir_op_ibfe:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.BFE(result, op[2], op[1], op[0]);
      break;
   case nir_op_bfm:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.BFI1(result, op[0], op[1]);
      break;
   case nir_op_bfi:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.BFI2(result, op[0], op[1], op[2]);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");

   case nir_op_ishl:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.SHL(result, op[0], op[1]);
      break;
   case nir_op_ishr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.ASR(result, op[0], op[1]);
      break;
   case nir_op_ushr:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.SHR(result, op[0], op[1]);
      break;

   case nir_op_pack_half_2x16_split:
      bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
      break;

   case nir_op_ffma:
      inst = bld.MAD(result, op[2], op[1], op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = bld.LRP(result, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      if (optimize_frontfacing_ternary(instr, result))
         return;

      bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
      inst = bld.SEL(result, op[1], op[2]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_extract_u8:
   case nir_op_extract_i8: {
      const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
      nir_const_value *byte = nir_src_as_const_value(instr->src[1].src);
      assert(byte != NULL);
      bld.MOV(result, subscript(op[0], type, byte->u32[0]));
      break;
   }

   case nir_op_extract_u16:
   case nir_op_extract_i16: {
      const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
      nir_const_value *word = nir_src_as_const_value(instr->src[1].src);
      assert(word != NULL);
      bld.MOV(result, subscript(op[0], type, word->u32[0]));
      break;
   }

   default:
      unreachable("unhandled instruction");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      fs_reg masked = vgrf(glsl_type::int_type);
      bld.AND(masked, result, brw_imm_d(1));
      masked.negate = true;
      bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
   }
}

void
fs_visitor::nir_emit_load_const(const fs_builder &bld,
                                nir_load_const_instr *instr)
{
   const brw_reg_type reg_type =
      instr->def.bit_size == 32 ? BRW_REGISTER_TYPE_D : BRW_REGISTER_TYPE_DF;
   fs_reg reg = bld.vgrf(reg_type, instr->def.num_components);

   switch (instr->def.bit_size) {
   case 32:
      for (unsigned i = 0; i < instr->def.num_components; i++)
         bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i]));
      break;

   case 64:
      for (unsigned i = 0; i < instr->def.num_components; i++)
         bld.MOV(offset(reg, bld, i), brw_imm_df(instr->value.f64[i]));
      break;

   default:
      unreachable("Invalid bit size");
   }

   nir_ssa_values[instr->def.index] = reg;
}

void
fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
{
   const brw_reg_type reg_type =
      instr->def.bit_size == 32 ? BRW_REGISTER_TYPE_D : BRW_REGISTER_TYPE_DF;
   nir_ssa_values[instr->def.index] =
      bld.vgrf(reg_type, instr->def.num_components);
}

fs_reg
fs_visitor::get_nir_src(const nir_src &src)
{
   fs_reg reg;
   if (src.is_ssa) {
      reg = nir_ssa_values[src.ssa->index];
   } else {
      /* We don't handle indirects on locals */
      assert(src.reg.indirect == NULL);
      reg = offset(nir_locals[src.reg.reg->index], bld,
                   src.reg.base_offset * src.reg.reg->num_components);
   }

   /* to avoid floating-point denorm flushing problems, set the type by
    * default to D - instructions that need floating point semantics will set
    * this to F if they need to
    */
   return retype(reg, BRW_REGISTER_TYPE_D);
}

/**
 * Return an IMM for constants; otherwise call get_nir_src() as normal.
 */
fs_reg
fs_visitor::get_nir_src_imm(const nir_src &src)
{
   nir_const_value *val = nir_src_as_const_value(src);
   return val ? fs_reg(brw_imm_d(val->i32[0])) : get_nir_src(src);
}

fs_reg
fs_visitor::get_nir_dest(const nir_dest &dest)
{
   if (dest.is_ssa) {
      const brw_reg_type reg_type =
         dest.ssa.bit_size == 32 ? BRW_REGISTER_TYPE_F : BRW_REGISTER_TYPE_DF;
      nir_ssa_values[dest.ssa.index] =
         bld.vgrf(reg_type, dest.ssa.num_components);
      return nir_ssa_values[dest.ssa.index];
   }

   /* We don't handle indirects on locals */
   assert(dest.reg.indirect == NULL);
   return offset(nir_locals[dest.reg.reg->index], bld,
                 dest.reg.base_offset * dest.reg.reg->num_components);
}

fs_reg
fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
{
   fs_reg image(UNIFORM, deref->var->data.driver_location / 4,
                BRW_REGISTER_TYPE_UD);
   fs_reg indirect;
   unsigned indirect_max = 0;

   for (const nir_deref *tail = &deref->deref; tail->child;
        tail = tail->child) {
      const nir_deref_array *deref_array = nir_deref_as_array(tail->child);
      assert(tail->child->deref_type == nir_deref_type_array);
      const unsigned size = glsl_get_length(tail->type);
      const unsigned element_size = type_size_scalar(deref_array->deref.type);
      const unsigned base = MIN2(deref_array->base_offset, size - 1);
      image = offset(image, bld, base * element_size);

      if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
         fs_reg tmp = vgrf(glsl_type::uint_type);

         /* Accessing an invalid surface index with the dataport can result
          * in a hang.  According to the spec "if the index used to
          * select an individual element is negative or greater than or
          * equal to the size of the array, the results of the operation
          * are undefined but may not lead to termination" -- which is one
          * of the possible outcomes of the hang.  Clamp the index to
          * prevent access outside of the array bounds.
          */
         bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
                                     BRW_REGISTER_TYPE_UD),
                         brw_imm_ud(size - base - 1), BRW_CONDITIONAL_L);

         indirect_max += element_size * (tail->type->length - 1);

         bld.MUL(tmp, tmp, brw_imm_ud(element_size * 4));
         if (indirect.file == BAD_FILE) {
            indirect = tmp;
         } else {
            bld.ADD(indirect, indirect, tmp);
         }
      }
   }

   if (indirect.file == BAD_FILE) {
      return image;
   } else {
      /* Emit a pile of MOVs to load the uniform into a temporary.  The
       * dead-code elimination pass will get rid of what we don't use.
       */
      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, BRW_IMAGE_PARAM_SIZE);
      for (unsigned j = 0; j < BRW_IMAGE_PARAM_SIZE; j++) {
         bld.emit(SHADER_OPCODE_MOV_INDIRECT,
                  offset(tmp, bld, j), offset(image, bld, j),
                  indirect, brw_imm_ud((indirect_max + 1) * 4));
      }
      return tmp;
   }
}

void
fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
                         unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
      new_inst->dst = offset(new_inst->dst, bld, i);
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == VGRF)
            new_inst->src[j] = offset(new_inst->src[j], bld, i);

      bld.emit(new_inst);
   }
}

/**
 * Get the matching channel register datatype for an image intrinsic of the
 * specified GLSL image type.
 */
static brw_reg_type
get_image_base_type(const glsl_type *type)
{
   switch ((glsl_base_type)type->sampled_type) {
   case GLSL_TYPE_UINT:
      return BRW_REGISTER_TYPE_UD;
   case GLSL_TYPE_INT:
      return BRW_REGISTER_TYPE_D;
   case GLSL_TYPE_FLOAT:
      return BRW_REGISTER_TYPE_F;
   default:
      unreachable("Not reached.");
   }
}

/**
 * Get the appropriate atomic op for an image atomic intrinsic.
 */
static unsigned
get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
{
   switch (op) {
   case nir_intrinsic_image_atomic_add:
      return BRW_AOP_ADD;
   case nir_intrinsic_image_atomic_min:
      return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
              BRW_AOP_IMIN : BRW_AOP_UMIN);
   case nir_intrinsic_image_atomic_max:
      return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
              BRW_AOP_IMAX : BRW_AOP_UMAX);
   case nir_intrinsic_image_atomic_and:
      return BRW_AOP_AND;
   case nir_intrinsic_image_atomic_or:
      return BRW_AOP_OR;
   case nir_intrinsic_image_atomic_xor:
      return BRW_AOP_XOR;
   case nir_intrinsic_image_atomic_exchange:
      return BRW_AOP_MOV;
   case nir_intrinsic_image_atomic_comp_swap:
      return BRW_AOP_CMPWR;
   default:
      unreachable("Not reachable.");
   }
}

static fs_inst *
emit_pixel_interpolater_send(const fs_builder &bld,
                             enum opcode opcode,
                             const fs_reg &dst,
                             const fs_reg &src,
                             const fs_reg &desc,
                             glsl_interp_qualifier interpolation)
{
   fs_inst *inst;
   fs_reg payload;
   int mlen;

   if (src.file == BAD_FILE) {
      /* Dummy payload */
      payload = bld.vgrf(BRW_REGISTER_TYPE_F, 1);
      mlen = 1;
   } else {
      payload = src;
      mlen = 2 * bld.dispatch_width() / 8;
   }

   inst = bld.emit(opcode, dst, payload, desc);
   inst->mlen = mlen;
   /* 2 floats per slot returned */
   inst->regs_written = 2 * bld.dispatch_width() / 8;
   inst->pi_noperspective = interpolation == INTERP_QUALIFIER_NOPERSPECTIVE;

   return inst;
}

/**
 * Computes 1 << x, given a D/UD register containing some value x.
 */
static fs_reg
intexp2(const fs_builder &bld, const fs_reg &x)
{
   assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);

   fs_reg result = bld.vgrf(x.type, 1);
   fs_reg one = bld.vgrf(x.type, 1);

   bld.MOV(one, retype(brw_imm_d(1), one.type));
   bld.SHL(result, one, x);
   return result;
}

void
fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data =
      (struct brw_gs_prog_data *) prog_data;

   if (gs_compile->control_data_header_size_bits == 0)
      return;

   /* We can only do EndPrimitive() functionality when the control data
    * consists of cut bits.  Fortunately, the only time it isn't is when the
    * output type is points, in which case EndPrimitive() is a no-op.
    */
   if (gs_prog_data->control_data_format !=
       GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
      return;
   }

   /* Cut bits use one bit per vertex. */
   assert(gs_compile->control_data_bits_per_vertex == 1);

   fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
   vertex_count.type = BRW_REGISTER_TYPE_UD;

   /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
    * vertex n, 0 otherwise.  So all we need to do here is mark bit
    * (vertex_count - 1) % 32 in the cut_bits register to indicate that
    * EndPrimitive() was called after emitting vertex (vertex_count - 1);
    * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
    *
    * Note that if EndPrimitive() is called before emitting any vertices, this
    * will cause us to set bit 31 of the control_data_bits register to 1.
    * That's fine because:
    *
    * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
    *   output, so the hardware will ignore cut bit 31.
    *
    * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
    *   last vertex, so setting cut bit 31 has no effect (since the primitive
    *   is automatically ended when the GS terminates).
    *
    * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
    *   control_data_bits register to 0 when the first vertex is emitted.
    */

   const fs_builder abld = bld.annotate("end primitive");

   /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
   fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
   fs_reg mask = intexp2(abld, prev_count);
   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
    * ((vertex_count - 1) % 32).
    */
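   /* Worked example (illustrative): with vertex_count == 34, prev_count is
    * 33 and the SHL in intexp2() masks the shift count to 33 % 32 == 1,
    * setting cut bit 1 -- exactly (34 - 1) % 32, the bit for the most
    * recently emitted vertex.
    */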
   abld.OR(this->control_data_bits, this->control_data_bits, mask);
}

void
fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
{
   assert(stage == MESA_SHADER_GEOMETRY);
   assert(gs_compile->control_data_bits_per_vertex != 0);

   struct brw_gs_prog_data *gs_prog_data =
      (struct brw_gs_prog_data *) prog_data;

   const fs_builder abld = bld.annotate("emit control data bits");
   const fs_builder fwa_bld = bld.exec_all();

   /* We use a single UD register to accumulate control data bits (32 bits
    * for each of the SIMD8 channels).  So we need to write a DWord (32 bits)
    * at a time.
    *
    * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
    * We have to select a 128-bit group via the Global and Per-Slot Offsets,
    * then use the Channel Mask phase to enable/disable which DWord within
    * that group to write.  (Remember, different SIMD8 channels may have
    * emitted different numbers of vertices, so we may need per-slot offsets.)
    *
    * Channel masking presents an annoying problem: we may have to replicate
    * the data up to 4 times:
    *
    * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
    *
    * To avoid penalizing shaders that emit a small number of vertices, we
    * can avoid these sometimes: if the size of the control data header is
    * <= 128 bits, then there is only 1 OWord.  All SIMD8 channels will land
    * in the same 128-bit group, so we can skip per-slot offsets.
    *
    * Similarly, if the control data header is <= 32 bits, there is only one
    * DWord, so we can skip channel masks.
    */
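   /* For example (added for clarity): a GS whose control data header is 16
    * bits fits in one DWord and one OWord, so both channel masks and
    * per-slot offsets can be skipped; a 64-bit header needs channel masks
    * but still only one OWord; past 128 bits it needs per-slot offsets too,
    * as set up below.
    */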
   enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;

   fs_reg channel_mask, per_slot_offset;

   if (gs_compile->control_data_header_size_bits > 32) {
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
      channel_mask = vgrf(glsl_type::uint_type);
   }

   if (gs_compile->control_data_header_size_bits > 128) {
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
      per_slot_offset = vgrf(glsl_type::uint_type);
   }

   /* Figure out which DWord we're trying to write to using the formula:
    *
    *    dword_index = (vertex_count - 1) * bits_per_vertex / 32
    *
    * Since bits_per_vertex is a power of two, and is known at compile
    * time, this can be optimized to:
    *
    *    dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
    */
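   /* Worked example (added for clarity): in stream mode bits_per_vertex is
    * 2, so dword_index = (vertex_count - 1) * 2 / 32 = (vertex_count - 1)
    * >> 4: vertices 1..16 accumulate into DWord 0, vertices 17..32 into
    * DWord 1, and so on.  Note that log2() above denotes _mesa_fls(), i.e.
    * one plus the integer log2, which is why the code below shifts by
    * 6 - _mesa_fls(bits_per_vertex).
    */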
   if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
      fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
      unsigned log2_bits_per_vertex =
         _mesa_fls(gs_compile->control_data_bits_per_vertex);
      abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));

      if (per_slot_offset.file != BAD_FILE) {
         /* Set the per-slot offset to dword_index / 4, so that we'll write to
          * the appropriate OWord within the control data header.
          */
         abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
      }

      /* Set the channel masks to 1 << (dword_index % 4), so that we'll
       * write to the appropriate DWORD within the OWORD.
       */
      fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
      channel_mask = intexp2(fwa_bld, channel);
      /* Then the channel masks need to be in bits 23:16. */
      fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
   }

   /* Store the control data bits in the message payload and send it. */
   int mlen = 2;
   if (channel_mask.file != BAD_FILE)
      mlen += 4; /* channel masks, plus 3 extra copies of the data */
   if (per_slot_offset.file != BAD_FILE)
      mlen++;

   fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
   fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
   int i = 0;
   sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
   if (per_slot_offset.file != BAD_FILE)
      sources[i++] = per_slot_offset;
   if (channel_mask.file != BAD_FILE)
      sources[i++] = channel_mask;
   while (i < mlen) {
      sources[i++] = this->control_data_bits;
   }

   abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
   fs_inst *inst = abld.emit(opcode, reg_undef, payload);
   inst->mlen = mlen;

   /* We need to increment Global Offset by 256-bits to make room for
    * Broadwell's extra "Vertex Count" payload at the beginning of the
    * URB entry.  Since this is an OWord message, Global Offset is counted
    * in 128-bit units, so we must set it to 2.
    */
   if (gs_prog_data->static_vertex_count == -1)
      inst->offset = 2;
}

void
fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
                                            unsigned stream_id)
{
   /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */

   /* Note: we are calling this *before* increasing vertex_count, so
    * this->vertex_count == vertex_count - 1 in the formula above.
    */

   /* Stream mode uses 2 bits per vertex */
   assert(gs_compile->control_data_bits_per_vertex == 2);

   /* Must be a valid stream */
   assert(stream_id >= 0 && stream_id < MAX_VERTEX_STREAMS);

   /* Control data bits are initialized to 0 so we don't have to set any
    * bits when sending vertices to stream 0.
    */
   if (stream_id == 0)
      return;

   const fs_builder abld = bld.annotate("set stream control data bits", NULL);

   /* reg::sid = stream_id */
   fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   abld.MOV(sid, brw_imm_ud(stream_id));

   /* reg:shift_count = 2 * (vertex_count - 1) */
   fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));

   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
    * stream_id << ((2 * (vertex_count - 1)) % 32).
    */
   fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   abld.SHL(mask, sid, shift_count);
   abld.OR(this->control_data_bits, this->control_data_bits, mask);
}

void
fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
                           unsigned stream_id)
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data =
      (struct brw_gs_prog_data *) prog_data;

   fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
   vertex_count.type = BRW_REGISTER_TYPE_UD;

   /* Haswell and later hardware ignores the "Render Stream Select" bits
    * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
    * and instead sends all primitives down the pipeline for rasterization.
    * If the SOL stage is enabled, "Render Stream Select" is honored and
    * primitives bound to non-zero streams are discarded after stream output.
    *
    * Since the only purpose of primitives sent to non-zero streams is to
    * be recorded by transform feedback, we can simply discard all geometry
    * bound to these streams when transform feedback is disabled.
    */
   if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
      return;

   /* If we're outputting 32 control data bits or less, then we can wait
    * until the shader is over to output them all.  Otherwise we need to
    * output them as we go.  Now is the time to do it, since we're about to
    * output the vertex_count'th vertex, so it's guaranteed that the
    * control data bits associated with the (vertex_count - 1)th vertex are
    * correct.
    */
   if (gs_compile->control_data_header_size_bits > 32) {
      const fs_builder abld =
         bld.annotate("emit vertex: emit control data bits");

      /* Only emit control data bits if we've finished accumulating a batch
       * of 32 bits.  This is the case when:
       *
       *     (vertex_count * bits_per_vertex) % 32 == 0
       *
       * (in other words, when the last 5 bits of vertex_count *
       * bits_per_vertex are 0).  Assuming bits_per_vertex == 2^n for some
       * integer n (which is always the case, since bits_per_vertex is
       * always 1 or 2), this is equivalent to requiring that the last 5-n
       * bits of vertex_count are 0:
       *
       *     vertex_count & (2^(5-n) - 1) == 0
       *
       * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
       * equivalent to requiring that:
       *
       *     vertex_count & (32 / bits_per_vertex - 1) == 0
       *
       * TODO: If vertex_count is an immediate, we could do some of this math
       * at compile time...
       */
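      /* Worked example (added for clarity): with bits_per_vertex == 2 the
       * test below reduces to vertex_count & 15 == 0, so a full DWord of
       * control data is flushed after every 16th vertex.
       */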
1956 abld.AND(bld.null_reg_d(), vertex_count,
1957 brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
1958 inst->conditional_mod = BRW_CONDITIONAL_Z;
1960 abld.IF(BRW_PREDICATE_NORMAL);
1961 /* If vertex_count is 0, then no control data bits have been
1962 * accumulated yet, so we can skip emitting them.
1964 abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
1965 BRW_CONDITIONAL_NEQ);
1966 abld.IF(BRW_PREDICATE_NORMAL);
1967 emit_gs_control_data_bits(vertex_count);
1968 abld.emit(BRW_OPCODE_ENDIF);
1970 /* Reset control_data_bits to 0 so we can start accumulating a new
1971 * batch.
1972 *
1973 * Note: in the case where vertex_count == 0, this neutralizes the
1974 * effect of any call to EndPrimitive() that the shader may have
1975 * made before outputting its first vertex.
1977 inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
1978 inst->force_writemask_all = true;
1979 abld.emit(BRW_OPCODE_ENDIF);
1982 emit_urb_writes(vertex_count);
1984 /* In stream mode we have to set control data bits for all vertices
1985 * unless we have disabled control data bits completely (which we do
1986 * for GL_POINTS outputs that don't use streams).
1987 */
1988 if (gs_compile->control_data_header_size_bits > 0 &&
1989 gs_prog_data->control_data_format ==
1990 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
1991 set_gs_stream_control_data_bits(vertex_count, stream_id);
1996 fs_visitor::emit_gs_input_load(const fs_reg &dst,
1997 const nir_src &vertex_src,
1998 unsigned base_offset,
1999 const nir_src &offset_src,
2000 unsigned num_components)
2002 struct brw_gs_prog_data *gs_prog_data = (struct brw_gs_prog_data *) prog_data;
2004 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
2005 nir_const_value *offset_const = nir_src_as_const_value(offset_src);
2006 const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
2008 /* Offset 0 is the VUE header, which contains VARYING_SLOT_LAYER [.y],
2009 * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w]. Only
2010 * gl_PointSize is available as a GS input, however, so it must be that.
2012 const bool is_point_size = (base_offset == 0);
2014 /* TODO: figure out push input layout for invocations == 1 */
2015 if (gs_prog_data->invocations == 1 &&
2016 offset_const != NULL && vertex_const != NULL &&
2017 4 * (base_offset + offset_const->u32[0]) < push_reg_count) {
2018 int imm_offset = (base_offset + offset_const->u32[0]) * 4 +
2019 vertex_const->u32[0] * push_reg_count;
2020 /* This input was pushed into registers. */
2021 if (is_point_size) {
2022 /* gl_PointSize comes in .w */
2023 bld.MOV(dst, fs_reg(ATTR, imm_offset + 3, dst.type));
2025 for (unsigned i = 0; i < num_components; i++) {
2026 bld.MOV(offset(dst, bld, i),
2027 fs_reg(ATTR, imm_offset + i, dst.type));
2033 /* Resort to the pull model. Ensure the VUE handles are provided. */
2034 gs_prog_data->base.include_vue_handles = true;
2036 unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
2037 fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2039 if (gs_prog_data->invocations == 1) {
2040 if (vertex_const) {
2041 /* The vertex index is constant; just select the proper URB handle. */
2042 icp_handle =
2043 retype(brw_vec8_grf(first_icp_handle + vertex_const->i32[0], 0),
2044 BRW_REGISTER_TYPE_UD);
2046 /* The vertex index is non-constant. We need to use indirect
2047 * addressing to fetch the proper URB handle.
2049 * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2050 * indicating that channel <n> should read the handle from
2051 * DWord <n>. We convert that to bytes by multiplying by 4.
2053 * Next, we convert the vertex index to bytes by multiplying
2054 * by 32 (shifting by 5), and add the two together. This is
2055 * the final indirect byte offset.
2057 fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_W, 1);
2058 fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2059 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2060 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2062 /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
2063 bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
2064 /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2065 bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
2066 /* Convert vertex_index to bytes (multiply by 32) */
2067 bld.SHL(vertex_offset_bytes,
2068 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2069 brw_imm_ud(5u));
2070 bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
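/* Illustrative example (not from the original source): for vertex_index = 3
 * the per-channel offsets become 3 * 32 + <28, 24, ..., 0> bytes, so
 * channel n reads DWord n of the fourth vertex's URB handle register.
 */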
2072 /* Use first_icp_handle as the base offset. There is one register
2073 * of URB handles per vertex, so inform the register allocator that
2074 * we might read up to nir->info.gs.vertices_in registers.
2076 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2077 fs_reg(brw_vec8_grf(first_icp_handle, 0)),
2078 fs_reg(icp_offset_bytes),
2079 brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
2082 assert(gs_prog_data->invocations > 1);
2084 if (vertex_const) {
2085 assert(devinfo->gen >= 9 || vertex_const->i32[0] <= 5);
2086 bld.MOV(icp_handle,
2087 retype(brw_vec1_grf(first_icp_handle +
2088 vertex_const->i32[0] / 8,
2089 vertex_const->i32[0] % 8),
2090 BRW_REGISTER_TYPE_UD));
2092 /* The vertex index is non-constant. We need to use indirect
2093 * addressing to fetch the proper URB handle.
2096 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2098 /* Convert vertex_index to bytes (multiply by 4) */
2099 bld.SHL(icp_offset_bytes,
2100 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2101 brw_imm_ud(2u));
2103 /* Use first_icp_handle as the base offset. There is one DWord
2104 * of URB handles per vertex, so inform the register allocator that
2105 * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
2107 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2108 fs_reg(brw_vec8_grf(first_icp_handle, 0)),
2109 fs_reg(icp_offset_bytes),
2110 brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
2111 REG_SIZE));
2117 fs_reg tmp_dst = dst;
2118 fs_reg indirect_offset = get_nir_src(offset_src);
2119 unsigned num_iterations = 1;
2120 unsigned orig_num_components = num_components;
2122 if (type_sz(dst.type) == 8) {
2123 if (num_components > 2) {
2124 num_iterations = 2;
2125 num_components = 2;
2126 }
2127 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
2128 tmp_dst = tmp;
2129 }
2131 for (unsigned iter = 0; iter < num_iterations; iter++) {
2133 /* Constant indexing - use global offset. */
2134 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp_dst, icp_handle);
2135 inst->offset = base_offset + offset_const->u32[0];
2136 inst->base_mrf = -1;
2138 inst->regs_written = num_components * type_sz(tmp_dst.type) / 4;
2140 /* Indirect indexing - use per-slot offsets as well. */
2141 const fs_reg srcs[] = { icp_handle, indirect_offset };
2142 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2143 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2145 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp_dst, payload);
2146 inst->offset = base_offset;
2147 inst->base_mrf = -1;
2149 inst->regs_written = num_components * type_sz(tmp_dst.type) / 4;
2152 if (type_sz(dst.type) == 8) {
2153 shuffle_32bit_load_result_to_64bit_data(
2154 bld, tmp_dst, retype(tmp_dst, BRW_REGISTER_TYPE_F), num_components);
2156 for (unsigned c = 0; c < num_components; c++)
2157 bld.MOV(offset(dst, bld, iter * 2 + c), offset(tmp_dst, bld, c));
2160 if (num_iterations > 1) {
2161 num_components = orig_num_components - 2;
2165 fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2166 bld.ADD(new_indirect, indirect_offset, brw_imm_ud(1u));
2167 indirect_offset = new_indirect;
2172 if (is_point_size) {
2173 /* Read the whole VUE header (because of alignment) and read .w. */
2174 fs_reg tmp = bld.vgrf(dst.type, 4);
2175 inst->dst = tmp;
2176 inst->regs_written = 4;
2177 bld.MOV(dst, offset(tmp, bld, 3));
2182 fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
2184 nir_src *offset_src = nir_get_io_offset_src(instr);
2185 nir_const_value *const_value = nir_src_as_const_value(*offset_src);
2187 if (const_value) {
2188 /* The only constant offset we should find is 0. brw_nir.c's
2189 * add_const_offset_to_base() will fold other constant offsets
2190 * into instr->const_index[0].
2192 assert(const_value->u32[0] == 0);
2193 return fs_reg();
2194 }
2196 return get_nir_src(*offset_src);
2200 do_untyped_vector_read(const fs_builder &bld,
2202 const fs_reg surf_index,
2203 const fs_reg offset_reg,
2204 unsigned num_components)
2206 if (type_sz(dest.type) == 4) {
2207 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
2210 BRW_PREDICATE_NONE);
2211 read_result.type = dest.type;
2212 for (unsigned i = 0; i < num_components; i++)
2213 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
2214 } else if (type_sz(dest.type) == 8) {
2215 /* Reading a dvec, so we need to:
2217 * 1. Multiply num_components by 2, to account for the fact that we
2218 * need to read 64-bit components.
2219 * 2. Shuffle the result of the load to form valid 64-bit elements
2220 * 3. Emit a second load (for components z/w) if needed.
2222 fs_reg read_offset = bld.vgrf(BRW_REGISTER_TYPE_UD);
2223 bld.MOV(read_offset, offset_reg);
2225 int iters = num_components <= 2 ? 1 : 2;
2227 /* Load the dvec, the first iteration loads components x/y, the second
2228 * iteration, if needed, loads components z/w
2230 for (int it = 0; it < iters; it++) {
2231 /* Compute number of components to read in this iteration */
2232 int iter_components = MIN2(2, num_components);
2233 num_components -= iter_components;
2235 /* Read. Since this message reads 32-bit components, we need to
2236 * read twice as many components.
2238 fs_reg read_result = emit_untyped_read(bld, surf_index, read_offset,
2240 iter_components * 2,
2241 BRW_PREDICATE_NONE);
2243 /* Shuffle the 32-bit load result into valid 64-bit data */
2244 const fs_reg packed_result = bld.vgrf(dest.type, iter_components);
2245 shuffle_32bit_load_result_to_64bit_data(
2246 bld, packed_result, read_result, iter_components);
2248 /* Move each component to its destination */
2249 read_result = retype(read_result, BRW_REGISTER_TYPE_DF);
2250 for (int c = 0; c < iter_components; c++) {
2251 bld.MOV(offset(dest, bld, it * 2 + c),
2252 offset(packed_result, bld, c));
2255 bld.ADD(read_offset, read_offset, brw_imm_ud(16));
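/* Illustrative walk-through (not from the original source): a dvec3 read
 * takes two iterations here: the first loads 4 32-bit components and
 * shuffles them into .xy, then read_offset advances by 16 bytes and the
 * second iteration loads the remaining 2 32-bit components for .z.
 */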
2258 unreachable("Unsupported type");
2263 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
2264 nir_intrinsic_instr *instr)
2266 assert(stage == MESA_SHADER_VERTEX);
2269 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2270 dest = get_nir_dest(instr->dest);
2272 switch (instr->intrinsic) {
2273 case nir_intrinsic_load_vertex_id:
2274 unreachable("should be lowered by lower_vertex_id()");
2276 case nir_intrinsic_load_vertex_id_zero_base:
2277 case nir_intrinsic_load_base_vertex:
2278 case nir_intrinsic_load_instance_id:
2279 case nir_intrinsic_load_base_instance:
2280 case nir_intrinsic_load_draw_id: {
2281 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
2282 fs_reg val = nir_system_values[sv];
2283 assert(val.file != BAD_FILE);
2284 dest.type = val.type;
2285 bld.MOV(dest, val);
2286 break;
2290 nir_emit_intrinsic(bld, instr);
2296 fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
2297 nir_intrinsic_instr *instr)
2299 assert(stage == MESA_SHADER_TESS_CTRL);
2300 struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
2301 struct brw_tcs_prog_data *tcs_prog_data =
2302 (struct brw_tcs_prog_data *) prog_data;
2305 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2306 dst = get_nir_dest(instr->dest);
2308 switch (instr->intrinsic) {
2309 case nir_intrinsic_load_primitive_id:
2310 bld.MOV(dst, fs_reg(brw_vec1_grf(0, 1)));
2311 break;
2312 case nir_intrinsic_load_invocation_id:
2313 bld.MOV(retype(dst, invocation_id.type), invocation_id);
2314 break;
2315 case nir_intrinsic_load_patch_vertices_in:
2316 bld.MOV(retype(dst, BRW_REGISTER_TYPE_D),
2317 brw_imm_d(tcs_key->input_vertices));
2318 break;
2320 case nir_intrinsic_barrier: {
2321 if (tcs_prog_data->instances == 1)
2324 fs_reg m0 = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2325 fs_reg m0_2 = component(m0, 2);
2327 const fs_builder chanbld = bld.exec_all().group(1, 0);
2329 /* Zero the message header */
2330 bld.exec_all().MOV(m0, brw_imm_ud(0u));
2332 /* Copy "Barrier ID" from r0.2, bits 16:13 */
2333 chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
2334 brw_imm_ud(INTEL_MASK(16, 13)));
2336 /* Shift it up to bits 27:24. */
2337 chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
2339 /* Set the Barrier Count and the enable bit */
2340 chanbld.OR(m0_2, m0_2,
2341 brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
2343 bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
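/* Illustrative m0.2 layout produced above, assuming 8 TCS instances:
 * bits 27:24 hold the barrier ID copied down from r0.2, bit 15 is the
 * barrier enable, and bits 14:9 hold the barrier count (8 << 9).
 */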
2347 case nir_intrinsic_load_input:
2348 unreachable("nir_lower_io should never give us these.");
2351 case nir_intrinsic_load_per_vertex_input: {
2352 fs_reg indirect_offset = get_indirect_offset(instr);
2353 unsigned imm_offset = instr->const_index[0];
2355 const nir_src &vertex_src = instr->src[0];
2356 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
2361 fs_reg icp_handle;
2362 if (vertex_const) {
2363 /* Emit a MOV to resolve <0,1,0> regioning. */
2364 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2365 bld.MOV(icp_handle,
2366 retype(brw_vec1_grf(1 + (vertex_const->i32[0] >> 3),
2367 vertex_const->i32[0] & 7),
2368 BRW_REGISTER_TYPE_UD));
2369 } else if (tcs_prog_data->instances == 1 &&
2370 vertex_src.is_ssa &&
2371 vertex_src.ssa->parent_instr->type == nir_instr_type_intrinsic &&
2372 nir_instr_as_intrinsic(vertex_src.ssa->parent_instr)->intrinsic == nir_intrinsic_load_invocation_id) {
2373 /* For the common case of only 1 instance, an array index of
2374 * gl_InvocationID means reading g1. Skip all the indirect work.
2376 icp_handle = retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
2378 /* The vertex index is non-constant. We need to use indirect
2379 * addressing to fetch the proper URB handle.
2381 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2383 /* Each ICP handle is a single DWord (4 bytes) */
2384 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2385 bld.SHL(vertex_offset_bytes,
2386 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2387 brw_imm_ud(2u));
2389 /* Start at g1. We might read up to 4 registers. */
2390 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2391 fs_reg(brw_vec8_grf(1, 0)), vertex_offset_bytes,
2392 brw_imm_ud(4 * REG_SIZE));
2395 /* We can only read two double components with each URB read, so
2396 * we send two read messages in that case, each one loading up to
2397 * two double components.
2399 unsigned num_iterations = 1;
2400 unsigned num_components = instr->num_components;
2401 fs_reg orig_dst = dst;
2402 if (type_sz(dst.type) == 8) {
2403 if (instr->num_components > 2) {
2404 num_iterations = 2;
2405 num_components = 2;
2406 }
2408 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
2409 dst = tmp;
2410 }
2412 for (unsigned iter = 0; iter < num_iterations; iter++) {
2413 if (indirect_offset.file == BAD_FILE) {
2414 /* Constant indexing - use global offset. */
2415 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
2416 inst->offset = imm_offset;
2418 inst->base_mrf = -1;
2420 /* Indirect indexing - use per-slot offsets as well. */
2421 const fs_reg srcs[] = { icp_handle, indirect_offset };
2422 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2423 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2425 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
2426 inst->offset = imm_offset;
2427 inst->base_mrf = -1;
2430 inst->regs_written = num_components * type_sz(dst.type) / 4;
2432 /* If we are reading 64-bit data using 32-bit read messages we need to
2433 * build proper 64-bit data elements by shuffling the low and high
2434 * 32-bit components around like we do for other things like UBOs or
2435 * SSBOs.
2436 */
2437 if (type_sz(dst.type) == 8) {
2438 shuffle_32bit_load_result_to_64bit_data(
2439 bld, dst, retype(dst, BRW_REGISTER_TYPE_F), num_components);
2441 for (unsigned c = 0; c < num_components; c++) {
2442 bld.MOV(offset(orig_dst, bld, iter * 2 + c),
2443 offset(dst, bld, c));
2447 /* Copy the temporary to the destination to deal with writemasking.
2449 * Also attempt to deal with gl_PointSize being in the .w component.
2451 if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
2452 assert(type_sz(dst.type) < 8);
2453 inst->dst = bld.vgrf(dst.type, 4);
2454 inst->regs_written = 4;
2455 bld.MOV(dst, offset(inst->dst, bld, 3));
2458 /* If we are loading double data and we need a second read message,
2459 * adjust the write offset.
2460 */
2461 if (num_iterations > 1) {
2462 num_components = instr->num_components - 2;
2463 if (indirect_offset.file == BAD_FILE) {
2464 imm_offset++;
2465 } else {
2466 fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2467 bld.ADD(new_indirect, indirect_offset, brw_imm_ud(1u));
2468 indirect_offset = new_indirect;
2475 case nir_intrinsic_load_output:
2476 case nir_intrinsic_load_per_vertex_output: {
2477 fs_reg indirect_offset = get_indirect_offset(instr);
2478 unsigned imm_offset = instr->const_index[0];
2481 if (indirect_offset.file == BAD_FILE) {
2482 /* Replicate the patch handle to all enabled channels */
2483 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2484 bld.MOV(patch_handle,
2485 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
2487 if (imm_offset == 0) {
2488 /* This is a read of gl_TessLevelInner[], which lives in the
2489 * Patch URB header. The layout depends on the domain.
2491 dst.type = BRW_REGISTER_TYPE_F;
2492 switch (tcs_key->tes_primitive_mode) {
2494 /* DWords 3-2 (reversed) */
2495 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
2497 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, patch_handle);
2500 inst->base_mrf = -1;
2501 inst->regs_written = 4;
2503 /* dst.xy = tmp.wz */
2504 bld.MOV(dst, offset(tmp, bld, 3));
2505 bld.MOV(offset(dst, bld, 1), offset(tmp, bld, 2));
2509 /* DWord 4; hardcode offset = 1 and regs_written = 1 */
2510 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, patch_handle);
2513 inst->base_mrf = -1;
2514 inst->regs_written = 1;
2517 /* All channels are undefined. */
2520 unreachable("Bogus tessellation domain");
2522 } else if (imm_offset == 1) {
2523 /* This is a read of gl_TessLevelOuter[], which lives in the
2524 * Patch URB header. The layout depends on the domain.
2526 dst.type = BRW_REGISTER_TYPE_F;
2528 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
2529 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, patch_handle);
2532 inst->base_mrf = -1;
2533 inst->regs_written = 4;
2535 /* Reswizzle: WZYX */
2537 offset(tmp, bld, 3),
2538 offset(tmp, bld, 2),
2539 offset(tmp, bld, 1),
2540 offset(tmp, bld, 0),
2543 unsigned num_components;
2544 switch (tcs_key->tes_primitive_mode) {
2552 /* Isolines are not reversed; swizzle .zw -> .xy */
2553 srcs[0] = offset(tmp, bld, 2);
2554 srcs[1] = offset(tmp, bld, 3);
2558 unreachable("Bogus tessellation domain");
2560 bld.LOAD_PAYLOAD(dst, srcs, num_components, 0);
2562 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, patch_handle);
2563 inst->offset = imm_offset;
2565 inst->base_mrf = -1;
2566 inst->regs_written = instr->num_components;
2569 /* Indirect indexing - use per-slot offsets as well. */
2570 const fs_reg srcs[] = {
2571 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2574 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2575 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2577 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
2578 inst->offset = imm_offset;
2580 inst->base_mrf = -1;
2581 inst->regs_written = instr->num_components;
2586 case nir_intrinsic_store_output:
2587 case nir_intrinsic_store_per_vertex_output: {
2588 fs_reg value = get_nir_src(instr->src[0]);
2589 bool is_64bit = (instr->src[0].is_ssa ?
2590 instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size) == 64;
2591 fs_reg indirect_offset = get_indirect_offset(instr);
2592 unsigned imm_offset = instr->const_index[0];
2593 unsigned swiz = BRW_SWIZZLE_XYZW;
2594 unsigned mask = instr->const_index[1];
2595 unsigned header_regs = 0;
2597 srcs[header_regs++] = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD);
2599 if (indirect_offset.file != BAD_FILE) {
2600 srcs[header_regs++] = indirect_offset;
2601 } else if (!is_passthrough_shader) {
2602 if (imm_offset == 0) {
2603 value.type = BRW_REGISTER_TYPE_F;
2605 mask &= (1 << tesslevel_inner_components(tcs_key->tes_primitive_mode)) - 1;
2607 /* This is a write to gl_TessLevelInner[], which lives in the
2608 * Patch URB header. The layout depends on the domain.
2610 switch (tcs_key->tes_primitive_mode) {
2612 /* gl_TessLevelInner[].xy lives at DWords 3-2 (reversed).
2613 * We use an XXYX swizzle to put .xy into the .wz channels
2614 * in reverse order, and use a .zw writemask.
2615 */
2616 mask = writemask_for_backwards_vector(mask);
2617 swiz = BRW_SWIZZLE4(0, 0, 1, 0);
2620 /* gl_TessLevelInner[].x lives at DWord 4, so we set the
2621 * writemask to X and bump the URB offset by 1.
2626 /* Skip; gl_TessLevelInner[] doesn't exist for isolines. */
2629 unreachable("Bogus tessellation domain");
2631 } else if (imm_offset == 1) {
2632 /* This is a write to gl_TessLevelOuter[] which lives in the
2633 * Patch URB Header at DWords 4-7. However, it's reversed, so
2634 * instead of .xyzw we have .wzyx.
2636 value.type = BRW_REGISTER_TYPE_F;
2638 mask &= (1 << tesslevel_outer_components(tcs_key->tes_primitive_mode)) - 1;
2640 if (tcs_key->tes_primitive_mode == GL_ISOLINES) {
2641 /* Isolines .xy should be stored in .zw, in order. */
2642 swiz = BRW_SWIZZLE4(0, 0, 0, 1);
2645 /* Other domains are reversed; store .wzyx instead of .xyzw */
2646 swiz = BRW_SWIZZLE_WZYX;
2647 mask = writemask_for_backwards_vector(mask);
2655 unsigned num_components = _mesa_fls(mask);
2658 /* We can only pack two 64-bit components in a single message, so send
2659 * 2 messages if we have more components
2661 unsigned num_iterations = 1;
2662 unsigned iter_components = num_components;
2663 if (is_64bit && instr->num_components > 2) {
2665 iter_components = 2;
2668 /* 64-bit data needs to be shuffled before we can write it to the URB.
2669 * We will use this temporary to shuffle the components in each
2670 * iteration.
2671 */
2672 fs_reg tmp =
2673 fs_reg(VGRF, alloc.allocate(2 * iter_components), value.type);
2675 for (unsigned iter = 0; iter < num_iterations; iter++) {
2676 if (!is_64bit && mask != WRITEMASK_XYZW) {
2677 srcs[header_regs++] = brw_imm_ud(mask << 16);
2678 opcode = indirect_offset.file != BAD_FILE ?
2679 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2680 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2681 } else if (is_64bit && ((mask & WRITEMASK_XY) != WRITEMASK_XY)) {
2682 /* Expand the 64-bit mask to 32-bit channels. We only handle
2683 * two channels in each iteration, so we only care about X/Y.
2685 unsigned mask32 = 0;
2686 if (mask & WRITEMASK_X)
2687 mask32 |= WRITEMASK_XY;
2688 if (mask & WRITEMASK_Y)
2689 mask32 |= WRITEMASK_ZW;
2691 /* If the mask does not include any of the channels X or Y there
2692 * is nothing to do in this iteration. Move on to the next couple
2693 * of 64-bit channels.
2694 */
2695 if (mask32 == 0) {
2696 mask >>= 2;
2697 imm_offset++;
2698 continue;
2699 }
2701 srcs[header_regs++] = brw_imm_ud(mask32 << 16);
2702 opcode = indirect_offset.file != BAD_FILE ?
2703 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2704 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2706 opcode = indirect_offset.file != BAD_FILE ?
2707 SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
2708 SHADER_OPCODE_URB_WRITE_SIMD8;
2711 for (unsigned i = 0; i < iter_components; i++) {
2712 if (!(mask & (1 << i)))
2716 srcs[header_regs + i] = offset(value, bld, BRW_GET_SWZ(swiz, i));
2718 /* We need to shuffle the 64-bit data to match the layout
2719 * expected by our 32-bit URB write messages. We use a temporary
2720 * for that.
2721 */
2722 unsigned channel = BRW_GET_SWZ(swiz, iter * 2 + i);
2723 shuffle_64bit_data_for_32bit_write(bld,
2724 retype(offset(tmp, bld, 2 * i), BRW_REGISTER_TYPE_F),
2725 retype(offset(value, bld, 2 * channel), BRW_REGISTER_TYPE_DF),
2728 /* Now copy the data to the destination */
2729 fs_reg dest = fs_reg(VGRF, alloc.allocate(2), value.type);
2730 unsigned idx = 2 * i;
2731 bld.MOV(dest, offset(tmp, bld, idx));
2732 bld.MOV(offset(dest, bld, 1), offset(tmp, bld, idx + 1));
2733 srcs[header_regs + idx] = dest;
2734 srcs[header_regs + idx + 1] = offset(dest, bld, 1);
2738 const unsigned mlen =
2739 header_regs + (is_64bit ? 2 * iter_components : iter_components);
2740 fs_reg payload =
2741 bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
2742 bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
2744 fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
2745 inst->offset = imm_offset;
2747 inst->base_mrf = -1;
2749 /* If this is a 64-bit attribute, select the next two 64-bit channels
2750 * to be handled in the next iteration.
2761 nir_emit_intrinsic(bld, instr);
2767 fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
2768 nir_intrinsic_instr *instr)
2770 assert(stage == MESA_SHADER_TESS_EVAL);
2771 struct brw_tes_prog_data *tes_prog_data = (struct brw_tes_prog_data *) prog_data;
2774 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2775 dest = get_nir_dest(instr->dest);
2777 switch (instr->intrinsic) {
2778 case nir_intrinsic_load_primitive_id:
2779 bld.MOV(dest, fs_reg(brw_vec1_grf(0, 1)));
2781 case nir_intrinsic_load_tess_coord:
2782 /* gl_TessCoord is part of the payload in g1-3 */
2783 for (unsigned i = 0; i < 3; i++) {
2784 bld.MOV(offset(dest, bld, i), fs_reg(brw_vec8_grf(1 + i, 0)));
2788 case nir_intrinsic_load_tess_level_outer:
2789 /* When the TES reads gl_TessLevelOuter, we ensure that the patch header
2790 * appears as a push-model input. So, we can simply use the ATTR file
2791 * rather than issuing URB read messages. The data is stored in the
2792 * high DWords in reverse order - DWord 7 contains .x, DWord 6 contains
2793 * .y, and so on.
2794 */
2795 switch (tes_prog_data->domain) {
2796 case BRW_TESS_DOMAIN_QUAD:
2797 for (unsigned i = 0; i < 4; i++)
2798 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 7 - i));
2800 case BRW_TESS_DOMAIN_TRI:
2801 for (unsigned i = 0; i < 3; i++)
2802 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 7 - i));
2804 case BRW_TESS_DOMAIN_ISOLINE:
2805 for (unsigned i = 0; i < 2; i++)
2806 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 6 + i));
2811 case nir_intrinsic_load_tess_level_inner:
2812 /* When the TES reads gl_TessLevelInner, we ensure that the patch header
2813 * appears as a push-model input. So, we can simply use the ATTR file
2814 * rather than issuing URB read messages.
2816 switch (tes_prog_data->domain) {
2817 case BRW_TESS_DOMAIN_QUAD:
2818 bld.MOV(dest, component(fs_reg(ATTR, 0), 3));
2819 bld.MOV(offset(dest, bld, 1), component(fs_reg(ATTR, 0), 2));
2821 case BRW_TESS_DOMAIN_TRI:
2822 bld.MOV(dest, component(fs_reg(ATTR, 0), 4));
2824 case BRW_TESS_DOMAIN_ISOLINE:
2825 /* ignore - value is undefined */
2830 case nir_intrinsic_load_input:
2831 case nir_intrinsic_load_per_vertex_input: {
2832 fs_reg indirect_offset = get_indirect_offset(instr);
2833 unsigned imm_offset = instr->const_index[0];
2836 if (indirect_offset.file == BAD_FILE) {
2837 /* Arbitrarily only push up to 32 vec4 slots worth of data,
2838 * which is 16 registers (since each holds 2 vec4 slots).
2840 const unsigned max_push_slots = 32;
2841 if (imm_offset < max_push_slots) {
2842 fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
2843 for (int i = 0; i < instr->num_components; i++) {
2844 unsigned comp = 16 / type_sz(dest.type) * (imm_offset % 2) + i;
2845 bld.MOV(offset(dest, bld, i), component(src, comp));
2847 tes_prog_data->base.urb_read_length =
2848 MAX2(tes_prog_data->base.urb_read_length,
2849 DIV_ROUND_UP(imm_offset + 1, 2));
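/* Illustrative example (not from the original source): a float input at
 * imm_offset = 3 reads ATTR register 1 (vec4 slots 2-3); with
 * type_sz == 4 the component index is 4 * (3 % 2) + i = 4 + i, i.e. the
 * second vec4 slot held in that register.
 */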
2851 /* Replicate the patch handle to all enabled channels */
2852 const fs_reg srcs[] = {
2853 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)
2855 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2856 bld.LOAD_PAYLOAD(patch_handle, srcs, ARRAY_SIZE(srcs), 0);
2858 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dest, patch_handle);
2860 inst->offset = imm_offset;
2861 inst->base_mrf = -1;
2862 inst->regs_written = instr->num_components;
2865 /* Indirect indexing - use per-slot offsets as well. */
2866 const fs_reg srcs[] = {
2867 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2870 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2871 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2873 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dest, payload);
2875 inst->offset = imm_offset;
2876 inst->base_mrf = -1;
2877 inst->regs_written = instr->num_components;
2882 nir_emit_intrinsic(bld, instr);
2888 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
2889 nir_intrinsic_instr *instr)
2891 assert(stage == MESA_SHADER_GEOMETRY);
2892 fs_reg indirect_offset;
2895 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2896 dest = get_nir_dest(instr->dest);
2898 switch (instr->intrinsic) {
2899 case nir_intrinsic_load_primitive_id:
2900 assert(stage == MESA_SHADER_GEOMETRY);
2901 assert(((struct brw_gs_prog_data *)prog_data)->include_primitive_id);
2902 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
2903 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
2906 case nir_intrinsic_load_input:
2907 unreachable("load_input intrinsics are invalid for the GS stage");
2909 case nir_intrinsic_load_per_vertex_input:
2910 emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
2911 instr->src[1], instr->num_components);
2914 case nir_intrinsic_emit_vertex_with_counter:
2915 emit_gs_vertex(instr->src[0], instr->const_index[0]);
2918 case nir_intrinsic_end_primitive_with_counter:
2919 emit_gs_end_primitive(instr->src[0]);
2922 case nir_intrinsic_set_vertex_count:
2923 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
2926 case nir_intrinsic_load_invocation_id: {
2927 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
2928 assert(val.file != BAD_FILE);
2929 dest.type = val.type;
2930 bld.MOV(dest, val);
2931 break;
2935 nir_emit_intrinsic(bld, instr);
2941 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
2942 nir_intrinsic_instr *instr)
2944 assert(stage == MESA_SHADER_FRAGMENT);
2945 struct brw_wm_prog_data *wm_prog_data =
2946 (struct brw_wm_prog_data *) prog_data;
2947 const struct brw_wm_prog_key *wm_key = (const struct brw_wm_prog_key *) key;
2950 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2951 dest = get_nir_dest(instr->dest);
2953 switch (instr->intrinsic) {
2954 case nir_intrinsic_load_front_face:
2955 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
2956 *emit_frontfacing_interpolation());
2959 case nir_intrinsic_load_sample_pos: {
2960 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
2961 assert(sample_pos.file != BAD_FILE);
2962 dest.type = sample_pos.type;
2963 bld.MOV(dest, sample_pos);
2964 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
2968 case nir_intrinsic_load_helper_invocation:
2969 case nir_intrinsic_load_sample_mask_in:
2970 case nir_intrinsic_load_sample_id: {
2971 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
2972 fs_reg val = nir_system_values[sv];
2973 assert(val.file != BAD_FILE);
2974 dest.type = val.type;
2975 bld.MOV(dest, val);
2976 break;
2979 case nir_intrinsic_discard:
2980 case nir_intrinsic_discard_if: {
2981 /* We track our discarded pixels in f0.1. By predicating on it, we can
2982 * update just the flag bits that aren't yet discarded. If there's no
2983 * condition, we emit a CMP of g0 != g0, so all currently executing
2984 * channels will get turned off.
2987 if (instr->intrinsic == nir_intrinsic_discard_if) {
2988 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
2989 brw_imm_d(0), BRW_CONDITIONAL_Z);
2991 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
2992 BRW_REGISTER_TYPE_UW));
2993 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
2995 cmp->predicate = BRW_PREDICATE_NORMAL;
2996 cmp->flag_subreg = 1;
2998 if (devinfo->gen >= 6) {
2999 emit_discard_jump();
3004 case nir_intrinsic_interp_var_at_centroid:
3005 case nir_intrinsic_interp_var_at_sample:
3006 case nir_intrinsic_interp_var_at_offset: {
3007 /* Handle ARB_gpu_shader5 interpolation intrinsics
3009 * It's worth a quick word of explanation as to why we handle the full
3010 * variable-based interpolation intrinsic rather than a lowered version
3011 * like we do for other inputs. We have to do that because the way
3012 * we set up inputs doesn't allow us to use the already setup inputs for
3013 * interpolation. At the beginning of the shader, we go through all of
3014 * the input variables and do the initial interpolation and put it in
3015 * the nir_inputs array based on its location as determined in
3016 * nir_lower_io. If the input isn't used, dead code cleans up and
3017 * everything works fine. However, when we get to the ARB_gpu_shader5
3018 * interpolation intrinsics, we need to reinterpolate the input
3019 * differently. If we used an intrinsic that just had an index it would
3020 * only give us the offset into the nir_inputs array. However, this is
3021 * useless because that value is post-interpolation and we need
3022 * pre-interpolation. In order to get the actual location of the bits
3023 * we get from the vertex fetching hardware, we need the variable.
3025 wm_prog_data->pulls_bary = true;
3027 fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
3028 const glsl_interp_qualifier interpolation =
3029 (glsl_interp_qualifier) instr->variables[0]->var->data.interpolation;
3031 switch (instr->intrinsic) {
3032 case nir_intrinsic_interp_var_at_centroid:
3033 emit_pixel_interpolater_send(bld,
3034 FS_OPCODE_INTERPOLATE_AT_CENTROID,
3041 case nir_intrinsic_interp_var_at_sample: {
3042 if (!wm_key->multisample_fbo) {
3043 /* From the ARB_gpu_shader5 specification:
3044 * "If multisample buffers are not available, the input varying
3045 * will be evaluated at the center of the pixel."
3047 emit_pixel_interpolater_send(bld,
3048 FS_OPCODE_INTERPOLATE_AT_CENTROID,
3056 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
3058 if (const_sample) {
3059 unsigned msg_data = const_sample->i32[0] << 4;
3061 emit_pixel_interpolater_send(bld,
3062 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3065 brw_imm_ud(msg_data),
3067 } else {
3068 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
3069 BRW_REGISTER_TYPE_UD);
3071 if (nir_src_is_dynamically_uniform(instr->src[0])) {
3072 const fs_reg sample_id = bld.emit_uniformize(sample_src);
3073 const fs_reg msg_data = vgrf(glsl_type::uint_type);
3074 bld.exec_all().group(1, 0)
3075 .SHL(msg_data, sample_id, brw_imm_ud(4u));
3076 emit_pixel_interpolater_send(bld,
3077 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3083 /* Make a loop that sends a message to the pixel interpolater
3084 * for the sample number in each live channel. If there are
3085 * multiple channels with the same sample number then these
3086 * will be handled simultaneously with a single iteration of
3087 * the loop.
3088 */
3089 bld.emit(BRW_OPCODE_DO);
3091 /* Get the next live sample number into sample_id_reg */
3092 const fs_reg sample_id = bld.emit_uniformize(sample_src);
3094 /* Set the flag register so that we can perform the send
3095 * message on all channels that have the same sample number
3097 bld.CMP(bld.null_reg_ud(),
3098 sample_src, sample_id,
3099 BRW_CONDITIONAL_EQ);
3100 const fs_reg msg_data = vgrf(glsl_type::uint_type);
3101 bld.exec_all().group(1, 0)
3102 .SHL(msg_data, sample_id, brw_imm_ud(4u));
3104 emit_pixel_interpolater_send(bld,
3105 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3110 set_predicate(BRW_PREDICATE_NORMAL, inst);
3112 /* Continue the loop if there are any live channels left */
3113 set_predicate_inv(BRW_PREDICATE_NORMAL,
3115 bld.emit(BRW_OPCODE_WHILE));
3122 case nir_intrinsic_interp_var_at_offset: {
3123 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3125 if (const_offset) {
3126 unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
3127 unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
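/* Illustrative encoding (not from the original source): a constant offset
 * of (0.25, -0.25) yields off_x = 4 and off_y = (-4) & 0xf = 12, i.e. the
 * S0.4 fixed-point values 4/16 and -4/16, packed below as
 * off_x | (off_y << 4).
 */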
3129 emit_pixel_interpolater_send(bld,
3130 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
3133 brw_imm_ud(off_x | (off_y << 4)),
3134 interpolation);
3135 } else {
3136 fs_reg src = vgrf(glsl_type::ivec2_type);
3137 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
3138 BRW_REGISTER_TYPE_F);
3139 for (int i = 0; i < 2; i++) {
3140 fs_reg temp = vgrf(glsl_type::float_type);
3141 bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
3142 fs_reg itemp = vgrf(glsl_type::int_type);
3144 bld.MOV(itemp, temp);
3146 /* Clamp the upper end of the range to +7/16.
3147 * ARB_gpu_shader5 requires that we support a maximum offset
3148 * of +0.5, which isn't representable in a S0.4 value -- if
3149 * we didn't clamp it, we'd end up with -8/16, which is the
3150 * opposite of what the shader author wanted.
3152 * This is legal due to ARB_gpu_shader5's quantization
3153 * rules:
3155 * "Not all values of <offset> may be supported; x and y
3156 * offsets may be rounded to fixed-point values with the
3157 * number of fraction bits given by the
3158 * implementation-dependent constant
3159 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
3161 set_condmod(BRW_CONDITIONAL_L,
3162 bld.SEL(offset(src, bld, i), itemp, brw_imm_d(7)));
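/* Illustrative example (not from the original source): an offset of +0.5
 * scales to 8, which does not fit in S0.4 (range -8/16 .. +7/16); without
 * the SEL above it would wrap around to -8/16, so it is clamped to 7
 * (+7/16) instead.
 */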
3165 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
3166 emit_pixel_interpolater_send(bld,
3177 unreachable("Invalid intrinsic");
3180 for (unsigned j = 0; j < instr->num_components; j++) {
3181 fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
3182 src.type = dest.type;
3184 bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
3185 dest = offset(dest, bld, 1);
3190 nir_emit_intrinsic(bld, instr);
3196 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
3197 nir_intrinsic_instr *instr)
3199 assert(stage == MESA_SHADER_COMPUTE);
3200 struct brw_cs_prog_data *cs_prog_data =
3201 (struct brw_cs_prog_data *) prog_data;
3204 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3205 dest = get_nir_dest(instr->dest);
3207 switch (instr->intrinsic) {
3208 case nir_intrinsic_barrier:
3210 cs_prog_data->uses_barrier = true;
3213 case nir_intrinsic_load_local_invocation_id:
3214 case nir_intrinsic_load_work_group_id: {
3215 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3216 fs_reg val = nir_system_values[sv];
3217 assert(val.file != BAD_FILE);
3218 dest.type = val.type;
3219 for (unsigned i = 0; i < 3; i++)
3220 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
3224 case nir_intrinsic_load_num_work_groups: {
3225 const unsigned surface =
3226 cs_prog_data->binding_table.work_groups_start;
3228 cs_prog_data->uses_num_work_groups = true;
3230 fs_reg surf_index = brw_imm_ud(surface);
3231 brw_mark_surface_used(prog_data, surface);
3233 /* Read the 3 GLuint components of gl_NumWorkGroups */
3234 for (unsigned i = 0; i < 3; i++) {
3235 fs_reg read_result =
3236 emit_untyped_read(bld, surf_index,
3237 brw_imm_ud(i << 2),
3238 1 /* dims */, 1 /* size */,
3239 BRW_PREDICATE_NONE);
3240 read_result.type = dest.type;
3241 bld.MOV(dest, read_result);
3242 dest = offset(dest, bld, 1);
3247 case nir_intrinsic_shared_atomic_add:
3248 nir_emit_shared_atomic(bld, BRW_AOP_ADD, instr);
3250 case nir_intrinsic_shared_atomic_imin:
3251 nir_emit_shared_atomic(bld, BRW_AOP_IMIN, instr);
3253 case nir_intrinsic_shared_atomic_umin:
3254 nir_emit_shared_atomic(bld, BRW_AOP_UMIN, instr);
3256 case nir_intrinsic_shared_atomic_imax:
3257 nir_emit_shared_atomic(bld, BRW_AOP_IMAX, instr);
3259 case nir_intrinsic_shared_atomic_umax:
3260 nir_emit_shared_atomic(bld, BRW_AOP_UMAX, instr);
3262 case nir_intrinsic_shared_atomic_and:
3263 nir_emit_shared_atomic(bld, BRW_AOP_AND, instr);
3265 case nir_intrinsic_shared_atomic_or:
3266 nir_emit_shared_atomic(bld, BRW_AOP_OR, instr);
3268 case nir_intrinsic_shared_atomic_xor:
3269 nir_emit_shared_atomic(bld, BRW_AOP_XOR, instr);
3271 case nir_intrinsic_shared_atomic_exchange:
3272 nir_emit_shared_atomic(bld, BRW_AOP_MOV, instr);
3274 case nir_intrinsic_shared_atomic_comp_swap:
3275 nir_emit_shared_atomic(bld, BRW_AOP_CMPWR, instr);
3278 case nir_intrinsic_load_shared: {
3279 assert(devinfo->gen >= 7);
3281 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
3283 /* Get the offset to read from */
3285 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3287 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
3289 offset_reg = vgrf(glsl_type::uint_type);
3291 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
3292 brw_imm_ud(instr->const_index[0]));
3295 /* Read the vector */
3296 do_untyped_vector_read(bld, dest, surf_index, offset_reg,
3297 instr->num_components);
3301 case nir_intrinsic_store_shared: {
3302 assert(devinfo->gen >= 7);
3305 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
3308 fs_reg val_reg = get_nir_src(instr->src[0]);
3311 unsigned writemask = instr->const_index[1];
3313 /* get_nir_src() retypes to integer. Be wary of 64-bit types though
3314 * since the untyped writes below operate in units of 32-bits, which
3315 * means that we need to write twice as many components each time.
3316 * Also, we have to shuffle 64-bit data to be in the appropriate layout
3317 * expected by our 32-bit write messages.
3318 */
3319 unsigned type_size = 4;
3320 unsigned bit_size = instr->src[0].is_ssa ?
3321 instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
3322 if (bit_size == 64) {
3323 type_size = 8;
3324 fs_reg tmp =
3325 fs_reg(VGRF, alloc.allocate(alloc.sizes[val_reg.nr]), val_reg.type);
3326 shuffle_64bit_data_for_32bit_write(
3328 retype(tmp, BRW_REGISTER_TYPE_F),
3329 retype(val_reg, BRW_REGISTER_TYPE_DF),
3330 instr->num_components);
3331 val_reg = tmp;
3332 }
3334 unsigned type_slots = type_size / 4;
3336 /* Combine groups of consecutive enabled channels in one write
3337 * message. We use ffs to find the first enabled channel and then ffs on
3338 * the bit-inverse, down-shifted writemask to determine the length of
3339 * the block of enabled bits.
3342 unsigned first_component = ffs(writemask) - 1;
3343 unsigned length = ffs(~(writemask >> first_component)) - 1;
3345 /* We can't write more than 2 64-bit components at once. Limit the
3346 * length of the write to what we can do and let the next iteration
3347 * handle the rest.
3348 */
3349 if (type_size > 4)
3350 length = MIN2(2, length);
3353 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3355 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
3356 type_size * first_component);
3358 offset_reg = vgrf(glsl_type::uint_type);
3360 retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
3361 brw_imm_ud(instr->const_index[0] + type_size * first_component));
3364 emit_untyped_write(bld, surf_index, offset_reg,
3365 offset(val_reg, bld, first_component * type_slots),
3366 1 /* dims */, length * type_slots,
3367 BRW_PREDICATE_NONE);
3369 /* Clear the bits in the writemask that we just wrote, then try
3370 * again to see if more channels are left.
3372 writemask &= (15 << (first_component + length));
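/* Illustrative walk-through (not from the original source): for
 * writemask = 0b1011 the first pass finds first_component = 0 and
 * length = 2 and writes components 0-1; the writemask then becomes
 * 0b1000, so a second pass writes component 3 on its own.
 */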
3379 nir_emit_intrinsic(bld, instr);
3385 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
3388 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3389 dest = get_nir_dest(instr->dest);
3391 switch (instr->intrinsic) {
3392 case nir_intrinsic_atomic_counter_inc:
3393 case nir_intrinsic_atomic_counter_dec:
3394 case nir_intrinsic_atomic_counter_read: {
3395 if (stage == MESA_SHADER_FRAGMENT &&
3396 instr->intrinsic != nir_intrinsic_atomic_counter_read)
3397 ((struct brw_wm_prog_data *)prog_data)->has_side_effects = true;
3399 /* Get the arguments of the atomic intrinsic. */
3400 const fs_reg offset = get_nir_src(instr->src[0]);
3401 const unsigned surface = (stage_prog_data->binding_table.abo_start +
3402 instr->const_index[0]);
3405 /* Emit a surface read or atomic op. */
3406 switch (instr->intrinsic) {
3407 case nir_intrinsic_atomic_counter_read:
3408 tmp = emit_untyped_read(bld, brw_imm_ud(surface), offset, 1, 1);
3411 case nir_intrinsic_atomic_counter_inc:
3412 tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(),
3413 fs_reg(), 1, 1, BRW_AOP_INC);
3416 case nir_intrinsic_atomic_counter_dec:
3417 tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(),
3418 fs_reg(), 1, 1, BRW_AOP_PREDEC);
3422 unreachable("Unreachable");
3425 /* Assign the result. */
3426 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);
3428 /* Mark the surface as used. */
3429 brw_mark_surface_used(stage_prog_data, surface);
3433 case nir_intrinsic_image_load:
3434 case nir_intrinsic_image_store:
3435 case nir_intrinsic_image_atomic_add:
3436 case nir_intrinsic_image_atomic_min:
3437 case nir_intrinsic_image_atomic_max:
3438 case nir_intrinsic_image_atomic_and:
3439 case nir_intrinsic_image_atomic_or:
3440 case nir_intrinsic_image_atomic_xor:
3441 case nir_intrinsic_image_atomic_exchange:
3442 case nir_intrinsic_image_atomic_comp_swap: {
3443 using namespace image_access;
3445 if (stage == MESA_SHADER_FRAGMENT &&
3446 instr->intrinsic != nir_intrinsic_image_load)
3447 ((struct brw_wm_prog_data *)prog_data)->has_side_effects = true;
3449 /* Get the referenced image variable and type. */
3450 const nir_variable *var = instr->variables[0]->var;
3451 const glsl_type *type = var->type->without_array();
3452 const brw_reg_type base_type = get_image_base_type(type);
3454 /* Get some metadata from the image intrinsic. */
3455 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
3456 const unsigned arr_dims = type->sampler_array ? 1 : 0;
3457 const unsigned surf_dims = type->coordinate_components() - arr_dims;
3458 const unsigned format = var->data.image.format;
3460 /* Get the arguments of the image intrinsic. */
3461 const fs_reg image = get_nir_image_deref(instr->variables[0]);
3462 const fs_reg addr = retype(get_nir_src(instr->src[0]),
3463 BRW_REGISTER_TYPE_UD);
3464 const fs_reg src0 = (info->num_srcs >= 3 ?
3465 retype(get_nir_src(instr->src[2]), base_type) :
3467 const fs_reg src1 = (info->num_srcs >= 4 ?
3468 retype(get_nir_src(instr->src[3]), base_type) :
3472 /* Emit an image load, store or atomic op. */
3473 if (instr->intrinsic == nir_intrinsic_image_load)
3474 tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);
3476 else if (instr->intrinsic == nir_intrinsic_image_store)
3477 emit_image_store(bld, image, addr, src0, surf_dims, arr_dims,
3478 var->data.image.write_only ? GL_NONE : format);
3481 tmp = emit_image_atomic(bld, image, addr, src0, src1,
3482 surf_dims, arr_dims, info->dest_components,
3483 get_image_atomic_op(instr->intrinsic, type));
3485 /* Assign the result. */
3486 for (unsigned c = 0; c < info->dest_components; ++c)
3487 bld.MOV(offset(retype(dest, base_type), bld, c),
3488 offset(tmp, bld, c));
3492 case nir_intrinsic_memory_barrier_atomic_counter:
3493 case nir_intrinsic_memory_barrier_buffer:
3494 case nir_intrinsic_memory_barrier_image:
3495 case nir_intrinsic_memory_barrier: {
3496 const fs_builder ubld = bld.group(8, 0);
3497 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
3498 ubld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
3503 case nir_intrinsic_group_memory_barrier:
3504 case nir_intrinsic_memory_barrier_shared:
3505 /* We treat these workgroup-level barriers as no-ops. This should be
3506 * safe at present and as long as:
3508 * - Memory access instructions are not subsequently reordered by the
3509 * compiler back-end.
3511 * - All threads from a given compute shader workgroup fit within a
3512 * single subslice and therefore talk to the same HDC shared unit
3513 * which supposedly guarantees ordering and coherency between threads
3514 * from the same workgroup. This may change in the future when we
3515 * start splitting workgroups across multiple subslices.
3517 * - The context is not in fault-and-stream mode, which could cause
3518 * memory transactions (including to SLM) prior to the barrier to be
3519 * replayed after the barrier if a pagefault occurs. This shouldn't
3520 * be a problem up to and including SKL because fault-and-stream is
3521 * not usable due to hardware issues, but that's likely to change in
3522 * the future.
3523 */
3524 break;
3526 case nir_intrinsic_shader_clock: {
3527 /* We cannot do anything if there is an event, so ignore it for now */
3528 fs_reg shader_clock = get_timestamp(bld);
3529 const fs_reg srcs[] = { shader_clock.set_smear(0), shader_clock.set_smear(1) };
3531 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
3535 case nir_intrinsic_image_size: {
3536 /* Get the referenced image variable and type. */
3537 const nir_variable *var = instr->variables[0]->var;
3538 const glsl_type *type = var->type->without_array();
3540 /* Get the size of the image. */
3541 const fs_reg image = get_nir_image_deref(instr->variables[0]);
3542 const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);
3544 /* For 1DArray image types, the array index is stored in the Z component.
3545 * Fix this by swizzling the Z component to the Y component.
3547 const bool is_1d_array_image =
3548 type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
3549 type->sampler_array;
3551 /* For CubeArray images, we should count the number of cubes instead
3552 * of the number of faces. Fix it by dividing the Z component by 6.
3553 */
3554 const bool is_cube_array_image =
3555 type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
3556 type->sampler_array;
3558 /* Copy all the components. */
3559 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
3560 for (unsigned c = 0; c < info->dest_components; ++c) {
3561 if ((int)c >= type->coordinate_components()) {
3562 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3563 brw_imm_d(1));
3564 } else if (c == 1 && is_1d_array_image) {
3565 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3566 offset(size, bld, 2));
3567 } else if (c == 2 && is_cube_array_image) {
3568 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
3569 offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3570 offset(size, bld, c), brw_imm_d(6));
3572 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
3573 offset(size, bld, c));
3580 case nir_intrinsic_image_samples:
3581 /* The driver does not support multi-sampled images. */
3582 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
3585 case nir_intrinsic_load_uniform: {
3586 /* Offsets are in bytes but they should always be multiples of 4 */
3587 assert(instr->const_index[0] % 4 == 0);
3589 fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
3591 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3592 if (const_offset) {
3593 /* Offsets are in bytes but they should always be multiples of 4 */
3594 assert(const_offset->u32[0] % 4 == 0);
3595 src.reg_offset = const_offset->u32[0] / 4;
3597 for (unsigned j = 0; j < instr->num_components; j++) {
3598 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
3599 }
3600 } else {
3601 fs_reg indirect = retype(get_nir_src(instr->src[0]),
3602 BRW_REGISTER_TYPE_UD);
3604 /* We need to pass a size to the MOV_INDIRECT but we don't want it to
3605 * go past the end of the uniform. In order to keep the n'th
3606 * component from running past, we subtract off the size of all but
3607 * one component of the vector.
3609 assert(instr->const_index[1] >=
3610 instr->num_components * (int) type_sz(dest.type));
3611 unsigned read_size = instr->const_index[1] -
3612 (instr->num_components - 1) * type_sz(dest.type);
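/* Illustrative example (not from the original source): indirectly indexing
 * a dvec2 uniform (const_index[1] == 16, type_sz == 8) gives
 * read_size = 16 - 8 = 8, so the per-component MOV_INDIRECTs below can
 * never read past the end of the 16-byte uniform.
 */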
3614 fs_reg indirect_chv_high_32bit;
3615 bool is_chv_bxt_64bit =
3616 (devinfo->is_cherryview || devinfo->is_broxton) &&
3617 type_sz(dest.type) == 8;
3618 if (is_chv_bxt_64bit) {
3619 indirect_chv_high_32bit = vgrf(glsl_type::uint_type);
3620 /* Calculate indirect address to read high 32 bits */
3621 bld.ADD(indirect_chv_high_32bit, indirect, brw_imm_ud(4));
3624 for (unsigned j = 0; j < instr->num_components; j++) {
3625 if (!is_chv_bxt_64bit) {
3626 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
3627 offset(dest, bld, j), offset(src, bld, j),
3628 indirect, brw_imm_ud(read_size));
3630 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
3631 subscript(offset(dest, bld, j), BRW_REGISTER_TYPE_UD, 0),
3632 offset(src, bld, j),
3633 indirect, brw_imm_ud(read_size));
3635 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
3636 subscript(offset(dest, bld, j), BRW_REGISTER_TYPE_UD, 1),
3637 offset(src, bld, j),
3638 indirect_chv_high_32bit, brw_imm_ud(read_size));
3645 case nir_intrinsic_load_ubo: {
3646 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
3647 fs_reg surf_index;
3648 if (const_index) {
3650 const unsigned index = stage_prog_data->binding_table.ubo_start +
3651 const_index->u32[0];
3652 surf_index = brw_imm_ud(index);
3653 brw_mark_surface_used(prog_data, index);
3654 } else {
3655 /* The block index is not a constant. Evaluate the index expression
3656 * per-channel and add the base UBO index; we have to select a value
3657 * from any live channel.
3659 surf_index = vgrf(glsl_type::uint_type);
3660 bld.ADD(surf_index, get_nir_src(instr->src[0]),
3661 brw_imm_ud(stage_prog_data->binding_table.ubo_start));
3662 surf_index = bld.emit_uniformize(surf_index);
3664 /* Assume this may touch any UBO. It would be nice to provide
3665 * a tighter bound, but the array information is already lowered away.
3667 brw_mark_surface_used(prog_data,
3668 stage_prog_data->binding_table.ubo_start +
3669 nir->info.num_ubos - 1);
3672 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3673 if (const_offset == NULL) {
3674 fs_reg base_offset = retype(get_nir_src(instr->src[1]),
3675 BRW_REGISTER_TYPE_UD);
3677 for (int i = 0; i < instr->num_components; i++)
3678 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
3679 base_offset, i * type_sz(dest.type));
3681 /* Even if we are loading doubles, a pull constant load will load
3682 * a 32-bit vec4, so should only reserve vgrf space for that. If we
3683 * need to load a full dvec4 we will have to emit 2 loads. This is
3684 * similar to demote_pull_constants(), except that in that case we
3685 * see individual accesses to each component of the vector and then
3686 * we let CSE deal with duplicate loads. Here we see a vector access
3687 * and we have to split it if necessary.
3689 const unsigned type_size = type_sz(dest.type);
3690 const fs_reg packed_consts = bld.vgrf(BRW_REGISTER_TYPE_F);
3691 for (unsigned c = 0; c < instr->num_components;) {
3692 const unsigned base = const_offset->u32[0] + c * type_size;
3694 /* Number of usable components in the next 16B-aligned load */
3695 const unsigned count = MIN2(instr->num_components - c,
3696 (16 - base % 16) / type_size);
3699 .emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
3700 packed_consts, surf_index, brw_imm_ud(base & ~15));
3702 const fs_reg consts =
3703 retype(byte_offset(packed_consts, base & 15), dest.type);
3705 for (unsigned d = 0; d < count; d++)
3706 bld.MOV(offset(dest, bld, c + d), component(consts, d));
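/* Illustrative split (not from the original source): a dvec4 load at a
 * 16-byte-aligned constant offset runs this loop twice -- each
 * FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD fetches one 16B-aligned vec4, which
 * holds only two usable 64-bit components (count == 2 per iteration).
 */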
3714 case nir_intrinsic_load_ssbo: {
3715 assert(devinfo->gen >= 7);
3717 nir_const_value *const_uniform_block =
3718 nir_src_as_const_value(instr->src[0]);
3721 if (const_uniform_block) {
3722 unsigned index = stage_prog_data->binding_table.ssbo_start +
3723 const_uniform_block->u32[0];
3724 surf_index = brw_imm_ud(index);
3725 brw_mark_surface_used(prog_data, index);
3727 surf_index = vgrf(glsl_type::uint_type);
3728 bld.ADD(surf_index, get_nir_src(instr->src[0]),
3729 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
3731 /* Assume this may touch any SSBO. It would be nice to provide
3732 * a tighter bound, but the array information is already lowered away.
3734 brw_mark_surface_used(prog_data,
3735 stage_prog_data->binding_table.ssbo_start +
3736 nir->info.num_ssbos - 1);
3740 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3742 offset_reg = brw_imm_ud(const_offset->u32[0]);
3744 offset_reg = get_nir_src(instr->src[1]);
3747 /* Read the vector */
3748 do_untyped_vector_read(bld, dest, surf_index, offset_reg,
3749 instr->num_components);
3754 case nir_intrinsic_load_input: {
3756 unsigned num_components = instr->num_components;
3757 enum brw_reg_type type = dest.type;
3759 if (stage == MESA_SHADER_VERTEX) {
3760 src = fs_reg(ATTR, instr->const_index[0], dest.type);
3762 assert(type_sz(type) >= 4);
3763 if (type == BRW_REGISTER_TYPE_DF) {
3764 /* const_index is in 32-bit type size units that may not be aligned
3765 * with DF. We need to read the double vector as if it was a float
3766 * vector of twice the number of components to fetch the right data.
3768 dest = retype(dest, BRW_REGISTER_TYPE_F);
3769 num_components *= 2;
3771 src = offset(retype(nir_inputs, dest.type), bld,
3772 instr->const_index[0]);
3775 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3776 assert(const_offset && "Indirect input loads not allowed");
3777 src = offset(src, bld, const_offset->u32[0]);
3779 for (unsigned j = 0; j < num_components; j++) {
3780 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
3783 if (type == BRW_REGISTER_TYPE_DF) {
3784 /* Once the double vector is read, restore its original register
3785 * type so that normal execution can continue.
3786 */
3787 src = retype(src, type);
3788 dest = retype(dest, type);
3791 if (type_sz(src.type) == 8) {
3792 shuffle_32bit_load_result_to_64bit_data(bld,
3794 retype(dest, BRW_REGISTER_TYPE_F),
3795 instr->num_components);
3801 case nir_intrinsic_store_ssbo: {
3802 assert(devinfo->gen >= 7);
3804 if (stage == MESA_SHADER_FRAGMENT)
3805 ((struct brw_wm_prog_data *)prog_data)->has_side_effects = true;
3809 nir_const_value *const_uniform_block =
3810 nir_src_as_const_value(instr->src[1]);
3811 if (const_uniform_block) {
3812 unsigned index = stage_prog_data->binding_table.ssbo_start +
3813 const_uniform_block->u32[0];
3814 surf_index = brw_imm_ud(index);
3815 brw_mark_surface_used(prog_data, index);
3817 surf_index = vgrf(glsl_type::uint_type);
3818 bld.ADD(surf_index, get_nir_src(instr->src[1]),
3819 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
3821 brw_mark_surface_used(prog_data,
3822 stage_prog_data->binding_table.ssbo_start +
3823 nir->info.num_ssbos - 1);

      /* Value */
      fs_reg val_reg = get_nir_src(instr->src[0]);

      /* Writemask */
      unsigned writemask = instr->const_index[0];

      /* get_nir_src() retypes to integer. Be wary of 64-bit types though
       * since the untyped writes below operate in units of 32 bits, which
       * means that we need to write twice as many components each time.
       * Also, we have to shuffle 64-bit data to be in the appropriate layout
       * expected by our 32-bit write messages.
       */
      unsigned type_size = 4;
      unsigned bit_size = instr->src[0].is_ssa ?
         instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
      if (bit_size == 64) {
         type_size = 8;
         fs_reg tmp =
            fs_reg(VGRF, alloc.allocate(alloc.sizes[val_reg.nr]), val_reg.type);
         shuffle_64bit_data_for_32bit_write(bld,
                                            retype(tmp, BRW_REGISTER_TYPE_F),
                                            retype(val_reg, BRW_REGISTER_TYPE_DF),
                                            instr->num_components);
         val_reg = tmp;
      }

      unsigned type_slots = type_size / 4;

      /* Combine groups of consecutive enabled channels in one write
       * message. We use ffs to find the first enabled channel and then ffs on
       * the bit-inverse, down-shifted writemask to determine the length of
       * the block of enabled bits.
       */
      while (writemask) {
         unsigned first_component = ffs(writemask) - 1;
         unsigned length = ffs(~(writemask >> first_component)) - 1;
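
         /* For example, a writemask of 0b1101 is handled in two passes: the
          * first finds first_component = 0 and length = 1 (channel x); after
          * the just-written bit is cleared below the mask becomes 0b1100, so
          * the second pass finds first_component = 2 and length = 2 and
          * writes channels z and w in a single message.
          */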

         /* We can't write more than 2 64-bit components at once. Limit the
          * length of the write to what we can do and let the next iteration
          * handle the rest.
          */
         if (type_size > 4)
            length = MIN2(2, length);

         fs_reg offset_reg;
         nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
         if (const_offset) {
            offset_reg = brw_imm_ud(const_offset->u32[0] +
                                    type_size * first_component);
         } else {
            offset_reg = vgrf(glsl_type::uint_type);
            bld.ADD(offset_reg,
                    retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
                    brw_imm_ud(type_size * first_component));
         }

         emit_untyped_write(bld, surf_index, offset_reg,
                            offset(val_reg, bld, first_component * type_slots),
                            1 /* dims */, length * type_slots,
                            BRW_PREDICATE_NONE);

         /* Clear the bits in the writemask that we just wrote, then try
          * again to see if more channels are left.
          */
         writemask &= (15 << (first_component + length));
      }
      break;
   }

   case nir_intrinsic_store_output: {
      fs_reg src = get_nir_src(instr->src[0]);
      fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
                               instr->const_index[0]);

      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      assert(const_offset && "Indirect output stores not allowed");
      new_dest = offset(new_dest, bld, const_offset->u32[0]);

      unsigned num_components = instr->num_components;
      unsigned bit_size = instr->src[0].is_ssa ?
         instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size;
      if (bit_size == 64) {
         fs_reg tmp =
            fs_reg(VGRF, alloc.allocate(2 * num_components),
                   BRW_REGISTER_TYPE_F);
         shuffle_64bit_data_for_32bit_write(
            bld, tmp, retype(src, BRW_REGISTER_TYPE_DF), num_components);
         src = retype(tmp, src.type);
         num_components *= 2;
      }

      for (unsigned j = 0; j < num_components; j++) {
         bld.MOV(offset(new_dest, bld, j), offset(src, bld, j));
      }
      break;
   }
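
   /* Each SSBO atomic below maps directly onto one of the data port's
    * untyped atomic operations; note that exchange is expressed as
    * BRW_AOP_MOV and compare-and-swap as BRW_AOP_CMPWR.
    */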
   case nir_intrinsic_ssbo_atomic_add:
      nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
      break;

   case nir_intrinsic_get_buffer_size: {
      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
      unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;

      /* A resinfo's sampler message is used to get the buffer size. The
       * SIMD8 writeback message consists of four registers and the SIMD16
       * writeback message consists of 8 destination registers (two per each
       * component). Because we are only interested in the first channel of
       * the first returned component, where resinfo returns the buffer size
       * for SURFTYPE_BUFFER, we can just use the SIMD8 variant regardless of
       * the dispatch width.
       */
      const fs_builder ubld = bld.exec_all().group(8, 0);
      fs_reg src_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD);
      fs_reg ret_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);

      /* Set LOD = 0 */
      ubld.MOV(src_payload, brw_imm_d(0));

      const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
      fs_inst *inst = ubld.emit(FS_OPCODE_GET_BUFFER_SIZE, ret_payload,
                                src_payload, brw_imm_ud(index));
      inst->header_size = 0;
      inst->mlen = 1;
      inst->regs_written = 4;

      bld.MOV(retype(dest, ret_payload.type), component(ret_payload, 0));
      brw_mark_surface_used(prog_data, index);
      break;
   }

   case nir_intrinsic_load_channel_num: {
      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UW);
      dest = retype(dest, BRW_REGISTER_TYPE_UD);
      const fs_builder allbld8 = bld.group(8, 0).exec_all();
      allbld8.MOV(tmp, brw_imm_v(0x76543210));
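      /* brw_imm_v is a packed vector immediate: each of the 8 channels of
       * the SIMD8 MOV receives one 4-bit nibble of 0x76543210, so lane i
       * ends up holding the value i. The ADDs below extend that pattern to
       * the upper lanes for SIMD16 and SIMD32 dispatch.
       */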
      if (dispatch_width > 8)
         allbld8.ADD(byte_offset(tmp, 16), tmp, brw_imm_uw(8u));
      if (dispatch_width > 16) {
         const fs_builder allbld16 = bld.group(16, 0).exec_all();
         allbld16.ADD(byte_offset(tmp, 32), tmp, brw_imm_uw(16u));
      }
      bld.MOV(dest, tmp);
      break;
   }

   default:
      unreachable("unknown intrinsic");
   }
}

void
fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
                                 int op, nir_intrinsic_instr *instr)
{
   if (stage == MESA_SHADER_FRAGMENT)
      ((struct brw_wm_prog_data *)prog_data)->has_side_effects = true;

   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   fs_reg surface;
   nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
   if (const_surface) {
      unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
                            const_surface->u32[0];
      surface = brw_imm_ud(surf_index);
      brw_mark_surface_used(prog_data, surf_index);
   } else {
      surface = vgrf(glsl_type::uint_type);
      bld.ADD(surface, get_nir_src(instr->src[0]),
              brw_imm_ud(stage_prog_data->binding_table.ssbo_start));

      /* Assume this may touch any SSBO. This is the same as we do for other
       * UBO/SSBO accesses with a non-constant surface index.
       */
      brw_mark_surface_used(prog_data,
                            stage_prog_data->binding_table.ssbo_start +
                            nir->info.num_ssbos - 1);
   }

   fs_reg offset = get_nir_src(instr->src[1]);
   fs_reg data1 = get_nir_src(instr->src[2]);
   fs_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3]);

   /* Emit the actual atomic operation */
   fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                              data1, data2,
                                              1 /* dims */, 1 /* rsize */,
                                              op,
                                              BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

void
fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
                                   int op, nir_intrinsic_instr *instr)
{
   fs_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);
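
   /* Shared variables live in shared local memory (SLM), which is addressed
    * through a fixed binding table index rather than a driver-allocated
    * surface, hence the immediate below.
    */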
   fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
   fs_reg offset;
   fs_reg data1 = get_nir_src(instr->src[1]);
   fs_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[2]);

   /* Get the offset */
   nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
   if (const_offset) {
      offset = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
   } else {
      offset = vgrf(glsl_type::uint_type);
      bld.ADD(offset,
              retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
              brw_imm_ud(instr->const_index[0]));
   }

   /* Emit the actual atomic operation */
   fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
                                              data1, data2,
                                              1 /* dims */, 1 /* rsize */,
                                              op,
                                              BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

void
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;

   fs_reg srcs[TEX_LOGICAL_NUM_SRCS];

   srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture);
   srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(sampler);

   int lod_components = 0;

   /* The hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_d(0);

   for (unsigned i = 0; i < instr->num_srcs; i++) {
      fs_reg src = get_nir_src(instr->src[i].src);
      switch (instr->src[i].src_type) {
      case nir_tex_src_bias:
         srcs[TEX_LOGICAL_SRC_LOD] =
            retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_comparitor:
         srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_coord:
         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_txf_ms_mcs:
         case nir_texop_samples_identical:
            srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_D);
            break;
         default:
            srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ddx:
         srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
         lod_components = nir_tex_instr_src_size(instr, i);
         break;
      case nir_tex_src_ddy:
         srcs[TEX_LOGICAL_SRC_LOD2] = retype(src, BRW_REGISTER_TYPE_F);
         break;
      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
            srcs[TEX_LOGICAL_SRC_LOD] =
               retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_UD);
            break;
         case nir_texop_txf:
            srcs[TEX_LOGICAL_SRC_LOD] =
               retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_D);
            break;
         default:
            srcs[TEX_LOGICAL_SRC_LOD] =
               retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
            break;
         }
         break;
      case nir_tex_src_ms_index:
         srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_REGISTER_TYPE_UD);
         break;

      case nir_tex_src_offset: {
         nir_const_value *const_offset =
            nir_src_as_const_value(instr->src[i].src);
         if (const_offset) {
            unsigned header_bits = brw_texture_offset(const_offset->i32, 3);
            if (header_bits != 0)
               srcs[TEX_LOGICAL_SRC_OFFSET_VALUE] = brw_imm_ud(header_bits);
         } else {
            srcs[TEX_LOGICAL_SRC_OFFSET_VALUE] =
               retype(src, BRW_REGISTER_TYPE_D);
         }
         break;
      }

      case nir_tex_src_projector:
         unreachable("should be lowered");

      case nir_tex_src_texture_offset: {
         /* Figure out the highest possible texture index and mark it as used */
         uint32_t max_used = texture + instr->texture_array_size - 1;
         if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
            max_used += stage_prog_data->binding_table.gather_texture_start;
         } else {
            max_used += stage_prog_data->binding_table.texture_start;
         }
         brw_mark_surface_used(prog_data, max_used);

         /* Emit code to evaluate the actual indexing expression */
         fs_reg tmp = vgrf(glsl_type::uint_type);
         bld.ADD(tmp, src, brw_imm_ud(texture));
         srcs[TEX_LOGICAL_SRC_SURFACE] = bld.emit_uniformize(tmp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         fs_reg tmp = vgrf(glsl_type::uint_type);
         bld.ADD(tmp, src, brw_imm_ud(sampler));
         srcs[TEX_LOGICAL_SRC_SAMPLER] = bld.emit_uniformize(tmp);
         break;
      }

      case nir_tex_src_ms_mcs:
         assert(instr->op == nir_texop_txf_ms);
         srcs[TEX_LOGICAL_SRC_MCS] = retype(src, BRW_REGISTER_TYPE_D);
         break;

      case nir_tex_src_plane: {
         nir_const_value *const_plane =
            nir_src_as_const_value(instr->src[i].src);
         const uint32_t plane = const_plane->u32[0];
         const uint32_t texture_index =
            instr->texture_index +
            stage_prog_data->binding_table.plane_start[plane] -
            stage_prog_data->binding_table.texture_start;

         srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture_index);
         break;
      }

      default:
         unreachable("unknown texture source");
      }
   }
   if (srcs[TEX_LOGICAL_SRC_MCS].file == BAD_FILE &&
       (instr->op == nir_texop_txf_ms ||
        instr->op == nir_texop_samples_identical)) {
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         srcs[TEX_LOGICAL_SRC_MCS] =
            emit_mcs_fetch(srcs[TEX_LOGICAL_SRC_COORDINATE],
                           instr->coord_components,
                           srcs[TEX_LOGICAL_SRC_SURFACE]);
      } else {
         srcs[TEX_LOGICAL_SRC_MCS] = brw_imm_ud(0u);
      }
   }

   srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
   srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);

   if (instr->op == nir_texop_query_levels) {
      /* textureQueryLevels() is implemented in terms of TXS so we need to
       * pass a valid LOD argument.
       */
      assert(srcs[TEX_LOGICAL_SRC_LOD].file == BAD_FILE);
      srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_ud(0u);
   }

   enum opcode opcode;
   switch (instr->op) {
   case nir_texop_tex:
      opcode = SHADER_OPCODE_TEX_LOGICAL;
      break;
   case nir_texop_txb:
      opcode = FS_OPCODE_TXB_LOGICAL;
      break;
   case nir_texop_txl:
      opcode = SHADER_OPCODE_TXL_LOGICAL;
      break;
   case nir_texop_txd:
      opcode = SHADER_OPCODE_TXD_LOGICAL;
      break;
   case nir_texop_txf:
      opcode = SHADER_OPCODE_TXF_LOGICAL;
      break;
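   /* 16x MSAA surfaces use the wider CMS_W variant of the compressed
    * multisample load, whose MCS data is 64 bits per pixel.
    */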
   case nir_texop_txf_ms:
      if ((key_tex->msaa_16 & (1 << sampler)))
         opcode = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
      else
         opcode = SHADER_OPCODE_TXF_CMS_LOGICAL;
      break;
   case nir_texop_txf_ms_mcs:
      opcode = SHADER_OPCODE_TXF_MCS_LOGICAL;
      break;
   case nir_texop_query_levels:
   case nir_texop_txs:
      opcode = SHADER_OPCODE_TXS_LOGICAL;
      break;
   case nir_texop_lod:
      opcode = SHADER_OPCODE_LOD_LOGICAL;
      break;
   case nir_texop_tg4:
      if (srcs[TEX_LOGICAL_SRC_OFFSET_VALUE].file != BAD_FILE &&
          srcs[TEX_LOGICAL_SRC_OFFSET_VALUE].file != IMM)
         opcode = SHADER_OPCODE_TG4_OFFSET_LOGICAL;
      else
         opcode = SHADER_OPCODE_TG4_LOGICAL;
      break;
   case nir_texop_texture_samples:
      opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
      break;
   case nir_texop_samples_identical: {
      fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);

      /* If mcs is an immediate value, it means there is no MCS. In that case
       * just return false.
       */
      if (srcs[TEX_LOGICAL_SRC_MCS].file == BRW_IMMEDIATE_VALUE) {
         bld.MOV(dst, brw_imm_ud(0u));
      } else if ((key_tex->msaa_16 & (1 << sampler))) {
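         /* With 16x MSAA the MCS value is 64 bits per pixel and comes back
          * as two dwords, so OR the halves together before the compare:
          * "all samples identical" then requires both halves to be zero.
          */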
         fs_reg tmp = vgrf(glsl_type::uint_type);
         bld.OR(tmp, srcs[TEX_LOGICAL_SRC_MCS],
                offset(srcs[TEX_LOGICAL_SRC_MCS], bld, 1));
         bld.CMP(dst, tmp, brw_imm_ud(0u), BRW_CONDITIONAL_EQ);
      } else {
         bld.CMP(dst, srcs[TEX_LOGICAL_SRC_MCS], brw_imm_ud(0u),
                 BRW_CONDITIONAL_EQ);
      }
      return;
   }
   default:
      unreachable("unknown texture opcode");
   }

   fs_reg dst = bld.vgrf(brw_type_for_nir_type(instr->dest_type), 4);
   fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
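
   /* On Gen9+ the sampler can shorten its writeback message, so size
    * regs_written by the highest destination component the shader actually
    * reads instead of always reserving a full vec4 per channel group.
    */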
   const unsigned dest_size = nir_tex_instr_dest_size(instr);
   if (devinfo->gen >= 9 &&
       instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
      unsigned write_mask = instr->dest.is_ssa ?
                            nir_ssa_def_components_read(&instr->dest.ssa) :
                            (1 << dest_size) - 1;
      assert(write_mask != 0); /* dead code should have been eliminated */
      inst->regs_written = _mesa_fls(write_mask) * dispatch_width / 8;
   } else {
      inst->regs_written = 4 * dispatch_width / 8;
   }

   if (srcs[TEX_LOGICAL_SRC_SHADOW_C].file != BAD_FILE)
      inst->shadow_compare = true;

   if (srcs[TEX_LOGICAL_SRC_OFFSET_VALUE].file == IMM)
      inst->offset = srcs[TEX_LOGICAL_SRC_OFFSET_VALUE].ud;

   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          key_tex->gather_channel_quirk_mask & (1 << texture)) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         inst->offset |= 2 << 16;
      } else {
         inst->offset |= instr->component << 16;
      }

      if (devinfo->gen == 6)
         emit_gen6_gather_wa(key_tex->gen6_gather_wa[texture], dst);
   }

   fs_reg nir_dest[4];
   for (unsigned i = 0; i < dest_size; i++)
      nir_dest[i] = offset(dst, bld, i);

   bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
                        instr->is_array;

   if (instr->op == nir_texop_query_levels) {
      /* # levels is in .w */
      nir_dest[0] = offset(dst, bld, 3);
   } else if (instr->op == nir_texop_txs && dest_size >= 3 &&
              (devinfo->gen < 7 || is_cube_array)) {
      fs_reg depth = offset(dst, bld, 2);
      fs_reg fixed_depth = vgrf(glsl_type::int_type);

      if (is_cube_array) {
         /* fixup #layers for cube map arrays */
         bld.emit(SHADER_OPCODE_INT_QUOTIENT, fixed_depth, depth, brw_imm_d(6));
      } else if (devinfo->gen < 7) {
         /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
         bld.emit_minmax(fixed_depth, depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
      }

      nir_dest[2] = fixed_depth;
   }

   bld.LOAD_PAYLOAD(get_nir_dest(instr->dest), nir_dest, dest_size, 0);
}

void
fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      bld.emit(BRW_OPCODE_BREAK);
      break;
   case nir_jump_continue:
      bld.emit(BRW_OPCODE_CONTINUE);
      break;
   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}

/**
 * This helper takes the result of a load operation that reads 32-bit elements
 * in this format:
 *
 * x x x x x x x x
 * y y y y y y y y
 * z z z z z z z z
 * w w w w w w w w
 *
 * and shuffles the data to get this:
 *
 * x y x y x y x y
 * x y x y x y x y
 * z w z w z w z w
 * z w z w z w z w
 *
 * which is exactly what we want if the load is reading 64-bit components
 * like doubles, where x represents the low 32 bits of the x double component
 * and y represents the high 32 bits of the x double component (likewise with
 * z and w for double component y). The parameter @components represents
 * the number of 64-bit components present in @src. This would typically be
 * 2 at most, since we can only fit 2 double elements in the result of a
 * vec4 load.
 *
 * Notice that @dst and @src can be the same register.
 */
void
shuffle_32bit_load_result_to_64bit_data(const fs_builder &bld,
                                        const fs_reg &dst,
                                        const fs_reg &src,
                                        uint32_t components)
{
   assert(type_sz(src.type) == 4);
   assert(type_sz(dst.type) == 8);

   /* A temporary that we will use to shuffle the 32-bit data of each
    * component in the vector into valid 64-bit data. We can't write directly
    * to dst because dst can be (and would usually be) the same as src
    * and in that case the first MOV in the loop below would overwrite the
    * data read in the second MOV.
    */
   fs_reg tmp = bld.vgrf(dst.type);

   for (unsigned i = 0; i < components; i++) {
      const fs_reg component_i = offset(src, bld, 2 * i);
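
      /* component_i addresses the pair of 32-bit registers holding the low
       * and high halves of 64-bit component i; the two MOVs below interleave
       * them into the low/high dword subscripts of tmp.
       */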
      bld.MOV(subscript(tmp, src.type, 0), component_i);
      bld.MOV(subscript(tmp, src.type, 1), offset(component_i, bld, 1));

      bld.MOV(offset(dst, bld, i), tmp);
   }
}

/**
 * This helper does the inverse operation of
 * SHUFFLE_32BIT_LOAD_RESULT_TO_64BIT_DATA.
 *
 * We need to do this when we are going to use untyped write messages that
 * operate with 32-bit components in order to arrange our 64-bit data to be
 * in the expected layout.
 *
 * Notice that callers of this function, unlike in the case of the inverse
 * operation, would typically need to call this with dst and src being
 * different registers, since they would otherwise corrupt the original
 * 64-bit data they are about to write. Because of this the function checks
 * that the src and dst regions involved in the operation do not overlap.
 */
void
shuffle_64bit_data_for_32bit_write(const fs_builder &bld,
                                   const fs_reg &dst,
                                   const fs_reg &src,
                                   uint32_t components)
{
   assert(type_sz(src.type) == 8);
   assert(type_sz(dst.type) == 4);

   assert(!src.in_range(dst, 2 * components * bld.dispatch_width() / 8));

   for (unsigned i = 0; i < components; i++) {
      const fs_reg component_i = offset(src, bld, i);
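
      /* Split 64-bit component i into its low and high dwords, writing them
       * to consecutive 32-bit channels of dst, which is the layout the
       * 32-bit untyped write messages expect.
       */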
      bld.MOV(offset(dst, bld, 2 * i), subscript(component_i, dst.type, 0));
      bld.MOV(offset(dst, bld, 2 * i + 1), subscript(component_i, dst.type, 1));
   }
}