/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "compiler/glsl/ir.h"
#include "brw_fs.h"
#include "brw_fs_surface_builder.h"
#include "brw_nir.h"

using namespace brw;
using namespace brw::surface_access;
void
fs_visitor::emit_nir_code()
{
   /* emit the arrays used for inputs and outputs - load/store intrinsics will
    * be converted to reads/writes of these arrays
    */
   nir_setup_outputs();
   nir_setup_uniforms();
   nir_emit_system_values();

   /* get the main function and emit it */
   nir_foreach_function(function, nir) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_emit_impl(function->impl);
   }
}
void
fs_visitor::nir_setup_outputs()
{
   if (stage == MESA_SHADER_TESS_CTRL || stage == MESA_SHADER_FRAGMENT)
      return;

   unsigned vec4s[VARYING_SLOT_TESS_MAX] = { 0, };

   /* Calculate the size of output registers in a separate pass, before
    * allocating them.  With ARB_enhanced_layouts, multiple output variables
    * may occupy the same slot, but have different type sizes.
    */
   nir_foreach_variable(var, &nir->outputs) {
      const int loc = var->data.driver_location;
      const unsigned var_vec4s =
         var->data.compact ? DIV_ROUND_UP(glsl_get_length(var->type), 4)
                           : type_size_vec4(var->type);
      vec4s[loc] = MAX2(vec4s[loc], var_vec4s);
   }

   for (unsigned loc = 0; loc < ARRAY_SIZE(vec4s);) {
      if (vec4s[loc] == 0) {
         loc++;
         continue;
      }

      unsigned reg_size = vec4s[loc];

      /* Check if there are any ranges that start within this range and extend
       * past it.  If so, include them in this allocation.
       */
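      /* For example, if vec4s[loc] == 2 and vec4s[loc + 1] == 3, the range
       * starting at loc + 1 ends 4 vec4s past loc, so reg_size grows from 2
       * to 4 and both ranges share a single allocation.
       */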
      for (unsigned i = 1; i < reg_size; i++)
         reg_size = MAX2(vec4s[i + loc] + i, reg_size);

      fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_F, 4 * reg_size);
      for (unsigned i = 0; i < reg_size; i++)
         outputs[loc + i] = offset(reg, bld, 4 * i);

      loc += reg_size;
   }
}
void
fs_visitor::nir_setup_uniforms()
{
   /* Only the first compile gets to set up uniforms. */
   if (push_constant_loc) {
      assert(pull_constant_loc);
      return;
   }

   uniforms = nir->num_uniforms / 4;

   if (stage == MESA_SHADER_COMPUTE) {
      /* Add a uniform for the thread local id.  It must be the last uniform
       * on the list.
       */
      assert(uniforms == prog_data->nr_params);
      uint32_t *param = brw_stage_prog_data_add_params(prog_data, 1);
      *param = BRW_PARAM_BUILTIN_SUBGROUP_ID;
      subgroup_id = fs_reg(UNIFORM, uniforms++, BRW_REGISTER_TYPE_UD);
   }
}
static void
emit_system_values_block(nir_block *block, fs_visitor *v)
{
   fs_reg *reg;

   nir_foreach_instr(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_vertex_id:
      case nir_intrinsic_load_base_vertex:
         unreachable("should be lowered by nir_lower_system_values().");

      case nir_intrinsic_load_vertex_id_zero_base:
      case nir_intrinsic_load_is_indexed_draw:
      case nir_intrinsic_load_first_vertex:
      case nir_intrinsic_load_instance_id:
      case nir_intrinsic_load_base_instance:
      case nir_intrinsic_load_draw_id:
         unreachable("should be lowered by brw_nir_lower_vs_inputs().");

      case nir_intrinsic_load_invocation_id:
         if (v->stage == MESA_SHADER_TESS_CTRL)
            break;
         assert(v->stage == MESA_SHADER_GEOMETRY);
         reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
         if (reg->file == BAD_FILE) {
            const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
            fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
            fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
            abld.SHR(iid, g1, brw_imm_ud(27u));
            *reg = iid;
         }
         break;

      case nir_intrinsic_load_sample_pos:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplepos_setup();
         break;

      case nir_intrinsic_load_sample_id:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_sampleid_setup();
         break;

      case nir_intrinsic_load_sample_mask_in:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         assert(v->devinfo->gen >= 7);
         reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_samplemaskin_setup();
         break;

      case nir_intrinsic_load_work_group_id:
         assert(v->stage == MESA_SHADER_COMPUTE);
         reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
         if (reg->file == BAD_FILE)
            *reg = *v->emit_cs_work_group_id_setup();
         break;

      case nir_intrinsic_load_helper_invocation:
         assert(v->stage == MESA_SHADER_FRAGMENT);
         reg = &v->nir_system_values[SYSTEM_VALUE_HELPER_INVOCATION];
         if (reg->file == BAD_FILE) {
            const fs_builder abld =
               v->bld.annotate("gl_HelperInvocation", NULL);

            /* On Gen6+ (gl_HelperInvocation is only exposed on Gen7+) the
             * pixel mask is in g1.7 of the thread payload.
             *
             * We move the per-channel pixel enable bit to the low bit of each
             * channel by shifting the byte containing the pixel mask by the
             * vector immediate 0x76543210UV.
             *
             * The region of <1,8,0> reads only 1 byte (the pixel masks for
             * subspans 0 and 1) in SIMD8 and an additional byte (the pixel
             * masks for subspans 2 and 3) in SIMD16.
             */
            fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);

            for (unsigned i = 0; i < DIV_ROUND_UP(v->dispatch_width, 16); i++) {
               const fs_builder hbld = abld.group(MIN2(16, v->dispatch_width), i);
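               /* brw_imm_v(0x76543210) is a packed vector immediate whose
                * nibbles give channels 0-7 the shift amounts <0,1,...,7>
                * (repeating for channels 8-15), so each channel shifts its
                * mask byte right far enough to leave its own pixel-enable
                * bit in bit 0.
                */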
               hbld.SHR(offset(shifted, hbld, i),
                        stride(retype(brw_vec1_grf(1 + i, 7),
                                      BRW_REGISTER_TYPE_UB),
                               1, 8, 0),
                        brw_imm_v(0x76543210));
            }

            /* A set bit in the pixel mask means the channel is enabled, but
             * that is the opposite of gl_HelperInvocation so we need to
             * invert the mask.
             *
             * The negate source-modifier bit of logical instructions on Gen8+
             * performs 1's complement negation, so we can use that instead of
             * a NOT instruction.
             */
            fs_reg inverted = negate(shifted);
            if (v->devinfo->gen < 8) {
               inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
               abld.NOT(inverted, shifted);
            }

            /* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
             * with 1 and negating.
             */
            fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
            abld.AND(anded, inverted, brw_imm_uw(1));

            fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
            abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
            *reg = dst;
         }
         break;

      default:
         break;
      }
   }
}
void
fs_visitor::nir_emit_system_values()
{
   nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
   for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
      nir_system_values[i] = fs_reg();
   }

   /* Always emit SUBGROUP_INVOCATION.  Dead code will clean it up if we
    * never end up using it.
    */
   {
      const fs_builder abld = bld.annotate("gl_SubgroupInvocation", NULL);
      fs_reg &reg = nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION];
      reg = abld.vgrf(BRW_REGISTER_TYPE_UW);

      const fs_builder allbld8 = abld.group(8, 0).exec_all();
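      /* The V immediate 0x76543210 expands to <0,1,2,...,7> across the first
       * eight UW channels; the ADDs below then build 8..15 and 16..31 for
       * wider dispatch widths, so each channel ends up holding its own
       * invocation index.
       */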
      allbld8.MOV(reg, brw_imm_v(0x76543210));
      if (dispatch_width > 8)
         allbld8.ADD(byte_offset(reg, 16), reg, brw_imm_uw(8u));
      if (dispatch_width > 16) {
         const fs_builder allbld16 = abld.group(16, 0).exec_all();
         allbld16.ADD(byte_offset(reg, 32), reg, brw_imm_uw(16u));
      }
   }

   nir_foreach_function(function, nir) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_foreach_block(block, function->impl) {
         emit_system_values_block(block, this);
      }
   }
}
/**
 * Returns a type based on a reference_type (word, float, half-float) and a
 * given bit_size.
 *
 * Reference BRW_REGISTER_TYPEs are HF, F, DF, W, D, UW and UD.
 *
 * @FIXME: 64-bit return types are always DF on integer types to maintain
 * compatibility with uses of DF prior to the introduction of int64
 * support.
 */
static brw_reg_type
brw_reg_type_from_bit_size(const unsigned bit_size,
                           const brw_reg_type reference_type)
{
   switch(reference_type) {
   case BRW_REGISTER_TYPE_HF:
   case BRW_REGISTER_TYPE_F:
   case BRW_REGISTER_TYPE_DF:
      switch(bit_size) {
      case 16:
         return BRW_REGISTER_TYPE_HF;
      case 32:
         return BRW_REGISTER_TYPE_F;
      case 64:
         return BRW_REGISTER_TYPE_DF;
      default:
         unreachable("Invalid bit size");
      }
   case BRW_REGISTER_TYPE_B:
   case BRW_REGISTER_TYPE_W:
   case BRW_REGISTER_TYPE_D:
   case BRW_REGISTER_TYPE_Q:
      switch(bit_size) {
      case 8:
         return BRW_REGISTER_TYPE_B;
      case 16:
         return BRW_REGISTER_TYPE_W;
      case 32:
         return BRW_REGISTER_TYPE_D;
      case 64:
         return BRW_REGISTER_TYPE_Q;
      default:
         unreachable("Invalid bit size");
      }
   case BRW_REGISTER_TYPE_UB:
   case BRW_REGISTER_TYPE_UW:
   case BRW_REGISTER_TYPE_UD:
   case BRW_REGISTER_TYPE_UQ:
      switch(bit_size) {
      case 8:
         return BRW_REGISTER_TYPE_UB;
      case 16:
         return BRW_REGISTER_TYPE_UW;
      case 32:
         return BRW_REGISTER_TYPE_UD;
      case 64:
         return BRW_REGISTER_TYPE_UQ;
      default:
         unreachable("Invalid bit size");
      }
   default:
      unreachable("Unknown type");
   }
}
void
fs_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = fs_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
      unsigned size = array_elems * reg->num_components;
      const brw_reg_type reg_type =
         brw_reg_type_from_bit_size(reg->bit_size, BRW_REGISTER_TYPE_F);
      nir_locals[reg->index] = bld.vgrf(reg_type, size);
   }

   nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
                             impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}
void
fs_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}
void
fs_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* first, put the condition into f0 */
   fs_inst *inst = bld.MOV(bld.null_reg_d(),
                           retype(get_nir_src(if_stmt->condition),
                                  BRW_REGISTER_TYPE_D));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   bld.IF(BRW_PREDICATE_NORMAL);

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   bld.emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   bld.emit(BRW_OPCODE_ENDIF);

   if (devinfo->gen < 7)
      limit_dispatch_width(16, "Non-uniform control flow unsupported "
                           "in SIMD32 mode.");
}
void
fs_visitor::nir_emit_loop(nir_loop *loop)
{
   bld.emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   bld.emit(BRW_OPCODE_WHILE);

   if (devinfo->gen < 7)
      limit_dispatch_width(16, "Non-uniform control flow unsupported "
                           "in SIMD32 mode.");
}
void
fs_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(instr, block) {
      nir_emit_instr(instr);
   }
}
void
fs_visitor::nir_emit_instr(nir_instr *instr)
{
   const fs_builder abld = bld.annotate(NULL, instr);

   switch (instr->type) {
   case nir_instr_type_alu:
      nir_emit_alu(abld, nir_instr_as_alu(instr));
      break;

   case nir_instr_type_deref:
      /* Derefs can exist for images but they do nothing */
      break;

   case nir_instr_type_intrinsic:
      switch (stage) {
      case MESA_SHADER_VERTEX:
         nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_TESS_CTRL:
         nir_emit_tcs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_TESS_EVAL:
         nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_GEOMETRY:
         nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_FRAGMENT:
         nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      case MESA_SHADER_COMPUTE:
         nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
         break;
      default:
         unreachable("unsupported shader stage");
      }
      break;

   case nir_instr_type_tex:
      nir_emit_texture(abld, nir_instr_as_tex(instr));
      break;

   case nir_instr_type_load_const:
      nir_emit_load_const(abld, nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_ssa_undef:
      /* We create a new VGRF for undefs on every use (by handling
       * them in get_nir_src()), rather than for each definition.
       * This helps register coalescing eliminate MOVs from undef.
       */
      break;

   case nir_instr_type_jump:
      nir_emit_jump(abld, nir_instr_as_jump(instr));
      break;

   default:
      unreachable("unknown instruction type");
   }
}
/**
 * Recognizes a parent instruction of nir_op_extract_* and changes the type
 * to match the parent instruction.
 */
bool
fs_visitor::optimize_extract_to_float(nir_alu_instr *instr,
                                      const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       !instr->src[0].src.ssa->parent_instr)
      return false;

   if (instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *src0 =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   if (src0->op != nir_op_extract_u8 && src0->op != nir_op_extract_u16 &&
       src0->op != nir_op_extract_i8 && src0->op != nir_op_extract_i16)
      return false;

   nir_const_value *element = nir_src_as_const_value(src0->src[1].src);
   assert(element != NULL);

   /* Element type to extract. */
   const brw_reg_type type = brw_int_type(
      src0->op == nir_op_extract_u16 || src0->op == nir_op_extract_i16 ? 2 : 1,
      src0->op == nir_op_extract_i16 || src0->op == nir_op_extract_i8);

   fs_reg op0 = get_nir_src(src0->src[0].src);
   op0.type = brw_type_for_nir_type(devinfo,
      (nir_alu_type)(nir_op_infos[src0->op].input_types[0] |
                     nir_src_bit_size(src0->src[0].src)));
   op0 = offset(op0, bld, src0->src[0].swizzle[0]);

   set_saturate(instr->dest.saturate,
                bld.MOV(result, subscript(op0, type, element->u32[0])));
   return true;
}
bool
fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
                                         const fs_reg &result)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
      return false;

   nir_intrinsic_instr *src0 =
      nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);

   if (src0->intrinsic != nir_intrinsic_load_front_face)
      return false;

   nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
   if (!value1 || fabsf(value1->f32[0]) != 1.0f)
      return false;

   nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
   if (!value2 || fabsf(value2->f32[0]) != 1.0f)
      return false;

   fs_reg tmp = vgrf(glsl_type::int_type);

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp.1<2>W  g0.0<0,1,0>W  0x00003f80W
       *    and(8) dst<1>D    tmp<8,8,1>D   0xbf800000D
       *
       * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES.
       */
      if (value1->f32[0] == -1.0f) {
         g0.negate = true;
      }

      bld.OR(subscript(tmp, BRW_REGISTER_TYPE_W, 1),
             g0, brw_imm_uw(0x3f80));
   } else {
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
       *
       *    or(8)  tmp<1>D  g1.6<0,1,0>D  0x3f800000D
       *    and(8) dst<1>D  tmp<8,8,1>D   0xbf800000D
       *
       * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
       *
       * This negation looks like it's safe in practice, because bits 0:4 will
       * surely be TRIANGLES.
       */
      if (value1->f32[0] == -1.0f) {
         g1_6.negate = true;
      }

      bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
   }
   bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));

   return true;
}
static void
emit_find_msb_using_lzd(const fs_builder &bld,
                        const fs_reg &result,
                        const fs_reg &src,
                        bool is_signed)
{
   fs_inst *inst;
   fs_reg temp = src;

   if (is_signed) {
      /* LZD of an absolute value source almost always does the right
       * thing.  There are two problem values:
       *
       * * 0x80000000.  Since abs(0x80000000) == 0x80000000, LZD returns
       *   0.  However, findMSB(int(0x80000000)) == 30.
       *
       * * 0xffffffff.  Since abs(0xffffffff) == 1, LZD returns
       *   31.  Section 8.8 (Integer Functions) of the GLSL 4.50 spec says:
       *
       *       For a value of zero or negative one, -1 will be returned.
       *
       * * Negative powers of two.  LZD(abs(-(1<<x))) returns x, but
       *   findMSB(-(1<<x)) should return x-1.
       *
       * For all negative number cases, including 0x80000000 and
       * 0xffffffff, the correct value is obtained from LZD if instead of
       * negating the (already negative) value the logical-not is used.  A
       * conditional logical-not can be achieved in two instructions.
       */
      temp = bld.vgrf(BRW_REGISTER_TYPE_D);
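      /* ASR yields all ones for a negative src and all zeros otherwise, so
       * XOR-ing the result with src computes ~src for negative inputs and
       * leaves non-negative inputs unchanged.
       */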
      bld.ASR(temp, src, brw_imm_d(31));
      bld.XOR(temp, temp, src);
   }

   bld.LZD(retype(result, BRW_REGISTER_TYPE_UD),
           retype(temp, BRW_REGISTER_TYPE_UD));

   /* LZD counts from the MSB side, while GLSL's findMSB() wants the count
    * from the LSB side.  Subtract the result from 31 to convert the MSB
    * count into an LSB count.  If no bits are set, LZD will return 32.
    * 31-32 = -1, which is exactly what findMSB() is supposed to return.
    */
   inst = bld.ADD(result, retype(result, BRW_REGISTER_TYPE_D), brw_imm_d(31));
   inst->src[0].negate = true;
}
static brw_rnd_mode
brw_rnd_mode_from_nir_op (const nir_op op) {
   switch (op) {
   case nir_op_f2f16_rtz:
      return BRW_RND_MODE_RTZ;
   case nir_op_f2f16_rtne:
      return BRW_RND_MODE_RTNE;
   default:
      unreachable("Operation doesn't support rounding mode");
   }
}
void
fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
{
   struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
   fs_inst *inst;

   fs_reg result = get_nir_dest(instr->dest.dest);
   result.type = brw_type_for_nir_type(devinfo,
      (nir_alu_type)(nir_op_infos[instr->op].output_type |
                     nir_dest_bit_size(instr->dest.dest)));

   fs_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src);
      op[i].type = brw_type_for_nir_type(devinfo,
         (nir_alu_type)(nir_op_infos[instr->op].input_types[i] |
                        nir_src_bit_size(instr->src[i].src)));
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   /* We get a bunch of mov's out of the from_ssa pass and they may still
    * be vectorized.  We'll handle them as a special-case.  We'll also
    * handle vecN here because it's basically the same thing.
    */
   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4: {
      fs_reg temp = result;
      bool need_extra_copy = false;
      for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
         if (!instr->src[i].src.is_ssa &&
             instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
            need_extra_copy = true;
            temp = bld.vgrf(result.type, 4);
            break;
         }
      }

      for (unsigned i = 0; i < 4; i++) {
         if (!(instr->dest.write_mask & (1 << i)))
            continue;

         if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[0], bld, instr->src[0].swizzle[i]));
         } else {
            inst = bld.MOV(offset(temp, bld, i),
                           offset(op[i], bld, instr->src[i].swizzle[0]));
         }
         inst->saturate = instr->dest.saturate;
      }

      /* In this case the source and destination registers were the same,
       * so we need to insert an extra set of moves in order to deal with
       * any swizzling.
       */
      if (need_extra_copy) {
         for (unsigned i = 0; i < 4; i++) {
            if (!(instr->dest.write_mask & (1 << i)))
               continue;

            bld.MOV(offset(result, bld, i), offset(temp, bld, i));
         }
      }
      return;
   }
   default:
      break;
   }

   /* At this point, we have dealt with any instruction that operates on
    * more than a single channel.  Therefore, we can just adjust the source
    * and destination registers for that channel and emit the instruction.
    */
   unsigned channel = 0;
   if (nir_op_infos[instr->op].output_size == 0) {
      /* Since NIR is doing the scalarizing for us, we should only ever see
       * vectorized operations with a single channel.
       */
      assert(_mesa_bitcount(instr->dest.write_mask) == 1);
      channel = ffs(instr->dest.write_mask) - 1;

      result = offset(result, bld, channel);
   }

   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      assert(nir_op_infos[instr->op].input_sizes[i] < 2);
      op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
   }

   switch (instr->op) {
   case nir_op_i2f32:
   case nir_op_u2f32:
      if (optimize_extract_to_float(instr, result))
         return;
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2f16_rtne:
   case nir_op_f2f16_rtz:
      bld.emit(SHADER_OPCODE_RND_MODE, bld.null_reg_ud(),
               brw_imm_d(brw_rnd_mode_from_nir_op(instr->op)));
      /* fallthrough */

      /* In theory, it would be better to use BRW_OPCODE_F32TO16.  Depending
       * on the HW gen, it is a special hw opcode or just a MOV, and
       * brw_F32TO16 (at brw_eu_emit) would do the work to choose.
       *
       * But if we want to use that opcode, we need to provide support on
       * different optimizations and lowerings.  As HF support is currently
       * only for gen8+, it is better to use the MOV directly, and use
       * BRW_OPCODE_F32TO16 when/if we work on HF support for gen7.
       */
   case nir_op_f2f16:
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2f64:
   case nir_op_f2i64:
   case nir_op_f2u64:
   case nir_op_i2f64:
   case nir_op_i2i64:
   case nir_op_u2f64:
   case nir_op_u2u64:
      /* CHV PRM, vol07, 3D Media GPGPU Engine, Register Region Restrictions:
       *
       *    "When source or destination is 64b (...), regioning in Align1
       *     must follow these rules:
       *
       *     1. Source and destination horizontal stride must be aligned to
       *        the same qword.
       *     ..."
       *
       * This means that conversions from bit-sizes smaller than 64-bit to
       * 64-bit need to have the source data elements aligned to 64-bit.
       * This restriction does not apply to BDW and later.
       */
      if (nir_dest_bit_size(instr->dest.dest) == 64 &&
          nir_src_bit_size(instr->src[0].src) < 64 &&
          (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
         fs_reg tmp = bld.vgrf(result.type, 1);
         tmp = subscript(tmp, op[0].type, 0);
         inst = bld.MOV(tmp, op[0]);
         inst = bld.MOV(result, tmp);
         inst->saturate = instr->dest.saturate;
         break;
      }
      /* fallthrough */
   case nir_op_f2f32:
   case nir_op_f2i32:
   case nir_op_f2u32:
   case nir_op_f2i16:
   case nir_op_f2u16:
   case nir_op_i2i32:
   case nir_op_u2u32:
   case nir_op_i2i16:
   case nir_op_u2u16:
   case nir_op_i2f16:
   case nir_op_u2f16:
      inst = bld.MOV(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsign: {
      if (op[0].abs) {
         /* Straightforward since the source can be assumed to be either
          * strictly >= 0 or strictly <= 0 depending on the setting of the
          * negate flag.
          */
         set_condmod(BRW_CONDITIONAL_NZ, bld.MOV(result, op[0]));

         inst = (op[0].negate)
            ? bld.MOV(result, brw_imm_f(-1.0f))
            : bld.MOV(result, brw_imm_f(1.0f));

         set_predicate(BRW_PREDICATE_NORMAL, inst);

         if (instr->dest.saturate)
            inst->saturate = true;

      } else if (type_sz(op[0].type) < 8) {
         /* AND(val, 0x80000000) gives the sign bit.
          *
          * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
          * zero.
          */
         bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);

         fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
         op[0].type = BRW_REGISTER_TYPE_UD;
         result.type = BRW_REGISTER_TYPE_UD;
         bld.AND(result_int, op[0], brw_imm_ud(0x80000000u));
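         /* result now holds just the sign bit, i.e. +/-0.0f.  The predicated
          * OR below merges in the bit pattern of 1.0f (0x3f800000) only for
          * non-zero inputs, yielding +/-1.0f while zero stays signed zero.
          */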
         inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u));
         inst->predicate = BRW_PREDICATE_NORMAL;
         if (instr->dest.saturate) {
            inst = bld.MOV(result, result);
            inst->saturate = true;
         }
      } else {
         /* For doubles we do the same but we need to consider:
          *
          * - 2-src instructions can't operate with 64-bit immediates
          * - The sign is encoded in the high 32-bit of each DF
          * - We need to produce a DF result.
          */
         fs_reg zero = vgrf(glsl_type::double_type);
         bld.MOV(zero, setup_imm_df(bld, 0.0));
         bld.CMP(bld.null_reg_df(), op[0], zero, BRW_CONDITIONAL_NZ);

         bld.MOV(result, zero);

         fs_reg r = subscript(result, BRW_REGISTER_TYPE_UD, 1);
         bld.AND(r, subscript(op[0], BRW_REGISTER_TYPE_UD, 1),
                 brw_imm_ud(0x80000000u));

         set_predicate(BRW_PREDICATE_NORMAL,
                       bld.OR(r, r, brw_imm_ud(0x3ff00000u)));

         if (instr->dest.saturate) {
            inst = bld.MOV(result, result);
            inst->saturate = true;
         }
      }
      break;
   }

   case nir_op_isign: {
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *              -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      uint32_t bit_size = nir_dest_bit_size(instr->dest.dest);
      assert(bit_size == 32 || bit_size == 16);

      fs_reg zero = bit_size == 32 ? brw_imm_d(0) : brw_imm_w(0);
      fs_reg one = bit_size == 32 ? brw_imm_d(1) : brw_imm_w(1);
      fs_reg shift = bit_size == 32 ? brw_imm_d(31) : brw_imm_w(15);

      bld.CMP(bld.null_reg_d(), op[0], zero, BRW_CONDITIONAL_G);
      bld.ASR(result, op[0], shift);
      inst = bld.OR(result, result, one);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_frcp:
      inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin:
      inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fcos:
      inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      } else {
         inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_fine:
      inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddx_coarse:
      inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy:
      if (fs_key->high_quality_derivatives) {
         inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
      } else {
         inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
      }
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_fine:
      inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fddy_coarse:
      inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_iadd:
   case nir_op_fadd:
      inst = bld.ADD(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = bld.MUL(result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.MUL(result, op[0], op[1]);
      break;

   case nir_op_imul_high:
   case nir_op_umul_high:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
      break;

   case nir_op_idiv:
   case nir_op_udiv:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
      break;

   case nir_op_uadd_carry:
      unreachable("Should have been lowered by carry_to_arith().");

   case nir_op_usub_borrow:
      unreachable("Should have been lowered by borrow_to_arith().");

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
      break;

   case nir_op_imod: {
      /* Get a regular C-style remainder.  If a % b == 0, set the predicate. */
      bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = bld.MOV(bld.null_reg_d(), result);
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different.  We can then use a conditional modifier to
       * turn that into a predicate.  This leads us to an XOR.l instruction.
       *
       * Technically, according to the PRM, you're not allowed to use .l on a
       * XOR instruction.  However, empirical experiments and Curro's reading
       * of the simulator source both indicate that it's safe.
       */
      fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_D);
      inst = bld.XOR(tmp, op[0], op[1]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = bld.ADD(result, result, op[1]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_flt:
   case nir_op_fge:
   case nir_op_feq:
   case nir_op_fne: {
      fs_reg dest = result;

      const uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
      if (bit_size != 32)
         dest = bld.vgrf(op[0].type, 1);

      brw_conditional_mod cond;
      switch (instr->op) {
      case nir_op_flt:
         cond = BRW_CONDITIONAL_L;
         break;
      case nir_op_fge:
         cond = BRW_CONDITIONAL_GE;
         break;
      case nir_op_feq:
         cond = BRW_CONDITIONAL_Z;
         break;
      case nir_op_fne:
         cond = BRW_CONDITIONAL_NZ;
         break;
      default:
         unreachable("bad opcode");
      }

      bld.CMP(dest, op[0], op[1], cond);

      if (bit_size > 32) {
         bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
      } else if (bit_size < 32) {
         /* When we convert the result to 32-bit we need to be careful and do
          * it as a signed conversion to get sign extension (for 32-bit true)
          */
         const brw_reg_type src_type =
            brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_D);

         bld.MOV(retype(result, BRW_REGISTER_TYPE_D), retype(dest, src_type));
      }
      break;
   }

   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_ieq:
   case nir_op_ine: {
      fs_reg dest = result;

      const uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
      if (bit_size != 32)
         dest = bld.vgrf(op[0].type, 1);

      brw_conditional_mod cond;
      switch (instr->op) {
      case nir_op_ilt:
      case nir_op_ult:
         cond = BRW_CONDITIONAL_L;
         break;
      case nir_op_ige:
      case nir_op_uge:
         cond = BRW_CONDITIONAL_GE;
         break;
      case nir_op_ieq:
         cond = BRW_CONDITIONAL_Z;
         break;
      case nir_op_ine:
         cond = BRW_CONDITIONAL_NZ;
         break;
      default:
         unreachable("bad opcode");
      }
      bld.CMP(dest, op[0], op[1], cond);

      if (bit_size > 32) {
         bld.MOV(result, subscript(dest, BRW_REGISTER_TYPE_UD, 0));
      } else if (bit_size < 32) {
         /* When we convert the result to 32-bit we need to be careful and do
          * it as a signed conversion to get sign extension (for 32-bit true)
          */
         const brw_reg_type src_type =
            brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_D);

         bld.MOV(retype(result, BRW_REGISTER_TYPE_D), retype(dest, src_type));
      }
      break;
   }

   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      bld.NOT(result, op[0]);
      break;
   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.XOR(result, op[0], op[1]);
      break;
   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.OR(result, op[0], op[1]);
      break;
   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      bld.AND(result, op[0], op[1]);
      break;

   case nir_op_fdot2:
   case nir_op_fdot3:
   case nir_op_fdot4:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      unreachable("Lowered by nir_lower_alu_reductions");

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      bld.MOV(result, negate(op[0]));
      break;

   case nir_op_i2b:
   case nir_op_f2b: {
      uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
      if (bit_size == 64) {
         /* two-argument instructions can't take 64-bit immediates */
         fs_reg zero;
         fs_reg tmp;

         if (instr->op == nir_op_f2b) {
            zero = vgrf(glsl_type::double_type);
            tmp = vgrf(glsl_type::double_type);
            bld.MOV(zero, setup_imm_df(bld, 0.0));
         } else {
            zero = vgrf(glsl_type::int64_t_type);
            tmp = vgrf(glsl_type::int64_t_type);
            bld.MOV(zero, brw_imm_q(0));
         }

         /* A SIMD16 execution needs to be split in two instructions, so use
          * a vgrf instead of the flag register as dst so instruction splitting
          * works.
          */
         bld.CMP(tmp, op[0], zero, BRW_CONDITIONAL_NZ);
         bld.MOV(result, subscript(tmp, BRW_REGISTER_TYPE_UD, 0));
      } else {
         fs_reg zero;
         if (bit_size == 32) {
            zero = instr->op == nir_op_f2b ? brw_imm_f(0.0f) : brw_imm_d(0);
         } else {
            assert(bit_size == 16);
            zero = instr->op == nir_op_f2b ?
               retype(brw_imm_w(0), BRW_REGISTER_TYPE_HF) : brw_imm_w(0);
         }
         bld.CMP(result, op[0], zero, BRW_CONDITIONAL_NZ);
      }
      break;
   }

   case nir_op_ftrunc:
      inst = bld.RNDZ(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
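      /* Implemented as ceil(x) == -floor(-x): negate the source, round
       * down, then negate the temporary result.
       */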
      op[0].negate = !op[0].negate;
      fs_reg temp = vgrf(glsl_type::float_type);
      bld.RNDD(temp, op[0]);
      temp.negate = true;
      inst = bld.MOV(result, temp);
      inst->saturate = instr->dest.saturate;
      break;
   }
   case nir_op_ffloor:
      inst = bld.RNDD(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_ffract:
      inst = bld.FRC(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_fround_even:
      inst = bld.RNDE(result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fquantize2f16: {
      fs_reg tmp16 = bld.vgrf(BRW_REGISTER_TYPE_D);
      fs_reg tmp32 = bld.vgrf(BRW_REGISTER_TYPE_F);
      fs_reg zero = bld.vgrf(BRW_REGISTER_TYPE_F);

      /* The destination stride must be at least as big as the source stride. */
      tmp16.type = BRW_REGISTER_TYPE_W;
      tmp16.stride = 2;

      /* Check for denormal */
      fs_reg abs_src0 = op[0];
      abs_src0.abs = true;
      bld.CMP(bld.null_reg_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
              BRW_CONDITIONAL_L);
      /* Get the appropriately signed zero */
      bld.AND(retype(zero, BRW_REGISTER_TYPE_UD),
              retype(op[0], BRW_REGISTER_TYPE_UD),
              brw_imm_ud(0x80000000));
      /* Do the actual F32 -> F16 -> F32 conversion */
      bld.emit(BRW_OPCODE_F32TO16, tmp16, op[0]);
      bld.emit(BRW_OPCODE_F16TO32, tmp32, tmp16);
      /* Select that or zero based on normal status */
      inst = bld.SEL(result, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_imin:
   case nir_op_umin:
   case nir_op_fmin:
      inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_L);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imax:
   case nir_op_umax:
   case nir_op_fmax:
      inst = bld.emit_minmax(result, op[0], op[1], BRW_CONDITIONAL_GE);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_pack_snorm_2x16:
   case nir_op_pack_snorm_4x8:
   case nir_op_pack_unorm_2x16:
   case nir_op_pack_unorm_4x8:
   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_snorm_4x8:
   case nir_op_unpack_unorm_2x16:
   case nir_op_unpack_unorm_4x8:
   case nir_op_unpack_half_2x16:
   case nir_op_pack_half_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_unpack_half_2x16_split_x:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;
   case nir_op_unpack_half_2x16_split_y:
      inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_pack_64_2x32_split:
   case nir_op_pack_32_2x16_split:
      bld.emit(FS_OPCODE_PACK, result, op[0], op[1]);
      break;

   case nir_op_unpack_64_2x32_split_x:
   case nir_op_unpack_64_2x32_split_y: {
      if (instr->op == nir_op_unpack_64_2x32_split_x)
         bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 0));
      else
         bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UD, 1));
      break;
   }

   case nir_op_unpack_32_2x16_split_x:
   case nir_op_unpack_32_2x16_split_y: {
      if (instr->op == nir_op_unpack_32_2x16_split_x)
         bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UW, 0));
      else
         bld.MOV(result, subscript(op[0], BRW_REGISTER_TYPE_UW, 1));
      break;
   }

   case nir_op_fpow:
      inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bitfield_reverse:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.BFREV(result, op[0]);
      break;

   case nir_op_bit_count:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.CBIT(result, op[0]);
      break;

   case nir_op_ufind_msb: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      emit_find_msb_using_lzd(bld, result, op[0], false);
      break;
   }

   case nir_op_ifind_msb: {
      assert(nir_dest_bit_size(instr->dest.dest) < 64);

      if (devinfo->gen < 7) {
         emit_find_msb_using_lzd(bld, result, op[0], true);
      } else {
         bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);

         /* FBH counts from the MSB side, while GLSL's findMSB() wants the
          * count from the LSB side.  If FBH didn't return an error
          * (0xFFFFFFFF), then subtract the result from 31 to convert the MSB
          * count into an LSB count.
          */
         bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);

         inst = bld.ADD(result, result, brw_imm_d(31));
         inst->predicate = BRW_PREDICATE_NORMAL;
         inst->src[0].negate = true;
      }
      break;
   }

   case nir_op_find_lsb:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);

      if (devinfo->gen < 7) {
         fs_reg temp = vgrf(glsl_type::int_type);

         /* (x & -x) generates a value that consists of only the LSB of x.
          * For all powers of 2, findMSB(y) == findLSB(y).
          */
         fs_reg src = retype(op[0], BRW_REGISTER_TYPE_D);
         fs_reg negated_src = src;

         /* One must be negated, and the other must be non-negated.  It
          * doesn't matter which is which.
          */
         negated_src.negate = true;

         bld.AND(temp, src, negated_src);
         emit_find_msb_using_lzd(bld, result, temp, false);
      } else {
         bld.FBL(result, op[0]);
      }
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");
   case nir_op_ubfe:
   case nir_op_ibfe:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.BFE(result, op[2], op[1], op[0]);
      break;
   case nir_op_bfm:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.BFI1(result, op[0], op[1]);
      break;
   case nir_op_bfi:
      assert(nir_dest_bit_size(instr->dest.dest) < 64);
      bld.BFI2(result, op[0], op[1], op[2]);
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");

   case nir_op_ishl:
   case nir_op_ishr:
   case nir_op_ushr: {
      fs_reg shift_count = op[1];
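      /* On CHV and BXT a Q-typed result falls under the 64-bit regioning
       * restrictions quoted earlier in this file, so a 32-bit shift count
       * read from a VGRF is spread out to a stride of 2 below, keeping each
       * count aligned with its 64-bit data element.
       */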
      if (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo)) {
         if (op[1].file == VGRF &&
             (result.type == BRW_REGISTER_TYPE_Q ||
              result.type == BRW_REGISTER_TYPE_UQ)) {
            shift_count = fs_reg(VGRF, alloc.allocate(dispatch_width / 4),
                                 BRW_REGISTER_TYPE_UD);
            shift_count.stride = 2;
            bld.MOV(shift_count, op[1]);
         }
      }

      switch (instr->op) {
      case nir_op_ishl:
         bld.SHL(result, op[0], shift_count);
         break;
      case nir_op_ishr:
         bld.ASR(result, op[0], shift_count);
         break;
      case nir_op_ushr:
         bld.SHR(result, op[0], shift_count);
         break;
      default:
         unreachable("not reached");
      }
      break;
   }

   case nir_op_pack_half_2x16_split:
      bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
      break;

   case nir_op_ffma:
      inst = bld.MAD(result, op[2], op[1], op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = bld.LRP(result, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      if (optimize_frontfacing_ternary(instr, result))
         return;

      bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
      inst = bld.SEL(result, op[1], op[2]);
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_extract_u8:
   case nir_op_extract_i8: {
      nir_const_value *byte = nir_src_as_const_value(instr->src[1].src);
      assert(byte != NULL);

      /* The PRMs say:
       *
       *    BDW+
       *    There is no direct conversion from B/UB to Q/UQ or Q/UQ to B/UB.
       *    Use two instructions and a word or DWord intermediate integer type.
       */
      if (nir_dest_bit_size(instr->dest.dest) == 64) {
         const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i8);

         if (instr->op == nir_op_extract_i8) {
            /* If we need to sign extend, extract to a word first */
            fs_reg w_temp = bld.vgrf(BRW_REGISTER_TYPE_W);
            bld.MOV(w_temp, subscript(op[0], type, byte->u32[0]));
            bld.MOV(result, w_temp);
         } else {
            /* Otherwise use an AND with 0xff and a word type */
            bld.AND(result, subscript(op[0], type, byte->u32[0] / 2), brw_imm_uw(0xff));
         }
      } else {
         const brw_reg_type type = brw_int_type(1, instr->op == nir_op_extract_i8);
         bld.MOV(result, subscript(op[0], type, byte->u32[0]));
      }
      break;
   }

   case nir_op_extract_u16:
   case nir_op_extract_i16: {
      const brw_reg_type type = brw_int_type(2, instr->op == nir_op_extract_i16);
      nir_const_value *word = nir_src_as_const_value(instr->src[1].src);
      assert(word != NULL);
      bld.MOV(result, subscript(op[0], type, word->u32[0]));
      break;
   }

   default:
      unreachable("unhandled instruction");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      fs_reg masked = vgrf(glsl_type::int_type);
      bld.AND(masked, result, brw_imm_d(1));
      masked.negate = true;
      bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
   }
}
void
fs_visitor::nir_emit_load_const(const fs_builder &bld,
                                nir_load_const_instr *instr)
{
   const brw_reg_type reg_type =
      brw_reg_type_from_bit_size(instr->def.bit_size, BRW_REGISTER_TYPE_D);
   fs_reg reg = bld.vgrf(reg_type, instr->def.num_components);

   switch (instr->def.bit_size) {
   case 8:
      for (unsigned i = 0; i < instr->def.num_components; i++)
         bld.MOV(offset(reg, bld, i), setup_imm_b(bld, instr->value.i8[i]));
      break;

   case 16:
      for (unsigned i = 0; i < instr->def.num_components; i++)
         bld.MOV(offset(reg, bld, i), brw_imm_w(instr->value.i16[i]));
      break;

   case 32:
      for (unsigned i = 0; i < instr->def.num_components; i++)
         bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i32[i]));
      break;

   case 64:
      assert(devinfo->gen >= 7);
      if (devinfo->gen == 7) {
         /* We don't get 64-bit integer types until gen8 */
         for (unsigned i = 0; i < instr->def.num_components; i++) {
            bld.MOV(retype(offset(reg, bld, i), BRW_REGISTER_TYPE_DF),
                    setup_imm_df(bld, instr->value.f64[i]));
         }
      } else {
         for (unsigned i = 0; i < instr->def.num_components; i++)
            bld.MOV(offset(reg, bld, i), brw_imm_q(instr->value.i64[i]));
      }
      break;

   default:
      unreachable("Invalid bit size");
   }

   nir_ssa_values[instr->def.index] = reg;
}
fs_reg
fs_visitor::get_nir_src(const nir_src &src)
{
   fs_reg reg;
   if (src.is_ssa) {
      if (src.ssa->parent_instr->type == nir_instr_type_ssa_undef) {
         const brw_reg_type reg_type =
            brw_reg_type_from_bit_size(src.ssa->bit_size, BRW_REGISTER_TYPE_D);
         reg = bld.vgrf(reg_type, src.ssa->num_components);
      } else {
         reg = nir_ssa_values[src.ssa->index];
      }
   } else {
      /* We don't handle indirects on locals */
      assert(src.reg.indirect == NULL);
      reg = offset(nir_locals[src.reg.reg->index], bld,
                   src.reg.base_offset * src.reg.reg->num_components);
   }

   if (nir_src_bit_size(src) == 64 && devinfo->gen == 7) {
      /* The only 64-bit type available on gen7 is DF, so use that. */
      reg.type = BRW_REGISTER_TYPE_DF;
   } else {
      /* To avoid floating-point denorm flushing problems, set the type by
       * default to an integer type - instructions that need floating point
       * semantics will set this to F if they need to
       */
      reg.type = brw_reg_type_from_bit_size(nir_src_bit_size(src),
                                            BRW_REGISTER_TYPE_D);
   }

   return reg;
}
/**
 * Return an IMM for constants; otherwise call get_nir_src() as normal.
 *
 * This function should not be called on any value which may be 64 bits.
 * We could theoretically support 64-bit on gen8+ but we choose not to
 * because it wouldn't work in general (no gen7 support) and there are
 * enough restrictions in 64-bit immediates that you can't take the return
 * value and treat it the same as the result of get_nir_src().
 */
fs_reg
fs_visitor::get_nir_src_imm(const nir_src &src)
{
   nir_const_value *val = nir_src_as_const_value(src);
   assert(nir_src_bit_size(src) == 32);
   return val ? fs_reg(brw_imm_d(val->i32[0])) : get_nir_src(src);
}
fs_reg
fs_visitor::get_nir_dest(const nir_dest &dest)
{
   if (dest.is_ssa) {
      const brw_reg_type reg_type =
         brw_reg_type_from_bit_size(dest.ssa.bit_size,
                                    dest.ssa.bit_size == 8 ?
                                    BRW_REGISTER_TYPE_D :
                                    BRW_REGISTER_TYPE_F);
      nir_ssa_values[dest.ssa.index] =
         bld.vgrf(reg_type, dest.ssa.num_components);
      return nir_ssa_values[dest.ssa.index];
   } else {
      /* We don't handle indirects on locals */
      assert(dest.reg.indirect == NULL);
      return offset(nir_locals[dest.reg.reg->index], bld,
                    dest.reg.base_offset * dest.reg.reg->num_components);
   }
}
void
fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
                         unsigned wr_mask)
{
   for (unsigned i = 0; i < 4; i++) {
      if (!((wr_mask >> i) & 1))
         continue;

      fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
      new_inst->dst = offset(new_inst->dst, bld, i);
      for (unsigned j = 0; j < new_inst->sources; j++)
         if (new_inst->src[j].file == VGRF)
            new_inst->src[j] = offset(new_inst->src[j], bld, i);

      bld.emit(new_inst);
   }
}
static fs_inst *
emit_pixel_interpolater_send(const fs_builder &bld,
                             enum opcode opcode,
                             const fs_reg &dst,
                             const fs_reg &src,
                             const fs_reg &desc,
                             glsl_interp_mode interpolation)
{
   struct brw_wm_prog_data *wm_prog_data =
      brw_wm_prog_data(bld.shader->stage_prog_data);

   fs_inst *inst = bld.emit(opcode, dst, src, desc);
   /* 2 floats per slot returned */
   inst->size_written = 2 * dst.component_size(inst->exec_size);
   inst->pi_noperspective = interpolation == INTERP_MODE_NOPERSPECTIVE;

   wm_prog_data->pulls_bary = true;

   return inst;
}
/**
 * Computes 1 << x, given a D/UD register containing some value x.
 */
static fs_reg
intexp2(const fs_builder &bld, const fs_reg &x)
{
   assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);

   fs_reg result = bld.vgrf(x.type, 1);
   fs_reg one = bld.vgrf(x.type, 1);
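   /* An EU ALU instruction can only take an immediate in its final source
    * slot, so the constant 1 has to be loaded into a register before it can
    * be used as the first source of the SHL.
    */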
   bld.MOV(one, retype(brw_imm_d(1), one.type));
   bld.SHL(result, one, x);
   return result;
}
void
fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);

   if (gs_compile->control_data_header_size_bits == 0)
      return;

   /* We can only do EndPrimitive() functionality when the control data
    * consists of cut bits.  Fortunately, the only time it isn't is when the
    * output type is points, in which case EndPrimitive() is a no-op.
    */
   if (gs_prog_data->control_data_format !=
       GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
      return;
   }

   /* Cut bits use one bit per vertex. */
   assert(gs_compile->control_data_bits_per_vertex == 1);

   fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
   vertex_count.type = BRW_REGISTER_TYPE_UD;

   /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
    * vertex n, 0 otherwise.  So all we need to do here is mark bit
    * (vertex_count - 1) % 32 in the cut_bits register to indicate that
    * EndPrimitive() was called after emitting vertex (vertex_count - 1);
    * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
    *
    * Note that if EndPrimitive() is called before emitting any vertices, this
    * will cause us to set bit 31 of the control_data_bits register to 1.
    * That's fine because:
    *
    * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
    *   output, so the hardware will ignore cut bit 31.
    *
    * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
    *   last vertex, so setting cut bit 31 has no effect (since the primitive
    *   is automatically ended when the GS terminates).
    *
    * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
    *   control_data_bits register to 0 when the first vertex is emitted.
    */

   const fs_builder abld = bld.annotate("end primitive");

   /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
   fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
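   /* Adding 0xffffffff is subtracting 1 in 32-bit unsigned arithmetic,
    * giving vertex_count - 1 without needing a negative immediate.
    */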
   fs_reg mask = intexp2(abld, prev_count);
   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
    * ((vertex_count - 1) % 32).
    */
   abld.OR(this->control_data_bits, this->control_data_bits, mask);
}
void
fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
{
   assert(stage == MESA_SHADER_GEOMETRY);
   assert(gs_compile->control_data_bits_per_vertex != 0);

   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);

   const fs_builder abld = bld.annotate("emit control data bits");
   const fs_builder fwa_bld = bld.exec_all();

   /* We use a single UD register to accumulate control data bits (32 bits
    * for each of the SIMD8 channels).  So we need to write a DWord (32 bits)
    * whenever a vertex is emitted.
    *
    * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
    * We have to select a 128-bit group via the Global and Per-Slot Offsets,
    * then use the Channel Mask phase to enable/disable which DWord within
    * that group to write.  (Remember, different SIMD8 channels may have
    * emitted different numbers of vertices, so we may need per-slot offsets.)
    *
    * Channel masking presents an annoying problem: we may have to replicate
    * the data up to 4 times:
    *
    *    Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
    *
    * To avoid penalizing shaders that emit a small number of vertices, we
    * can avoid these sometimes: if the size of the control data header is
    * <= 128 bits, then there is only 1 OWord.  All SIMD8 channels will land
    * in the same 128-bit group, so we can skip per-slot offsets.
    *
    * Similarly, if the control data header is <= 32 bits, there is only one
    * DWord, so we can skip channel masks.
    */
   enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;

   fs_reg channel_mask, per_slot_offset;

   if (gs_compile->control_data_header_size_bits > 32) {
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
      channel_mask = vgrf(glsl_type::uint_type);
   }

   if (gs_compile->control_data_header_size_bits > 128) {
      opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
      per_slot_offset = vgrf(glsl_type::uint_type);
   }

   /* Figure out which DWord we're trying to write to using the formula:
    *
    *    dword_index = (vertex_count - 1) * bits_per_vertex / 32
    *
    * Since bits_per_vertex is a power of two, and is known at compile
    * time, this can be optimized to:
    *
    *    dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
    */
   if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
      fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
      unsigned log2_bits_per_vertex =
         util_last_bit(gs_compile->control_data_bits_per_vertex);
      abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
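      /* For example, with bits_per_vertex == 2, util_last_bit(2) == 2, so
       * this shifts right by 4: (vertex_count - 1) * 2 / 32 ==
       * (vertex_count - 1) >> 4.
       */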
      if (per_slot_offset.file != BAD_FILE) {
         /* Set the per-slot offset to dword_index / 4, so that we'll write to
          * the appropriate OWord within the control data header.
          */
         abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
      }

      /* Set the channel masks to 1 << (dword_index % 4), so that we'll
       * write to the appropriate DWORD within the OWORD.
       */
      fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
      fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
      channel_mask = intexp2(fwa_bld, channel);
      /* Then the channel masks need to be in bits 23:16. */
      fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
   }

   /* Store the control data bits in the message payload and send it. */
   unsigned mlen = 2;
   if (channel_mask.file != BAD_FILE)
      mlen += 4; /* channel masks, plus 3 extra copies of the data */
   if (per_slot_offset.file != BAD_FILE)
      mlen++;

   fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
   fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
   unsigned i = 0;
   sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
   if (per_slot_offset.file != BAD_FILE)
      sources[i++] = per_slot_offset;
   if (channel_mask.file != BAD_FILE)
      sources[i++] = channel_mask;
   while (i < mlen) {
      sources[i++] = this->control_data_bits;
   }

   abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
   fs_inst *inst = abld.emit(opcode, reg_undef, payload);
   inst->mlen = mlen;
   /* We need to increment Global Offset by 256-bits to make room for
    * Broadwell's extra "Vertex Count" payload at the beginning of the
    * URB entry.  Since this is an OWord message, Global Offset is counted
    * in 128-bit units, so we must set it to 2.
    */
   if (gs_prog_data->static_vertex_count == -1)
      inst->offset = 2;
}
void
fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
                                            unsigned stream_id)
{
   /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */

   /* Note: we are calling this *before* increasing vertex_count, so
    * this->vertex_count == vertex_count - 1 in the formula above.
    */

   /* Stream mode uses 2 bits per vertex */
   assert(gs_compile->control_data_bits_per_vertex == 2);

   /* Must be a valid stream */
   assert(stream_id < MAX_VERTEX_STREAMS);

   /* Control data bits are initialized to 0 so we don't have to set any
    * bits when sending vertices to stream 0.
    */
   if (stream_id == 0)
      return;

   const fs_builder abld = bld.annotate("set stream control data bits", NULL);

   /* reg::sid = stream_id */
   fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   abld.MOV(sid, brw_imm_ud(stream_id));

   /* reg:shift_count = 2 * (vertex_count - 1) */
   fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));

   /* Note: we're relying on the fact that the GEN SHL instruction only pays
    * attention to the lower 5 bits of its second source argument, so on this
    * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
    * stream_id << ((2 * (vertex_count - 1)) % 32).
    */
   fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
   abld.SHL(mask, sid, shift_count);
   abld.OR(this->control_data_bits, this->control_data_bits, mask);
}
void
fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
                           unsigned stream_id)
{
   assert(stage == MESA_SHADER_GEOMETRY);

   struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);

   fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
   vertex_count.type = BRW_REGISTER_TYPE_UD;

   /* Haswell and later hardware ignores the "Render Stream Select" bits
    * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
    * and instead sends all primitives down the pipeline for rasterization.
    * If the SOL stage is enabled, "Render Stream Select" is honored and
    * primitives bound to non-zero streams are discarded after stream output.
    *
    * Since the only purpose of primitives sent to non-zero streams is to
    * be recorded by transform feedback, we can simply discard all geometry
    * bound to these streams when transform feedback is disabled.
    */
   if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
      return;

   /* If we're outputting 32 control data bits or less, then we can wait
    * until the shader is over to output them all.  Otherwise we need to
    * output them as we go.  Now is the time to do it, since we're about to
    * output the vertex_count'th vertex, so it's guaranteed that the
    * control data bits associated with the (vertex_count - 1)th vertex are
    * correct.
    */
   if (gs_compile->control_data_header_size_bits > 32) {
      const fs_builder abld =
         bld.annotate("emit vertex: emit control data bits");

      /* Only emit control data bits if we've finished accumulating a batch
       * of 32 bits.  This is the case when:
       *
       *    (vertex_count * bits_per_vertex) % 32 == 0
       *
       * (in other words, when the last 5 bits of vertex_count *
       * bits_per_vertex are 0).  Assuming bits_per_vertex == 2^n for some
       * integer n (which is always the case, since bits_per_vertex is
       * always 1 or 2), this is equivalent to requiring that the last 5-n
       * bits of vertex_count are 0:
       *
       *    vertex_count & (2^(5-n) - 1) == 0
       *
       * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
       * equivalent to requiring that:
       *
       *    vertex_count & (32 / bits_per_vertex - 1) == 0
       *
       * TODO: If vertex_count is an immediate, we could do some of this math
       * at compile time...
       */
      fs_inst *inst =
         abld.AND(bld.null_reg_d(), vertex_count,
                  brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
      inst->conditional_mod = BRW_CONDITIONAL_Z;
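      /* For example, with bits_per_vertex == 2 this tests vertex_count & 15,
       * which is zero every 16 vertices, i.e. every 32 accumulated control
       * data bits.
       */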
2028 abld.IF(BRW_PREDICATE_NORMAL);
2029 /* If vertex_count is 0, then no control data bits have been
2030 * accumulated yet, so we can skip emitting them.
2032 abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
2033 BRW_CONDITIONAL_NEQ);
2034 abld.IF(BRW_PREDICATE_NORMAL);
2035 emit_gs_control_data_bits(vertex_count);
2036 abld.emit(BRW_OPCODE_ENDIF);
2038 /* Reset control_data_bits to 0 so we can start accumulating a new
2041 * Note: in the case where vertex_count == 0, this neutralizes the
2042 * effect of any call to EndPrimitive() that the shader may have
2043 * made before outputting its first vertex.
2045 inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
2046 inst->force_writemask_all = true;
2047 abld.emit(BRW_OPCODE_ENDIF);
2050 emit_urb_writes(vertex_count);
2052 /* In stream mode we have to set control data bits for all vertices
2053 * unless we have disabled control data bits completely (which we do
2054 * for GL_POINTS outputs that don't use streams).
2056 if (gs_compile->control_data_header_size_bits > 0 &&
2057 gs_prog_data->control_data_format ==
2058 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
2059 set_gs_stream_control_data_bits(vertex_count, stream_id);
2064 fs_visitor::emit_gs_input_load(const fs_reg &dst,
2065 const nir_src &vertex_src,
2066 unsigned base_offset,
2067 const nir_src &offset_src,
2068 unsigned num_components,
2069 unsigned first_component)
2071 struct brw_gs_prog_data *gs_prog_data = brw_gs_prog_data(prog_data);
2073 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
2074 nir_const_value *offset_const = nir_src_as_const_value(offset_src);
2075 const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
2077 /* TODO: figure out push input layout for invocations == 1 */
2078 /* TODO: make this work with 64-bit inputs */
2079 if (gs_prog_data->invocations == 1 &&
2080 type_sz(dst.type) <= 4 &&
2081 offset_const != NULL && vertex_const != NULL &&
2082 4 * (base_offset + offset_const->u32[0]) < push_reg_count) {
2083 int imm_offset = (base_offset + offset_const->u32[0]) * 4 +
2084 vertex_const->u32[0] * push_reg_count;
2085 for (unsigned i = 0; i < num_components; i++) {
2086 bld.MOV(offset(dst, bld, i),
2087 fs_reg(ATTR, imm_offset + i + first_component, dst.type));
2092 /* Resort to the pull model. Ensure the VUE handles are provided. */
2093 assert(gs_prog_data->base.include_vue_handles);
2095 unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
2096 fs_reg icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2098 if (gs_prog_data->invocations == 1) {
2100 /* The vertex index is constant; just select the proper URB handle. */
2102 retype(brw_vec8_grf(first_icp_handle + vertex_const->i32[0], 0),
2103 BRW_REGISTER_TYPE_UD);
2105 /* The vertex index is non-constant. We need to use indirect
2106 * addressing to fetch the proper URB handle.
2108 * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
2109 * indicating that channel <n> should read the handle from
2110 * DWord <n>. We convert that to bytes by multiplying by 4.
2112 * Next, we convert the vertex index to bytes by multiplying
2113 * by 32 (shifting by 5), and add the two together. This is
2114 * the final indirect byte offset.
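 *
 * Illustrative example (not in the original source): for vertex index 3
 * the base is 3 * 32 = 96 bytes, so channel <n> ends up reading its
 * handle from byte 96 + 4 * <n> of the ICP handle area.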
2116 fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_UW, 1);
2117 fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2118 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2119 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2121 /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
2122 bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
2123 /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
2124 bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
2125 /* Convert vertex_index to bytes (multiply by 32) */
2126 bld.SHL(vertex_offset_bytes,
2127 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2129 bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
2131 /* Use first_icp_handle as the base offset. There is one register
2132 * of URB handles per vertex, so inform the register allocator that
2133 * we might read up to nir->info.gs.vertices_in registers.
2135 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2136 retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2137 fs_reg(icp_offset_bytes),
2138 brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
2141 assert(gs_prog_data->invocations > 1);
2144 assert(devinfo->gen >= 9 || vertex_const->i32[0] <= 5);
2146 retype(brw_vec1_grf(first_icp_handle +
2147 vertex_const->i32[0] / 8,
2148 vertex_const->i32[0] % 8),
2149 BRW_REGISTER_TYPE_UD));
2151 /* The vertex index is non-constant. We need to use indirect
2152 * addressing to fetch the proper URB handle.
2155 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2157 /* Convert vertex_index to bytes (multiply by 4) */
2158 bld.SHL(icp_offset_bytes,
2159 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2162 /* Use first_icp_handle as the base offset. There is one DWord
2163 * of URB handles per vertex, so inform the register allocator that
2164 * we might read up to ceil(nir->info.gs.vertices_in / 8) registers.
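 *
 * E.g. (illustrative): with 6 input vertices, DIV_ROUND_UP(6, 8) = 1, so
 * a single register of packed DWord handles covers every vertex.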
2166 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2167 retype(brw_vec8_grf(first_icp_handle, 0), icp_handle.type),
2168 fs_reg(icp_offset_bytes),
2169 brw_imm_ud(DIV_ROUND_UP(nir->info.gs.vertices_in, 8) *
2176 fs_reg tmp_dst = dst;
2177 fs_reg indirect_offset = get_nir_src(offset_src);
2178 unsigned num_iterations = 1;
2179 unsigned orig_num_components = num_components;
2181 if (type_sz(dst.type) == 8) {
2182 if (num_components > 2) {
2186 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
2188 first_component = first_component / 2;
2191 for (unsigned iter = 0; iter < num_iterations; iter++) {
2193 /* Constant indexing - use global offset. */
2194 if (first_component != 0) {
2195 unsigned read_components = num_components + first_component;
2196 fs_reg tmp = bld.vgrf(dst.type, read_components);
2197 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
2198 inst->size_written = read_components *
2199 tmp.component_size(inst->exec_size);
2200 for (unsigned i = 0; i < num_components; i++) {
2201 bld.MOV(offset(tmp_dst, bld, i),
2202 offset(tmp, bld, i + first_component));
2205 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp_dst,
2207 inst->size_written = num_components *
2208 tmp_dst.component_size(inst->exec_size);
2210 inst->offset = base_offset + offset_const->u32[0];
2213 /* Indirect indexing - use per-slot offsets as well. */
2214 const fs_reg srcs[] = { icp_handle, indirect_offset };
2215 unsigned read_components = num_components + first_component;
2216 fs_reg tmp = bld.vgrf(dst.type, read_components);
2217 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2218 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2219 if (first_component != 0) {
2220 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2222 inst->size_written = read_components *
2223 tmp.component_size(inst->exec_size);
2224 for (unsigned i = 0; i < num_components; i++) {
2225 bld.MOV(offset(tmp_dst, bld, i),
2226 offset(tmp, bld, i + first_component));
2229 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp_dst,
2231 inst->size_written = num_components *
2232 tmp_dst.component_size(inst->exec_size);
2234 inst->offset = base_offset;
2238 if (type_sz(dst.type) == 8) {
2239 shuffle_from_32bit_read(bld,
2240 offset(dst, bld, iter * 2),
2241 retype(tmp_dst, BRW_REGISTER_TYPE_D),
2246 if (num_iterations > 1) {
2247 num_components = orig_num_components - 2;
2251 fs_reg new_indirect = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2252 bld.ADD(new_indirect, indirect_offset, brw_imm_ud(1u));
2253 indirect_offset = new_indirect;
2260 fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
2262 nir_src *offset_src = nir_get_io_offset_src(instr);
2263 nir_const_value *const_value = nir_src_as_const_value(*offset_src);
2266 /* The only constant offset we should find is 0. brw_nir.c's
2267 * add_const_offset_to_base() will fold other constant offsets
2268 * into instr->const_index[0].
2270 assert(const_value->u32[0] == 0);
2274 return get_nir_src(*offset_src);
2278 do_untyped_vector_read(const fs_builder &bld,
2280 const fs_reg surf_index,
2281 const fs_reg offset_reg,
2282 unsigned num_components)
2284 if (type_sz(dest.type) <= 2) {
2285 assert(dest.stride == 1);
2286 bool is_const_offset = offset_reg.file == BRW_IMMEDIATE_VALUE;
2288 if (is_const_offset) {
2289 uint32_t start = offset_reg.ud & ~3;
2290 uint32_t end = offset_reg.ud + num_components * type_sz(dest.type);
2291 end = ALIGN(end, 4);
2292 assert(end - start <= 16);
2294 /* At this point we have 16-bit components whose constant offset is
2295 * aligned to 4 bytes, so they can be read with untyped_read messages,
2296 * which require 32-bit aligned offsets.
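 *
 * Sketch of the math, assuming a hypothetical read of two 16-bit
 * components at byte offset 6: start = 6 & ~3 = 4, end = ALIGN(6 + 2 * 2,
 * 4) = 12, so two 32-bit dwords are read, and first_component =
 * (6 & 3) / 2 = 1 selects the second 16-bit half of the first dword.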
2298 unsigned first_component = (offset_reg.ud & 3) / type_sz(dest.type);
2299 unsigned num_components_32bit = (end - start) / 4;
2301 fs_reg read_result =
2302 emit_untyped_read(bld, surf_index, brw_imm_ud(start),
2304 num_components_32bit,
2305 BRW_PREDICATE_NONE);
2306 shuffle_from_32bit_read(bld, dest, read_result, first_component,
2309 fs_reg read_offset = bld.vgrf(BRW_REGISTER_TYPE_UD);
2310 for (unsigned i = 0; i < num_components; i++) {
2312 bld.MOV(read_offset, offset_reg);
2314 bld.ADD(read_offset, offset_reg,
2315 brw_imm_ud(i * type_sz(dest.type)));
2317 /* Non-constant offsets are not guaranteed to be 32-bit aligned, so
2318 * each component is read with its own byte_scattered_read
2319 * message.
2321 fs_reg read_result =
2322 emit_byte_scattered_read(bld, surf_index, read_offset,
2324 type_sz(dest.type) * 8 /* bit_size */,
2325 BRW_PREDICATE_NONE);
2326 bld.MOV(offset(dest, bld, i),
2327 subscript(read_result, dest.type, 0));
2330 } else if (type_sz(dest.type) == 4) {
2331 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
2334 BRW_PREDICATE_NONE);
2335 read_result.type = dest.type;
2336 for (unsigned i = 0; i < num_components; i++)
2337 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
2338 } else if (type_sz(dest.type) == 8) {
2339 /* Reading a dvec, so we need to:
2341 * 1. Multiply num_components by 2, to account for the fact that we
2342 * need to read 64-bit components.
2343 * 2. Shuffle the result of the load to form valid 64-bit elements
2344 * 3. Emit a second load (for components z/w) if needed.
2346 fs_reg read_offset = bld.vgrf(BRW_REGISTER_TYPE_UD);
2347 bld.MOV(read_offset, offset_reg);
2349 int iters = num_components <= 2 ? 1 : 2;
2351 /* Load the dvec, the first iteration loads components x/y, the second
2352 * iteration, if needed, loads components z/w
2354 for (int it = 0; it < iters; it++) {
2355 /* Compute number of components to read in this iteration */
2356 int iter_components = MIN2(2, num_components);
2357 num_components -= iter_components;
2359 /* Read. Since this message reads 32-bit components, we need to
2360 * read twice as many components.
2362 fs_reg read_result = emit_untyped_read(bld, surf_index, read_offset,
2364 iter_components * 2,
2365 BRW_PREDICATE_NONE);
2367 /* Shuffle the 32-bit load result into valid 64-bit data */
2368 shuffle_from_32bit_read(bld, offset(dest, bld, it * 2),
2369 read_result, 0, iter_components);
2371 bld.ADD(read_offset, read_offset, brw_imm_ud(16));
2374 unreachable("Unsupported type");
2379 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
2380 nir_intrinsic_instr *instr)
2382 assert(stage == MESA_SHADER_VERTEX);
2385 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2386 dest = get_nir_dest(instr->dest);
2388 switch (instr->intrinsic) {
2389 case nir_intrinsic_load_vertex_id:
2390 case nir_intrinsic_load_base_vertex:
2391 unreachable("should be lowered by nir_lower_system_values()");
2393 case nir_intrinsic_load_input: {
2394 fs_reg src = fs_reg(ATTR, nir_intrinsic_base(instr) * 4, dest.type);
2395 unsigned first_component = nir_intrinsic_component(instr);
2396 unsigned num_components = instr->num_components;
2398 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
2399 assert(const_offset && "Indirect input loads not allowed");
2400 src = offset(src, bld, const_offset->u32[0]);
2402 if (type_sz(dest.type) == 8)
2403 first_component /= 2;
2405 /* For 16-bit support, a temporary may be needed to copy from
2408 shuffle_from_32bit_read(bld, dest, retype(src, BRW_REGISTER_TYPE_D),
2409 first_component, num_components);
2413 case nir_intrinsic_load_vertex_id_zero_base:
2414 case nir_intrinsic_load_instance_id:
2415 case nir_intrinsic_load_base_instance:
2416 case nir_intrinsic_load_draw_id:
2417 case nir_intrinsic_load_first_vertex:
2418 case nir_intrinsic_load_is_indexed_draw:
2419 unreachable("lowered by brw_nir_lower_vs_inputs");
2422 nir_emit_intrinsic(bld, instr);
2428 fs_visitor::nir_emit_tcs_intrinsic(const fs_builder &bld,
2429 nir_intrinsic_instr *instr)
2431 assert(stage == MESA_SHADER_TESS_CTRL);
2432 struct brw_tcs_prog_key *tcs_key = (struct brw_tcs_prog_key *) key;
2433 struct brw_tcs_prog_data *tcs_prog_data = brw_tcs_prog_data(prog_data);
2436 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2437 dst = get_nir_dest(instr->dest);
2439 switch (instr->intrinsic) {
2440 case nir_intrinsic_load_primitive_id:
2441 bld.MOV(dst, fs_reg(brw_vec1_grf(0, 1)));
2443 case nir_intrinsic_load_invocation_id:
2444 bld.MOV(retype(dst, invocation_id.type), invocation_id);
2446 case nir_intrinsic_load_patch_vertices_in:
2447 bld.MOV(retype(dst, BRW_REGISTER_TYPE_D),
2448 brw_imm_d(tcs_key->input_vertices));
2451 case nir_intrinsic_barrier: {
2452 if (tcs_prog_data->instances == 1)
2455 fs_reg m0 = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2456 fs_reg m0_2 = component(m0, 2);
2458 const fs_builder chanbld = bld.exec_all().group(1, 0);
2460 /* Zero the message header */
2461 bld.exec_all().MOV(m0, brw_imm_ud(0u));
2463 /* Copy "Barrier ID" from r0.2, bits 16:13 */
2464 chanbld.AND(m0_2, retype(brw_vec1_grf(0, 2), BRW_REGISTER_TYPE_UD),
2465 brw_imm_ud(INTEL_MASK(16, 13)));
2467 /* Shift it up to bits 27:24. */
2468 chanbld.SHL(m0_2, m0_2, brw_imm_ud(11));
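/* To make the bit juggling above concrete (illustrative only): the AND
 * keeps bits 16:13 of r0.2 (mask 0x0001E000), and the 11-bit left shift
 * relocates exactly those bits to 27:24, where the barrier message
 * expects the Barrier ID.
 */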
2470 /* Set the Barrier Count and the enable bit */
2471 chanbld.OR(m0_2, m0_2,
2472 brw_imm_ud(tcs_prog_data->instances << 9 | (1 << 15)));
2474 bld.emit(SHADER_OPCODE_BARRIER, bld.null_reg_ud(), m0);
2478 case nir_intrinsic_load_input:
2479 unreachable("nir_lower_io should never give us these.");
2482 case nir_intrinsic_load_per_vertex_input: {
2483 fs_reg indirect_offset = get_indirect_offset(instr);
2484 unsigned imm_offset = instr->const_index[0];
2486 const nir_src &vertex_src = instr->src[0];
2487 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
2494 /* Emit a MOV to resolve <0,1,0> regioning. */
2495 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2497 retype(brw_vec1_grf(1 + (vertex_const->i32[0] >> 3),
2498 vertex_const->i32[0] & 7),
2499 BRW_REGISTER_TYPE_UD));
2500 } else if (tcs_prog_data->instances == 1 &&
2501 vertex_src.is_ssa &&
2502 vertex_src.ssa->parent_instr->type == nir_instr_type_intrinsic &&
2503 nir_instr_as_intrinsic(vertex_src.ssa->parent_instr)->intrinsic == nir_intrinsic_load_invocation_id) {
2504 /* For the common case of only 1 instance, an array index of
2505 * gl_InvocationID means reading g1. Skip all the indirect work.
2507 icp_handle = retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD);
2509 /* The vertex index is non-constant. We need to use indirect
2510 * addressing to fetch the proper URB handle.
2512 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2514 /* Each ICP handle is a single DWord (4 bytes) */
2515 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2516 bld.SHL(vertex_offset_bytes,
2517 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
2520 /* Start at g1. We might read up to 4 registers. */
2521 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
2522 retype(brw_vec8_grf(1, 0), icp_handle.type), vertex_offset_bytes,
2523 brw_imm_ud(4 * REG_SIZE));
2526 /* We can only read two double components with each URB read, so
2527 * we send two read messages in that case, each one loading up to
2528 * two double components.
2530 unsigned num_iterations = 1;
2531 unsigned num_components = instr->num_components;
2532 unsigned first_component = nir_intrinsic_component(instr);
2533 fs_reg orig_dst = dst;
2534 if (type_sz(dst.type) == 8) {
2535 first_component = first_component / 2;
2536 if (instr->num_components > 2) {
2541 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dst.type);
2545 for (unsigned iter = 0; iter < num_iterations; iter++) {
2546 if (indirect_offset.file == BAD_FILE) {
2547 /* Constant indexing - use global offset. */
2548 if (first_component != 0) {
2549 unsigned read_components = num_components + first_component;
2550 fs_reg tmp = bld.vgrf(dst.type, read_components);
2551 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp, icp_handle);
2552 for (unsigned i = 0; i < num_components; i++) {
2553 bld.MOV(offset(dst, bld, i),
2554 offset(tmp, bld, i + first_component));
2557 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
2559 inst->offset = imm_offset;
2562 /* Indirect indexing - use per-slot offsets as well. */
2563 const fs_reg srcs[] = { icp_handle, indirect_offset };
2564 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2565 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2566 if (first_component != 0) {
2567 unsigned read_components = num_components + first_component;
2568 fs_reg tmp = bld.vgrf(dst.type, read_components);
2569 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2571 for (unsigned i = 0; i < num_components; i++) {
2572 bld.MOV(offset(dst, bld, i),
2573 offset(tmp, bld, i + first_component));
2576 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
2579 inst->offset = imm_offset;
2582 inst->size_written = (num_components + first_component) *
2583 inst->dst.component_size(inst->exec_size);
2585 /* If we are reading 64-bit data using 32-bit read messages we need
2586 * to build proper 64-bit data elements by shuffling the low and high
2587 * 32-bit components around, as we do for other things like UBOs
2590 if (type_sz(dst.type) == 8) {
2591 shuffle_from_32bit_read(bld,
2592 offset(orig_dst, bld, iter * 2),
2593 retype(dst, BRW_REGISTER_TYPE_D),
2597 /* Copy the temporary to the destination to deal with writemasking.
2599 * Also attempt to deal with gl_PointSize being in the .w component.
2601 if (inst->offset == 0 && indirect_offset.file == BAD_FILE) {
2602 assert(type_sz(dst.type) < 8);
2603 inst->dst = bld.vgrf(dst.type, 4);
2604 inst->size_written = 4 * REG_SIZE;
2605 bld.MOV(dst, offset(inst->dst, bld, 3));
2608 /* If we are loading double data and need a second read message,
2609 * adjust the write offset
2611 if (num_iterations > 1) {
2612 num_components = instr->num_components - 2;
2619 case nir_intrinsic_load_output:
2620 case nir_intrinsic_load_per_vertex_output: {
2621 fs_reg indirect_offset = get_indirect_offset(instr);
2622 unsigned imm_offset = instr->const_index[0];
2623 unsigned first_component = nir_intrinsic_component(instr);
2626 if (indirect_offset.file == BAD_FILE) {
2627 /* Replicate the patch handle to all enabled channels */
2628 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2629 bld.MOV(patch_handle,
2630 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD));
2633 if (first_component != 0) {
2634 unsigned read_components =
2635 instr->num_components + first_component;
2636 fs_reg tmp = bld.vgrf(dst.type, read_components);
2637 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
2639 inst->size_written = read_components * REG_SIZE;
2640 for (unsigned i = 0; i < instr->num_components; i++) {
2641 bld.MOV(offset(dst, bld, i),
2642 offset(tmp, bld, i + first_component));
2645 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst,
2647 inst->size_written = instr->num_components * REG_SIZE;
2649 inst->offset = imm_offset;
2653 /* Indirect indexing - use per-slot offsets as well. */
2654 const fs_reg srcs[] = {
2655 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2658 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2659 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2660 if (first_component != 0) {
2661 unsigned read_components =
2662 instr->num_components + first_component;
2663 fs_reg tmp = bld.vgrf(dst.type, read_components);
2664 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2666 inst->size_written = read_components * REG_SIZE;
2667 for (unsigned i = 0; i < instr->num_components; i++) {
2668 bld.MOV(offset(dst, bld, i),
2669 offset(tmp, bld, i + first_component));
2672 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst,
2674 inst->size_written = instr->num_components * REG_SIZE;
2676 inst->offset = imm_offset;
2682 case nir_intrinsic_store_output:
2683 case nir_intrinsic_store_per_vertex_output: {
2684 fs_reg value = get_nir_src(instr->src[0]);
2685 bool is_64bit = (instr->src[0].is_ssa ?
2686 instr->src[0].ssa->bit_size : instr->src[0].reg.reg->bit_size) == 64;
2687 fs_reg indirect_offset = get_indirect_offset(instr);
2688 unsigned imm_offset = instr->const_index[0];
2689 unsigned mask = instr->const_index[1];
2690 unsigned header_regs = 0;
2692 srcs[header_regs++] = retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD);
2694 if (indirect_offset.file != BAD_FILE) {
2695 srcs[header_regs++] = indirect_offset;
2701 unsigned num_components = util_last_bit(mask);
2704 /* We can only pack two 64-bit components in a single message, so send
2705 * 2 messages if we have more components
2707 unsigned num_iterations = 1;
2708 unsigned iter_components = num_components;
2709 unsigned first_component = nir_intrinsic_component(instr);
2711 first_component = first_component / 2;
2712 if (instr->num_components > 2) {
2714 iter_components = 2;
2718 mask = mask << first_component;
2720 for (unsigned iter = 0; iter < num_iterations; iter++) {
2721 if (!is_64bit && mask != WRITEMASK_XYZW) {
2722 srcs[header_regs++] = brw_imm_ud(mask << 16);
2723 opcode = indirect_offset.file != BAD_FILE ?
2724 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2725 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2726 } else if (is_64bit && ((mask & WRITEMASK_XY) != WRITEMASK_XY)) {
2727 /* Expand the 64-bit mask to 32-bit channels. We only handle
2728 * two channels in each iteration, so we only care about X/Y.
2730 unsigned mask32 = 0;
2731 if (mask & WRITEMASK_X)
2732 mask32 |= WRITEMASK_XY;
2733 if (mask & WRITEMASK_Y)
2734 mask32 |= WRITEMASK_ZW;
2736 /* If the mask does not include any of the channels X or Y there
2737 * is nothing to do in this iteration. Move on to the next couple
2738 * of 64-bit channels.
2746 srcs[header_regs++] = brw_imm_ud(mask32 << 16);
2747 opcode = indirect_offset.file != BAD_FILE ?
2748 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT :
2749 SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
2751 opcode = indirect_offset.file != BAD_FILE ?
2752 SHADER_OPCODE_URB_WRITE_SIMD8_PER_SLOT :
2753 SHADER_OPCODE_URB_WRITE_SIMD8;
2756 for (unsigned i = 0; i < iter_components; i++) {
2757 if (!(mask & (1 << (i + first_component))))
2761 srcs[header_regs + i + first_component] = offset(value, bld, i);
2763 /* We need to shuffle the 64-bit data to match the layout
2764 * expected by our 32-bit URB write messages. We use a temporary
2767 unsigned channel = iter * 2 + i;
2768 fs_reg dest = shuffle_for_32bit_write(bld, value, channel, 1);
2770 srcs[header_regs + (i + first_component) * 2] = dest;
2771 srcs[header_regs + (i + first_component) * 2 + 1] =
2772 offset(dest, bld, 1);
2777 header_regs + (is_64bit ? 2 * iter_components : iter_components) +
2778 (is_64bit ? 2 * first_component : first_component);
2780 bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
2781 bld.LOAD_PAYLOAD(payload, srcs, mlen, header_regs);
2783 fs_inst *inst = bld.emit(opcode, bld.null_reg_ud(), payload);
2784 inst->offset = imm_offset;
2787 /* If this is a 64-bit attribute, select the next two 64-bit channels
2788 * to be handled in the next iteration.
2799 nir_emit_intrinsic(bld, instr);
2805 fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
2806 nir_intrinsic_instr *instr)
2808 assert(stage == MESA_SHADER_TESS_EVAL);
2809 struct brw_tes_prog_data *tes_prog_data = brw_tes_prog_data(prog_data);
2812 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2813 dest = get_nir_dest(instr->dest);
2815 switch (instr->intrinsic) {
2816 case nir_intrinsic_load_primitive_id:
2817 bld.MOV(dest, fs_reg(brw_vec1_grf(0, 1)));
2819 case nir_intrinsic_load_tess_coord:
2820 /* gl_TessCoord is part of the payload in g1-3 */
2821 for (unsigned i = 0; i < 3; i++) {
2822 bld.MOV(offset(dest, bld, i), fs_reg(brw_vec8_grf(1 + i, 0)));
2826 case nir_intrinsic_load_input:
2827 case nir_intrinsic_load_per_vertex_input: {
2828 fs_reg indirect_offset = get_indirect_offset(instr);
2829 unsigned imm_offset = instr->const_index[0];
2830 unsigned first_component = nir_intrinsic_component(instr);
2832 if (type_sz(dest.type) == 8) {
2833 first_component = first_component / 2;
2837 if (indirect_offset.file == BAD_FILE) {
2838 /* Arbitrarily only push up to 32 vec4 slots worth of data,
2839 * which is 16 registers (since each holds 2 vec4 slots).
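 *
 * Illustrative example: imm_offset == 5 maps to ATTR register 5 / 2 = 2,
 * and since 5 is odd, a float component i is fetched from channel
 * 4 + i + first_component of that register.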
2841 unsigned slot_count = 1;
2842 if (type_sz(dest.type) == 8 && instr->num_components > 2)
2845 const unsigned max_push_slots = 32;
2846 if (imm_offset + slot_count <= max_push_slots) {
2847 fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
2848 for (int i = 0; i < instr->num_components; i++) {
2849 unsigned comp = 16 / type_sz(dest.type) * (imm_offset % 2) +
2850 i + first_component;
2851 bld.MOV(offset(dest, bld, i), component(src, comp));
2854 tes_prog_data->base.urb_read_length =
2855 MAX2(tes_prog_data->base.urb_read_length,
2856 DIV_ROUND_UP(imm_offset + slot_count, 2));
2858 /* Replicate the patch handle to all enabled channels */
2859 const fs_reg srcs[] = {
2860 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)
2862 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
2863 bld.LOAD_PAYLOAD(patch_handle, srcs, ARRAY_SIZE(srcs), 0);
2865 if (first_component != 0) {
2866 unsigned read_components =
2867 instr->num_components + first_component;
2868 fs_reg tmp = bld.vgrf(dest.type, read_components);
2869 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, tmp,
2871 inst->size_written = read_components * REG_SIZE;
2872 for (unsigned i = 0; i < instr->num_components; i++) {
2873 bld.MOV(offset(dest, bld, i),
2874 offset(tmp, bld, i + first_component));
2877 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dest,
2879 inst->size_written = instr->num_components * REG_SIZE;
2882 inst->offset = imm_offset;
2885 /* Indirect indexing - use per-slot offsets as well. */
2887 /* We can only read two double components with each URB read, so
2888 * we send two read messages in that case, each one loading up to
2889 * two double components.
2891 unsigned num_iterations = 1;
2892 unsigned num_components = instr->num_components;
2893 fs_reg orig_dest = dest;
2894 if (type_sz(dest.type) == 8) {
2895 if (instr->num_components > 2) {
2899 fs_reg tmp = fs_reg(VGRF, alloc.allocate(4), dest.type);
2903 for (unsigned iter = 0; iter < num_iterations; iter++) {
2904 const fs_reg srcs[] = {
2905 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
2908 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
2909 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
2911 if (first_component != 0) {
2912 unsigned read_components =
2913 num_components + first_component;
2914 fs_reg tmp = bld.vgrf(dest.type, read_components);
2915 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, tmp,
2917 for (unsigned i = 0; i < num_components; i++) {
2918 bld.MOV(offset(dest, bld, i),
2919 offset(tmp, bld, i + first_component));
2922 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dest,
2926 inst->offset = imm_offset;
2927 inst->size_written = (num_components + first_component) *
2928 inst->dst.component_size(inst->exec_size);
2930 /* If we are reading 64-bit data using 32-bit read messages we need
2931 * to build proper 64-bit data elements by shuffling the low and high
2932 * 32-bit components around, as we do for other things like UBOs
2935 if (type_sz(dest.type) == 8) {
2936 shuffle_from_32bit_read(bld,
2937 offset(orig_dest, bld, iter * 2),
2938 retype(dest, BRW_REGISTER_TYPE_D),
2942 /* If we are loading double data and need a second read message,
2945 if (num_iterations > 1) {
2946 num_components = instr->num_components - 2;
2954 nir_emit_intrinsic(bld, instr);
2960 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
2961 nir_intrinsic_instr *instr)
2963 assert(stage == MESA_SHADER_GEOMETRY);
2964 fs_reg indirect_offset;
2967 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2968 dest = get_nir_dest(instr->dest);
2970 switch (instr->intrinsic) {
2971 case nir_intrinsic_load_primitive_id:
2972 assert(stage == MESA_SHADER_GEOMETRY);
2973 assert(brw_gs_prog_data(prog_data)->include_primitive_id);
2974 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
2975 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
2978 case nir_intrinsic_load_input:
2979 unreachable("load_input intrinsics are invalid for the GS stage");
2981 case nir_intrinsic_load_per_vertex_input:
2982 emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
2983 instr->src[1], instr->num_components,
2984 nir_intrinsic_component(instr));
2987 case nir_intrinsic_emit_vertex_with_counter:
2988 emit_gs_vertex(instr->src[0], instr->const_index[0]);
2991 case nir_intrinsic_end_primitive_with_counter:
2992 emit_gs_end_primitive(instr->src[0]);
2995 case nir_intrinsic_set_vertex_count:
2996 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
2999 case nir_intrinsic_load_invocation_id: {
3000 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
3001 assert(val.file != BAD_FILE);
3002 dest.type = val.type;
3008 nir_emit_intrinsic(bld, instr);
3014 * Fetch the current render target layer index.
3017 fetch_render_target_array_index(const fs_builder &bld)
3019 if (bld.shader->devinfo->gen >= 6) {
3020 /* The render target array index is provided in the thread payload as
3021 * bits 26:16 of r0.0.
3023 const fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_UD);
3024 bld.AND(idx, brw_uw1_reg(BRW_GENERAL_REGISTER_FILE, 0, 1),
3028 /* Pre-SNB we only ever render into the first layer of the framebuffer
3029 * since layered rendering is not implemented.
3031 return brw_imm_ud(0);
3036 * Fake non-coherent framebuffer read implemented using TXF to fetch from the
3037 * framebuffer at the current fragment coordinates and sample index.
3040 fs_visitor::emit_non_coherent_fb_read(const fs_builder &bld, const fs_reg &dst,
3043 const struct gen_device_info *devinfo = bld.shader->devinfo;
3045 assert(bld.shader->stage == MESA_SHADER_FRAGMENT);
3046 const brw_wm_prog_key *wm_key =
3047 reinterpret_cast<const brw_wm_prog_key *>(key);
3048 assert(!wm_key->coherent_fb_fetch);
3049 const struct brw_wm_prog_data *wm_prog_data =
3050 brw_wm_prog_data(stage_prog_data);
3052 /* Calculate the surface index relative to the start of the texture binding
3053 * table block, since that's what the texturing messages expect.
3055 const unsigned surface = target +
3056 wm_prog_data->binding_table.render_target_read_start -
3057 wm_prog_data->base.binding_table.texture_start;
3059 brw_mark_surface_used(
3060 bld.shader->stage_prog_data,
3061 wm_prog_data->binding_table.render_target_read_start + target);
3063 /* Calculate the fragment coordinates. */
3064 const fs_reg coords = bld.vgrf(BRW_REGISTER_TYPE_UD, 3);
3065 bld.MOV(offset(coords, bld, 0), pixel_x);
3066 bld.MOV(offset(coords, bld, 1), pixel_y);
3067 bld.MOV(offset(coords, bld, 2), fetch_render_target_array_index(bld));
3069 /* Calculate the sample index and MCS payload when multisampling. Luckily
3070 * the MCS fetch message behaves deterministically for UMS surfaces, so it
3071 * shouldn't be necessary to recompile based on whether the framebuffer is
3074 if (wm_key->multisample_fbo &&
3075 nir_system_values[SYSTEM_VALUE_SAMPLE_ID].file == BAD_FILE)
3076 nir_system_values[SYSTEM_VALUE_SAMPLE_ID] = *emit_sampleid_setup();
3078 const fs_reg sample = nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
3079 const fs_reg mcs = wm_key->multisample_fbo ?
3080 emit_mcs_fetch(coords, 3, brw_imm_ud(surface)) : fs_reg();
3082 /* Use either a normal or a CMS texel fetch message depending on whether
3083 * the framebuffer is single or multisample. On SKL+ use the wide CMS
3084 * message just in case the framebuffer uses 16x multisampling, it should
3085 * be equivalent to the normal CMS fetch for lower multisampling modes.
3087 const opcode op = !wm_key->multisample_fbo ? SHADER_OPCODE_TXF_LOGICAL :
3088 devinfo->gen >= 9 ? SHADER_OPCODE_TXF_CMS_W_LOGICAL :
3089 SHADER_OPCODE_TXF_CMS_LOGICAL;
3091 /* Emit the instruction. */
3092 const fs_reg srcs[] = { coords, fs_reg(), brw_imm_ud(0), fs_reg(),
3094 brw_imm_ud(surface), brw_imm_ud(0),
3095 fs_reg(), brw_imm_ud(3), brw_imm_ud(0) };
3096 STATIC_ASSERT(ARRAY_SIZE(srcs) == TEX_LOGICAL_NUM_SRCS);
3098 fs_inst *inst = bld.emit(op, dst, srcs, ARRAY_SIZE(srcs));
3099 inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3105 * Actual coherent framebuffer read implemented using the native render target
3106 * read message. Requires SKL+.
3109 emit_coherent_fb_read(const fs_builder &bld, const fs_reg &dst, unsigned target)
3111 assert(bld.shader->devinfo->gen >= 9);
3112 fs_inst *inst = bld.emit(FS_OPCODE_FB_READ_LOGICAL, dst);
3113 inst->target = target;
3114 inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
3120 alloc_temporary(const fs_builder &bld, unsigned size, fs_reg *regs, unsigned n)
3122 if (n && regs[0].file != BAD_FILE) {
3126 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, size);
3128 for (unsigned i = 0; i < n; i++)
3136 alloc_frag_output(fs_visitor *v, unsigned location)
3138 assert(v->stage == MESA_SHADER_FRAGMENT);
3139 const brw_wm_prog_key *const key =
3140 reinterpret_cast<const brw_wm_prog_key *>(v->key);
3141 const unsigned l = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_LOCATION);
3142 const unsigned i = GET_FIELD(location, BRW_NIR_FRAG_OUTPUT_INDEX);
3144 if (i > 0 || (key->force_dual_color_blend && l == FRAG_RESULT_DATA1))
3145 return alloc_temporary(v->bld, 4, &v->dual_src_output, 1);
3147 else if (l == FRAG_RESULT_COLOR)
3148 return alloc_temporary(v->bld, 4, v->outputs,
3149 MAX2(key->nr_color_regions, 1));
3151 else if (l == FRAG_RESULT_DEPTH)
3152 return alloc_temporary(v->bld, 1, &v->frag_depth, 1);
3154 else if (l == FRAG_RESULT_STENCIL)
3155 return alloc_temporary(v->bld, 1, &v->frag_stencil, 1);
3157 else if (l == FRAG_RESULT_SAMPLE_MASK)
3158 return alloc_temporary(v->bld, 1, &v->sample_mask, 1);
3160 else if (l >= FRAG_RESULT_DATA0 &&
3161 l < FRAG_RESULT_DATA0 + BRW_MAX_DRAW_BUFFERS)
3162 return alloc_temporary(v->bld, 4,
3163 &v->outputs[l - FRAG_RESULT_DATA0], 1);
3166 unreachable("Invalid location");
3170 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
3171 nir_intrinsic_instr *instr)
3173 assert(stage == MESA_SHADER_FRAGMENT);
3176 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3177 dest = get_nir_dest(instr->dest);
3179 switch (instr->intrinsic) {
3180 case nir_intrinsic_load_front_face:
3181 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
3182 *emit_frontfacing_interpolation());
3185 case nir_intrinsic_load_sample_pos: {
3186 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
3187 assert(sample_pos.file != BAD_FILE);
3188 dest.type = sample_pos.type;
3189 bld.MOV(dest, sample_pos);
3190 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
3194 case nir_intrinsic_load_layer_id:
3195 dest.type = BRW_REGISTER_TYPE_UD;
3196 bld.MOV(dest, fetch_render_target_array_index(bld));
3199 case nir_intrinsic_load_helper_invocation:
3200 case nir_intrinsic_load_sample_mask_in:
3201 case nir_intrinsic_load_sample_id: {
3202 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3203 fs_reg val = nir_system_values[sv];
3204 assert(val.file != BAD_FILE);
3205 dest.type = val.type;
3210 case nir_intrinsic_store_output: {
3211 const fs_reg src = get_nir_src(instr->src[0]);
3212 const nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3213 assert(const_offset && "Indirect output stores not allowed");
3214 const unsigned location = nir_intrinsic_base(instr) +
3215 SET_FIELD(const_offset->u32[0], BRW_NIR_FRAG_OUTPUT_LOCATION);
3216 const fs_reg new_dest = retype(alloc_frag_output(this, location),
3219 for (unsigned j = 0; j < instr->num_components; j++)
3220 bld.MOV(offset(new_dest, bld, nir_intrinsic_component(instr) + j),
3221 offset(src, bld, j));
3226 case nir_intrinsic_load_output: {
3227 const unsigned l = GET_FIELD(nir_intrinsic_base(instr),
3228 BRW_NIR_FRAG_OUTPUT_LOCATION);
3229 assert(l >= FRAG_RESULT_DATA0);
3230 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3231 assert(const_offset && "Indirect output loads not allowed");
3232 const unsigned target = l - FRAG_RESULT_DATA0 + const_offset->u32[0];
3233 const fs_reg tmp = bld.vgrf(dest.type, 4);
3235 if (reinterpret_cast<const brw_wm_prog_key *>(key)->coherent_fb_fetch)
3236 emit_coherent_fb_read(bld, tmp, target);
3238 emit_non_coherent_fb_read(bld, tmp, target);
3240 for (unsigned j = 0; j < instr->num_components; j++) {
3241 bld.MOV(offset(dest, bld, j),
3242 offset(tmp, bld, nir_intrinsic_component(instr) + j));
3248 case nir_intrinsic_discard:
3249 case nir_intrinsic_discard_if: {
3250 /* We track our discarded pixels in f0.1. By predicating on it, we can
3251 * update just the flag bits that aren't yet discarded. If there's no
3252 * condition, we emit a CMP of g0 != g0, so all currently executing
3253 * channels will get turned off.
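 *
 * The g0 != g0 trick works because a register can never compare
 * not-equal to itself, so the CMP leaves the flag at zero for every live
 * channel, marking them all discarded (a restatement for clarity, not
 * new behavior).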
3256 if (instr->intrinsic == nir_intrinsic_discard_if) {
3257 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
3258 brw_imm_d(0), BRW_CONDITIONAL_Z);
3260 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
3261 BRW_REGISTER_TYPE_UW));
3262 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
3264 cmp->predicate = BRW_PREDICATE_NORMAL;
3265 cmp->flag_subreg = 1;
3267 if (devinfo->gen >= 6) {
3268 emit_discard_jump();
3271 limit_dispatch_width(16, "Fragment discard not implemented in SIMD32 mode.");
3275 case nir_intrinsic_load_input: {
3276 /* load_input is only used for flat inputs */
3277 unsigned base = nir_intrinsic_base(instr);
3278 unsigned comp = nir_intrinsic_component(instr);
3279 unsigned num_components = instr->num_components;
3280 fs_reg orig_dest = dest;
3281 enum brw_reg_type type = dest.type;
3283 /* Special case fields in the VUE header */
3284 if (base == VARYING_SLOT_LAYER)
3286 else if (base == VARYING_SLOT_VIEWPORT)
3289 if (nir_dest_bit_size(instr->dest) == 64) {
3290 /* const_index is in 32-bit type size units, which cannot express DF
3291 * alignment. We need to read the double vector as if it were a float
3292 * vector of twice the number of components to fetch the right data.
3294 type = BRW_REGISTER_TYPE_F;
3295 num_components *= 2;
3296 dest = bld.vgrf(type, num_components);
3299 for (unsigned int i = 0; i < num_components; i++) {
3300 bld.MOV(offset(retype(dest, type), bld, i),
3301 retype(component(interp_reg(base, comp + i), 3), type));
3304 if (nir_dest_bit_size(instr->dest) == 64) {
3305 shuffle_from_32bit_read(bld, orig_dest, dest, 0,
3306 instr->num_components);
3311 case nir_intrinsic_load_barycentric_pixel:
3312 case nir_intrinsic_load_barycentric_centroid:
3313 case nir_intrinsic_load_barycentric_sample:
3314 /* Do nothing - load_interpolated_input handling will handle it later. */
3317 case nir_intrinsic_load_barycentric_at_sample: {
3318 const glsl_interp_mode interpolation =
3319 (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3321 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
3324 unsigned msg_data = const_sample->i32[0] << 4;
3326 emit_pixel_interpolater_send(bld,
3327 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3330 brw_imm_ud(msg_data),
3333 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
3334 BRW_REGISTER_TYPE_UD);
3336 if (nir_src_is_dynamically_uniform(instr->src[0])) {
3337 const fs_reg sample_id = bld.emit_uniformize(sample_src);
3338 const fs_reg msg_data = vgrf(glsl_type::uint_type);
3339 bld.exec_all().group(1, 0)
3340 .SHL(msg_data, sample_id, brw_imm_ud(4u));
3341 emit_pixel_interpolater_send(bld,
3342 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3348 /* Make a loop that sends a message to the pixel interpolater
3349 * for the sample number in each live channel. If there are
3350 * multiple channels with the same sample number then these
3351 * will be handled simultaneously with a single iteration of
3354 bld.emit(BRW_OPCODE_DO);
3356 /* Get the next live sample number into sample_id_reg */
3357 const fs_reg sample_id = bld.emit_uniformize(sample_src);
3359 /* Set the flag register so that we can perform the send
3360 * message on all channels that have the same sample number
3362 bld.CMP(bld.null_reg_ud(),
3363 sample_src, sample_id,
3364 BRW_CONDITIONAL_EQ);
3365 const fs_reg msg_data = vgrf(glsl_type::uint_type);
3366 bld.exec_all().group(1, 0)
3367 .SHL(msg_data, sample_id, brw_imm_ud(4u));
3369 emit_pixel_interpolater_send(bld,
3370 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
3373 component(msg_data, 0),
3375 set_predicate(BRW_PREDICATE_NORMAL, inst);
3377 /* Continue the loop if there are any live channels left */
3378 set_predicate_inv(BRW_PREDICATE_NORMAL,
3380 bld.emit(BRW_OPCODE_WHILE));
3386 case nir_intrinsic_load_barycentric_at_offset: {
3387 const glsl_interp_mode interpolation =
3388 (enum glsl_interp_mode) nir_intrinsic_interp_mode(instr);
3390 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3393 unsigned off_x = MIN2((int)(const_offset->f32[0] * 16), 7) & 0xf;
3394 unsigned off_y = MIN2((int)(const_offset->f32[1] * 16), 7) & 0xf;
3396 emit_pixel_interpolater_send(bld,
3397 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
3400 brw_imm_ud(off_x | (off_y << 4)),
3403 fs_reg src = vgrf(glsl_type::ivec2_type);
3404 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
3405 BRW_REGISTER_TYPE_F);
3406 for (int i = 0; i < 2; i++) {
3407 fs_reg temp = vgrf(glsl_type::float_type);
3408 bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
3409 fs_reg itemp = vgrf(glsl_type::int_type);
3411 bld.MOV(itemp, temp);
3413 /* Clamp the upper end of the range to +7/16.
3414 * ARB_gpu_shader5 requires that we support a maximum offset
3415 * of +0.5, which isn't representable in a S0.4 value -- if
3416 * we didn't clamp it, we'd end up with -8/16, which is the
3417 * opposite of what the shader author wanted.
3419 * This is legal due to ARB_gpu_shader5's quantization
3422 * "Not all values of <offset> may be supported; x and y
3423 * offsets may be rounded to fixed-point values with the
3424 * number of fraction bits given by the
3425 * implementation-dependent constant
3426 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
3428 set_condmod(BRW_CONDITIONAL_L,
3429 bld.SEL(offset(src, bld, i), itemp, brw_imm_d(7)));
3432 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
3433 emit_pixel_interpolater_send(bld,
3443 case nir_intrinsic_load_interpolated_input: {
3444 if (nir_intrinsic_base(instr) == VARYING_SLOT_POS) {
3445 emit_fragcoord_interpolation(dest);
3449 assert(instr->src[0].ssa &&
3450 instr->src[0].ssa->parent_instr->type == nir_instr_type_intrinsic);
3451 nir_intrinsic_instr *bary_intrinsic =
3452 nir_instr_as_intrinsic(instr->src[0].ssa->parent_instr);
3453 nir_intrinsic_op bary_intrin = bary_intrinsic->intrinsic;
3454 enum glsl_interp_mode interp_mode =
3455 (enum glsl_interp_mode) nir_intrinsic_interp_mode(bary_intrinsic);
3458 if (bary_intrin == nir_intrinsic_load_barycentric_at_offset ||
3459 bary_intrin == nir_intrinsic_load_barycentric_at_sample) {
3460 /* Use the result of the PI message */
3461 dst_xy = retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_F);
3463 /* Use the delta_xy values computed from the payload */
3464 enum brw_barycentric_mode bary =
3465 brw_barycentric_mode(interp_mode, bary_intrin);
3467 dst_xy = this->delta_xy[bary];
3470 for (unsigned int i = 0; i < instr->num_components; i++) {
3472 component(interp_reg(nir_intrinsic_base(instr),
3473 nir_intrinsic_component(instr) + i), 0);
3474 interp.type = BRW_REGISTER_TYPE_F;
3475 dest.type = BRW_REGISTER_TYPE_F;
3477 if (devinfo->gen < 6 && interp_mode == INTERP_MODE_SMOOTH) {
3478 fs_reg tmp = vgrf(glsl_type::float_type);
3479 bld.emit(FS_OPCODE_LINTERP, tmp, dst_xy, interp);
3480 bld.MUL(offset(dest, bld, i), tmp, this->pixel_w);
3482 bld.emit(FS_OPCODE_LINTERP, offset(dest, bld, i), dst_xy, interp);
3489 nir_emit_intrinsic(bld, instr);
3495 get_op_for_atomic_add(nir_intrinsic_instr *instr, unsigned src)
3497 const nir_const_value *const val = nir_src_as_const_value(instr->src[src]);
3500 if (val->i32[0] == 1)
3502 else if (val->i32[0] == -1)
3510 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
3511 nir_intrinsic_instr *instr)
3513 assert(stage == MESA_SHADER_COMPUTE);
3514 struct brw_cs_prog_data *cs_prog_data = brw_cs_prog_data(prog_data);
3517 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3518 dest = get_nir_dest(instr->dest);
3520 switch (instr->intrinsic) {
3521 case nir_intrinsic_barrier:
3523 cs_prog_data->uses_barrier = true;
3526 case nir_intrinsic_load_subgroup_id:
3527 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), subgroup_id);
3530 case nir_intrinsic_load_local_invocation_id:
3531 case nir_intrinsic_load_work_group_id: {
3532 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
3533 fs_reg val = nir_system_values[sv];
3534 assert(val.file != BAD_FILE);
3535 dest.type = val.type;
3536 for (unsigned i = 0; i < 3; i++)
3537 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
3541 case nir_intrinsic_load_num_work_groups: {
3542 const unsigned surface =
3543 cs_prog_data->binding_table.work_groups_start;
3545 cs_prog_data->uses_num_work_groups = true;
3547 fs_reg surf_index = brw_imm_ud(surface);
3548 brw_mark_surface_used(prog_data, surface);
3550 /* Read the 3 GLuint components of gl_NumWorkGroups */
3551 for (unsigned i = 0; i < 3; i++) {
3552 fs_reg read_result =
3553 emit_untyped_read(bld, surf_index,
3555 1 /* dims */, 1 /* size */,
3556 BRW_PREDICATE_NONE);
3557 read_result.type = dest.type;
3558 bld.MOV(dest, read_result);
3559 dest = offset(dest, bld, 1);
3564 case nir_intrinsic_shared_atomic_add:
3565 nir_emit_shared_atomic(bld, get_op_for_atomic_add(instr, 1), instr);
3567 case nir_intrinsic_shared_atomic_imin:
3568 nir_emit_shared_atomic(bld, BRW_AOP_IMIN, instr);
3570 case nir_intrinsic_shared_atomic_umin:
3571 nir_emit_shared_atomic(bld, BRW_AOP_UMIN, instr);
3573 case nir_intrinsic_shared_atomic_imax:
3574 nir_emit_shared_atomic(bld, BRW_AOP_IMAX, instr);
3576 case nir_intrinsic_shared_atomic_umax:
3577 nir_emit_shared_atomic(bld, BRW_AOP_UMAX, instr);
3579 case nir_intrinsic_shared_atomic_and:
3580 nir_emit_shared_atomic(bld, BRW_AOP_AND, instr);
3582 case nir_intrinsic_shared_atomic_or:
3583 nir_emit_shared_atomic(bld, BRW_AOP_OR, instr);
3585 case nir_intrinsic_shared_atomic_xor:
3586 nir_emit_shared_atomic(bld, BRW_AOP_XOR, instr);
3588 case nir_intrinsic_shared_atomic_exchange:
3589 nir_emit_shared_atomic(bld, BRW_AOP_MOV, instr);
3591 case nir_intrinsic_shared_atomic_comp_swap:
3592 nir_emit_shared_atomic(bld, BRW_AOP_CMPWR, instr);
3594 case nir_intrinsic_shared_atomic_fmin:
3595 nir_emit_shared_atomic_float(bld, BRW_AOP_FMIN, instr);
3597 case nir_intrinsic_shared_atomic_fmax:
3598 nir_emit_shared_atomic_float(bld, BRW_AOP_FMAX, instr);
3600 case nir_intrinsic_shared_atomic_fcomp_swap:
3601 nir_emit_shared_atomic_float(bld, BRW_AOP_FCMPWR, instr);
3604 case nir_intrinsic_load_shared: {
3605 assert(devinfo->gen >= 7);
3607 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
3609 /* Get the offset to read from */
3611 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
3613 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
3615 offset_reg = vgrf(glsl_type::uint_type);
3617 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
3618 brw_imm_ud(instr->const_index[0]));
3621 /* Read the vector */
3622 do_untyped_vector_read(bld, dest, surf_index, offset_reg,
3623 instr->num_components);
3627 case nir_intrinsic_store_shared: {
3628 assert(devinfo->gen >= 7);
3631 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
3634 fs_reg val_reg = get_nir_src(instr->src[0]);
3637 unsigned writemask = instr->const_index[1];
3639 /* get_nir_src() retypes to integer. Be wary of 64-bit types though
3640 * since the untyped writes below operate in units of 32-bits, which
3641 * means that we need to write twice as many components each time.
3642 * Also, we have to shuffle 64-bit data to be in the appropriate layout
3643 * expected by our 32-bit write messages.
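 *
 * For instance (a sketch, not from the original source): a two-component
 * 64-bit store is emitted as four 32-bit components, with each double
 * split into its low and high dwords in the layout the SIMD8 write
 * expects.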
3645 unsigned type_size = 4;
3646 if (nir_src_bit_size(instr->src[0]) == 64) {
3648 val_reg = shuffle_for_32bit_write(bld, val_reg, 0,
3649 instr->num_components);
3652 unsigned type_slots = type_size / 4;
3654 /* Combine groups of consecutive enabled channels in one write
3655 * message. We use ffs to find the first enabled channel and then ffs on
3656 * the bit-inverse, down-shifted writemask to determine the length of
3657 * the block of enabled bits.
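 *
 * Hedged example (illustrative only): for writemask 0b1101 the first
 * pass finds first_component = 0 and length = 1, writes component X, and
 * clears it to leave 0b1100; the second pass then writes Z and W as one
 * two-component block.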
3660 unsigned first_component = ffs(writemask) - 1;
3661 unsigned length = ffs(~(writemask >> first_component)) - 1;
3663 /* We can't write more than 2 64-bit components at once. Limit the
3664 * length of the write to what we can do and let the next iteration
3668 length = MIN2(2, length);
3671 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
3673 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u32[0] +
3674 type_size * first_component);
3676 offset_reg = vgrf(glsl_type::uint_type);
3678 retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
3679 brw_imm_ud(instr->const_index[0] + type_size * first_component));
3682 emit_untyped_write(bld, surf_index, offset_reg,
3683 offset(val_reg, bld, first_component * type_slots),
3684 1 /* dims */, length * type_slots,
3685 BRW_PREDICATE_NONE);
3687 /* Clear the bits in the writemask that we just wrote, then try
3688 * again to see if more channels are left.
3690 writemask &= (15 << (first_component + length));
3697 nir_emit_intrinsic(bld, instr);
3703 brw_nir_reduction_op_identity(const fs_builder &bld,
3704 nir_op op, brw_reg_type type)
3706 nir_const_value value = nir_alu_binop_identity(op, type_sz(type) * 8);
3707 switch (type_sz(type)) {
3709 assert(type != BRW_REGISTER_TYPE_HF);
3710 return retype(brw_imm_uw(value.u16[0]), type);
3712 return retype(brw_imm_ud(value.u32[0]), type);
3714 if (type == BRW_REGISTER_TYPE_DF)
3715 return setup_imm_df(bld, value.f64[0]);
3717 return retype(brw_imm_u64(value.u64[0]), type);
3719 unreachable("Invalid type size");
3724 brw_op_for_nir_reduction_op(nir_op op)
3727 case nir_op_iadd: return BRW_OPCODE_ADD;
3728 case nir_op_fadd: return BRW_OPCODE_ADD;
3729 case nir_op_imul: return BRW_OPCODE_MUL;
3730 case nir_op_fmul: return BRW_OPCODE_MUL;
3731 case nir_op_imin: return BRW_OPCODE_SEL;
3732 case nir_op_umin: return BRW_OPCODE_SEL;
3733 case nir_op_fmin: return BRW_OPCODE_SEL;
3734 case nir_op_imax: return BRW_OPCODE_SEL;
3735 case nir_op_umax: return BRW_OPCODE_SEL;
3736 case nir_op_fmax: return BRW_OPCODE_SEL;
3737 case nir_op_iand: return BRW_OPCODE_AND;
3738 case nir_op_ior: return BRW_OPCODE_OR;
3739 case nir_op_ixor: return BRW_OPCODE_XOR;
3741 unreachable("Invalid reduction operation");
3745 static brw_conditional_mod
3746 brw_cond_mod_for_nir_reduction_op(nir_op op)
3749 case nir_op_iadd: return BRW_CONDITIONAL_NONE;
3750 case nir_op_fadd: return BRW_CONDITIONAL_NONE;
3751 case nir_op_imul: return BRW_CONDITIONAL_NONE;
3752 case nir_op_fmul: return BRW_CONDITIONAL_NONE;
3753 case nir_op_imin: return BRW_CONDITIONAL_L;
3754 case nir_op_umin: return BRW_CONDITIONAL_L;
3755 case nir_op_fmin: return BRW_CONDITIONAL_L;
3756 case nir_op_imax: return BRW_CONDITIONAL_GE;
3757 case nir_op_umax: return BRW_CONDITIONAL_GE;
3758 case nir_op_fmax: return BRW_CONDITIONAL_GE;
3759 case nir_op_iand: return BRW_CONDITIONAL_NONE;
3760 case nir_op_ior: return BRW_CONDITIONAL_NONE;
3761 case nir_op_ixor: return BRW_CONDITIONAL_NONE;
3763 unreachable("Invalid reduction operation");
3768 fs_visitor::get_nir_image_intrinsic_image(const brw::fs_builder &bld,
3769 nir_intrinsic_instr *instr)
3771 fs_reg image = retype(get_nir_src_imm(instr->src[0]), BRW_REGISTER_TYPE_UD);
3773 if (stage_prog_data->binding_table.image_start > 0) {
3774 if (image.file == BRW_IMMEDIATE_VALUE) {
3775 image.d += stage_prog_data->binding_table.image_start;
3777 bld.ADD(image, image,
3778 brw_imm_d(stage_prog_data->binding_table.image_start));
3782 return bld.emit_uniformize(image);
3786 image_intrinsic_coord_components(nir_intrinsic_instr *instr)
3788 switch (nir_intrinsic_image_dim(instr)) {
3789 case GLSL_SAMPLER_DIM_1D:
3790 return 1 + nir_intrinsic_image_array(instr);
3791 case GLSL_SAMPLER_DIM_2D:
3792 case GLSL_SAMPLER_DIM_RECT:
3793 return 2 + nir_intrinsic_image_array(instr);
3794 case GLSL_SAMPLER_DIM_3D:
3795 case GLSL_SAMPLER_DIM_CUBE:
3797 case GLSL_SAMPLER_DIM_BUF:
3799 case GLSL_SAMPLER_DIM_MS:
3800 return 2 + nir_intrinsic_image_array(instr);
3802 unreachable("Invalid image dimension");
3807 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
3810 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
3811 dest = get_nir_dest(instr->dest);
3813 switch (instr->intrinsic) {
3814 case nir_intrinsic_image_load:
3815 case nir_intrinsic_image_store:
3816 case nir_intrinsic_image_atomic_add:
3817 case nir_intrinsic_image_atomic_min:
3818 case nir_intrinsic_image_atomic_max:
3819 case nir_intrinsic_image_atomic_and:
3820 case nir_intrinsic_image_atomic_or:
3821 case nir_intrinsic_image_atomic_xor:
3822 case nir_intrinsic_image_atomic_exchange:
3823 case nir_intrinsic_image_atomic_comp_swap: {
3824 if (stage == MESA_SHADER_FRAGMENT &&
3825 instr->intrinsic != nir_intrinsic_image_load)
3826 brw_wm_prog_data(prog_data)->has_side_effects = true;
3828 /* Get some metadata from the image intrinsic. */
3829 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
3830 const unsigned dims = image_intrinsic_coord_components(instr);
3831 const GLenum format = nir_intrinsic_format(instr);
3832 const unsigned dest_components = nir_intrinsic_dest_components(instr);
3834 /* Get the arguments of the image intrinsic. */
3835 const fs_reg image = get_nir_image_intrinsic_image(bld, instr);
3836 const fs_reg coords = retype(get_nir_src(instr->src[1]),
3837 BRW_REGISTER_TYPE_UD);
3840 /* Emit an image load, store or atomic op. */
3841 if (instr->intrinsic == nir_intrinsic_image_load) {
3842 tmp = emit_typed_read(bld, image, coords, dims,
3843 instr->num_components);
3844 } else if (instr->intrinsic == nir_intrinsic_image_store) {
3845 const fs_reg src0 = get_nir_src(instr->src[3]);
3846 emit_typed_write(bld, image, coords, src0, dims,
3847 instr->num_components);
3850 unsigned num_srcs = info->num_srcs;
3852 switch (instr->intrinsic) {
3853 case nir_intrinsic_image_atomic_add:
3854 assert(num_srcs == 4);
3856 op = get_op_for_atomic_add(instr, 3);
3858 if (op != BRW_AOP_ADD)
3861 case nir_intrinsic_image_atomic_min:
3862 assert(format == GL_R32UI || format == GL_R32I);
3863 op = (format == GL_R32I) ? BRW_AOP_IMIN : BRW_AOP_UMIN;
3865 case nir_intrinsic_image_atomic_max:
3866 assert(format == GL_R32UI || format == GL_R32I);
3867 op = (format == GL_R32I) ? BRW_AOP_IMAX : BRW_AOP_UMAX;
3869 case nir_intrinsic_image_atomic_and:
3872 case nir_intrinsic_image_atomic_or:
3875 case nir_intrinsic_image_atomic_xor:
3878 case nir_intrinsic_image_atomic_exchange:
3881 case nir_intrinsic_image_atomic_comp_swap:
3885 unreachable("Not reachable.");
3888 const fs_reg src0 = (num_srcs >= 4 ?
3889 get_nir_src(instr->src[3]) : fs_reg());
3890 const fs_reg src1 = (num_srcs >= 5 ?
3891 get_nir_src(instr->src[4]) : fs_reg());
3893 tmp = emit_typed_atomic(bld, image, coords, src0, src1, dims, 1, op);
3896 /* Assign the result. */
3897 for (unsigned c = 0; c < dest_components; ++c) {
3898 bld.MOV(offset(retype(dest, tmp.type), bld, c),
3899 offset(tmp, bld, c));
3904 case nir_intrinsic_image_size: {
3905 /* Unlike the [un]typed load and store opcodes, the TXS that this turns
3906 * into will handle the binding table index for us in the generator.
3908 fs_reg image = retype(get_nir_src_imm(instr->src[0]),
3909 BRW_REGISTER_TYPE_UD);
3910 image = bld.emit_uniformize(image);
3912 /* Since the image size is always uniform, we can just emit a SIMD8
3913 * query instruction and splat the result out.
3915 const fs_builder ubld = bld.exec_all().group(8, 0);
3917 /* The LOD also serves as the message payload */
3918 fs_reg lod = ubld.vgrf(BRW_REGISTER_TYPE_UD);
3919 ubld.MOV(lod, brw_imm_ud(0));
3921 fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
3922 fs_inst *inst = ubld.emit(SHADER_OPCODE_IMAGE_SIZE, tmp, lod, image);
3924 inst->size_written = 4 * REG_SIZE;
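/* Note (an assumption based on how the TXS query reports cube depth, not
 * stated here): for cube images the surface depth comes back in faces,
 * so the loop below divides the third component by 6 to return the
 * number of cube layers.
 */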
3926 for (unsigned c = 0; c < instr->dest.ssa.num_components; ++c) {
3927 if (c == 2 && nir_intrinsic_image_dim(instr) == GLSL_SAMPLER_DIM_CUBE) {
3928 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
3929 offset(retype(dest, tmp.type), bld, c),
3930 component(offset(tmp, ubld, c), 0), brw_imm_ud(6));
3932 bld.MOV(offset(retype(dest, tmp.type), bld, c),
3933 component(offset(tmp, ubld, c), 0));
3939 case nir_intrinsic_image_load_raw_intel: {
3940 const fs_reg image = get_nir_image_intrinsic_image(bld, instr);
3941 const fs_reg addr = retype(get_nir_src(instr->src[1]),
3942 BRW_REGISTER_TYPE_UD);
3944 fs_reg tmp = emit_untyped_read(bld, image, addr, 1,
3945 instr->num_components);
3947 for (unsigned c = 0; c < instr->num_components; ++c) {
3948 bld.MOV(offset(retype(dest, tmp.type), bld, c),
3949 offset(tmp, bld, c));
3954 case nir_intrinsic_image_store_raw_intel: {
3955 const fs_reg image = get_nir_image_intrinsic_image(bld, instr);
3956 const fs_reg addr = retype(get_nir_src(instr->src[1]),
3957 BRW_REGISTER_TYPE_UD);
3958 const fs_reg data = retype(get_nir_src(instr->src[2]),
3959 BRW_REGISTER_TYPE_UD);
3961 brw_wm_prog_data(prog_data)->has_side_effects = true;
3963 emit_untyped_write(bld, image, addr, data, 1,
3964 instr->num_components);
3968 case nir_intrinsic_group_memory_barrier:
3969 case nir_intrinsic_memory_barrier_shared:
3970 case nir_intrinsic_memory_barrier_atomic_counter:
3971 case nir_intrinsic_memory_barrier_buffer:
3972 case nir_intrinsic_memory_barrier_image:
3973 case nir_intrinsic_memory_barrier: {
3974 const fs_builder ubld = bld.group(8, 0);
3975 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
3976 ubld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
3977 ->size_written = 2 * REG_SIZE;
3981 case nir_intrinsic_shader_clock: {
3982 /* We cannot do anything if there is an event, so ignore it for now */
3983 const fs_reg shader_clock = get_timestamp(bld);
3984 const fs_reg srcs[] = { component(shader_clock, 0),
3985 component(shader_clock, 1) };
3986 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
3990 case nir_intrinsic_image_samples:
3991 /* The driver does not support multi-sampled images. */
3992 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
3995 case nir_intrinsic_load_uniform: {
/* Offsets are in bytes, but they should always be aligned to the
* type size.
*/
3999 assert(instr->const_index[0] % 4 == 0 ||
4000 instr->const_index[0] % type_sz(dest.type) == 0);
4002 fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
assert(const_offset->u32[0] % type_sz(dest.type) == 0);
/* For 16-bit types we add the modulo of the const_index[0] offset to
* access elements that are not 32-bit aligned.
*/
4010 src.offset = const_offset->u32[0] + instr->const_index[0] % 4;
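/* For example, a 16-bit uniform at byte offset 6 has
* const_index[0] == 6: the register index is 6 / 4 == 1, and the
* remaining 6 % 4 == 2 bytes go into src.offset, selecting the second
* 16-bit half of that 32-bit slot.
*/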
4012 for (unsigned j = 0; j < instr->num_components; j++) {
4013 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
}
} else {
fs_reg indirect = retype(get_nir_src(instr->src[0]),
4017 BRW_REGISTER_TYPE_UD);
4019 /* We need to pass a size to the MOV_INDIRECT but we don't want it to
4020 * go past the end of the uniform. In order to keep the n'th
4021 * component from running past, we subtract off the size of all but
4022 * one component of the vector.
4024 assert(instr->const_index[1] >=
4025 instr->num_components * (int) type_sz(dest.type));
4026 unsigned read_size = instr->const_index[1] -
4027 (instr->num_components - 1) * type_sz(dest.type);
4029 bool supports_64bit_indirects =
4030 !devinfo->is_cherryview && !gen_device_info_is_9lp(devinfo);
4032 if (type_sz(dest.type) != 8 || supports_64bit_indirects) {
4033 for (unsigned j = 0; j < instr->num_components; j++) {
4034 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
4035 offset(dest, bld, j), offset(src, bld, j),
4036 indirect, brw_imm_ud(read_size));
}
} else {
const unsigned num_mov_indirects =
4040 type_sz(dest.type) / type_sz(BRW_REGISTER_TYPE_UD);
/* We read a little less per MOV INDIRECT, as they are now 32-bit
* ones instead of 64-bit. Adjust read_size accordingly.
*/
4044 const unsigned read_size_32bit = read_size -
4045 (num_mov_indirects - 1) * type_sz(BRW_REGISTER_TYPE_UD);
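/* For example, a single 64-bit component with const_index[1] == 8
* gives read_size == 8 and read_size_32bit == 8 - 4 == 4, so each of
* the two 32-bit MOV_INDIRECTs stays within the bounds of the
* uniform.
*/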
4046 for (unsigned j = 0; j < instr->num_components; j++) {
4047 for (unsigned i = 0; i < num_mov_indirects; i++) {
4048 bld.emit(SHADER_OPCODE_MOV_INDIRECT,
4049 subscript(offset(dest, bld, j), BRW_REGISTER_TYPE_UD, i),
4050 subscript(offset(src, bld, j), BRW_REGISTER_TYPE_UD, i),
4051 indirect, brw_imm_ud(read_size_32bit));
4059 case nir_intrinsic_load_ubo: {
nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
fs_reg surf_index;

if (const_index) {
const unsigned index = stage_prog_data->binding_table.ubo_start +
4065 const_index->u32[0];
4066 surf_index = brw_imm_ud(index);
4067 brw_mark_surface_used(prog_data, index);
} else {
/* The block index is not a constant. Evaluate the index expression
4070 * per-channel and add the base UBO index; we have to select a value
4071 * from any live channel.
4073 surf_index = vgrf(glsl_type::uint_type);
4074 bld.ADD(surf_index, get_nir_src(instr->src[0]),
4075 brw_imm_ud(stage_prog_data->binding_table.ubo_start));
4076 surf_index = bld.emit_uniformize(surf_index);
4078 /* Assume this may touch any UBO. It would be nice to provide
4079 * a tighter bound, but the array information is already lowered away.
4081 brw_mark_surface_used(prog_data,
4082 stage_prog_data->binding_table.ubo_start +
4083 nir->info.num_ubos - 1);
}

nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
4087 if (const_offset == NULL) {
4088 fs_reg base_offset = retype(get_nir_src(instr->src[1]),
4089 BRW_REGISTER_TYPE_UD);
4091 for (int i = 0; i < instr->num_components; i++)
4092 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
4093 base_offset, i * type_sz(dest.type));
} else {
/* Even if we are loading doubles, a pull constant load will load
4096 * a 32-bit vec4, so should only reserve vgrf space for that. If we
4097 * need to load a full dvec4 we will have to emit 2 loads. This is
4098 * similar to demote_pull_constants(), except that in that case we
4099 * see individual accesses to each component of the vector and then
4100 * we let CSE deal with duplicate loads. Here we see a vector access
4101 * and we have to split it if necessary.
4103 const unsigned type_size = type_sz(dest.type);
4105 /* See if we've selected this as a push constant candidate */
if (const_index) {
const unsigned ubo_block = const_index->u32[0];
4108 const unsigned offset_256b = const_offset->u32[0] / 32;
fs_reg push_reg;
for (int i = 0; i < 4; i++) {
4112 const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
4113 if (range->block == ubo_block &&
4114 offset_256b >= range->start &&
4115 offset_256b < range->start + range->length) {
4117 push_reg = fs_reg(UNIFORM, UBO_START + i, dest.type);
push_reg.offset = const_offset->u32[0] - 32 * range->start;
break;
}
}
4123 if (push_reg.file != BAD_FILE) {
4124 for (unsigned i = 0; i < instr->num_components; i++) {
4125 bld.MOV(offset(dest, bld, i),
4126 byte_offset(push_reg, i * type_size));
}
break;
}
}

const unsigned block_sz = 64; /* Fetch one cacheline at a time. */
4133 const fs_builder ubld = bld.exec_all().group(block_sz / 4, 0);
4134 const fs_reg packed_consts = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4136 for (unsigned c = 0; c < instr->num_components;) {
4137 const unsigned base = const_offset->u32[0] + c * type_size;
4138 /* Number of usable components in the next block-aligned load. */
4139 const unsigned count = MIN2(instr->num_components - c,
4140 (block_sz - base % block_sz) / type_size);
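/* For example, a 4-component 32-bit load at constant byte offset 52
* takes two iterations: the first loads the cacheline at offset 0 and
* uses the three components at bytes 52-63, the second loads the
* cacheline at offset 64 for the remaining component.
*/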
4142 ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
4143 packed_consts, surf_index,
4144 brw_imm_ud(base & ~(block_sz - 1)));
4146 const fs_reg consts =
4147 retype(byte_offset(packed_consts, base & (block_sz - 1)),
dest.type);

for (unsigned d = 0; d < count; d++)
bld.MOV(offset(dest, bld, c + d), component(consts, d));

c += count;
}
4159 case nir_intrinsic_load_ssbo: {
4160 assert(devinfo->gen >= 7);
4162 nir_const_value *const_uniform_block =
4163 nir_src_as_const_value(instr->src[0]);
fs_reg surf_index;
if (const_uniform_block) {
4167 unsigned index = stage_prog_data->binding_table.ssbo_start +
4168 const_uniform_block->u32[0];
4169 surf_index = brw_imm_ud(index);
4170 brw_mark_surface_used(prog_data, index);
} else {
surf_index = vgrf(glsl_type::uint_type);
4173 bld.ADD(surf_index, get_nir_src(instr->src[0]),
4174 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
/* Assume this may touch any SSBO. It would be nice to provide
* a tighter bound, but the array information is already lowered away.
*/
4179 brw_mark_surface_used(prog_data,
4180 stage_prog_data->binding_table.ssbo_start +
4181 nir->info.num_ssbos - 1);
}

nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
fs_reg offset_reg;
if (const_offset) {
offset_reg = brw_imm_ud(const_offset->u32[0]);
} else {
offset_reg = retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD);
}
4192 /* Read the vector */
4193 do_untyped_vector_read(bld, dest, surf_index, offset_reg,
4194 instr->num_components);
4199 case nir_intrinsic_store_ssbo: {
4200 assert(devinfo->gen >= 7);
4202 if (stage == MESA_SHADER_FRAGMENT)
4203 brw_wm_prog_data(prog_data)->has_side_effects = true;
fs_reg surf_index;
nir_const_value *const_uniform_block =
4208 nir_src_as_const_value(instr->src[1]);
4209 if (const_uniform_block) {
4210 unsigned index = stage_prog_data->binding_table.ssbo_start +
4211 const_uniform_block->u32[0];
4212 surf_index = brw_imm_ud(index);
4213 brw_mark_surface_used(prog_data, index);
} else {
surf_index = vgrf(glsl_type::uint_type);
4216 bld.ADD(surf_index, get_nir_src(instr->src[1]),
4217 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
4219 brw_mark_surface_used(prog_data,
4220 stage_prog_data->binding_table.ssbo_start +
4221 nir->info.num_ssbos - 1);
}

fs_reg val_reg = get_nir_src(instr->src[0]);
4228 unsigned writemask = instr->const_index[0];
4230 /* get_nir_src() retypes to integer. Be wary of 64-bit types though
4231 * since the untyped writes below operate in units of 32-bits, which
4232 * means that we need to write twice as many components each time.
* Also, we have to shuffle 64-bit data to be in the appropriate layout
* expected by our 32-bit write messages.
*/
4236 unsigned bit_size = nir_src_bit_size(instr->src[0]);
4237 unsigned type_size = bit_size / 8;
/* Combine groups of consecutive enabled channels in one write
* message. We use ffs to find the first enabled channel and then ffs on
* the bit-inverse, down-shifted writemask to determine the
* num_components of the block of enabled bits.
*/
while (writemask) {
unsigned first_component = ffs(writemask) - 1;
unsigned num_components = ffs(~(writemask >> first_component)) - 1;
fs_reg write_src = offset(val_reg, bld, first_component);
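/* For example, writemask 0b1101 takes two iterations: the first
* writes component 0 (first_component == 0, num_components == 1), the
* second writes components 2 and 3 (first_component == 2,
* num_components == 2).
*/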
4249 nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
4251 if (type_size > 4) {
4252 /* We can't write more than 2 64-bit components at once. Limit
4253 * the num_components of the write to what we can do and let the next
4254 * iteration handle the rest.
4256 num_components = MIN2(2, num_components);
write_src = shuffle_for_32bit_write(bld, write_src, 0,
num_components);
} else if (type_size < 4) {
/* For 16-bit types we pack two consecutive values into a 32-bit
* word and use an untyped write message. For single values or
* offsets that are not 32-bit aligned we need to use byte-scattered
* writes, because untyped writes only work with 32-bit components
* at 32-bit alignment, and byte_scattered_write messages only
* support one 16-bit component at a time. As
* VK_KHR_relaxed_block_layout may be enabled, we cannot guarantee
* that non-constant offsets are 32-bit aligned for 16-bit types;
* for example, an array of 16-bit vec3 with an array element
* stride of 6.
*
* In the case of 32-bit aligned constant offsets, if there is a
* 3-component vector we submit one untyped-write message for
* 32 bits (the first two components) and one byte-scattered write
* message (for the last component).
*/
if (!const_offset || ((const_offset->u32[0] +
type_size * first_component) % 4)) {
/* If we use a .yz writemask we also need to emit 2
* byte-scattered write messages because the y component is
* not 32-bit aligned.
*/
num_components = 1;
4283 } else if (num_components * type_size > 4 &&
4284 (num_components * type_size % 4)) {
/* If the size of the pending components is not a multiple of 4
* bytes, we leave the misaligned trailing components for
* subsequent emits of length == 1 with byte_scattered_write.
*/
4289 num_components -= (num_components * type_size % 4) / type_size;
} else if (num_components * type_size < 4) {
num_components = 1;
}

/* For num_components == 1 we are also shuffling the component
* because byte scattered writes of 16-bit need values to be dword
* aligned. Shuffling only one component would be the same as
* extracting it.
*/
write_src = shuffle_for_32bit_write(bld, write_src, 0,
num_components);
}
fs_reg offset_reg;

if (const_offset) {
offset_reg = brw_imm_ud(const_offset->u32[0] +
type_size * first_component);
} else {
offset_reg = vgrf(glsl_type::uint_type);
bld.ADD(offset_reg,
retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
brw_imm_ud(type_size * first_component));
}
4314 if (type_size < 4 && num_components == 1) {
/* Untyped Surface messages have a fixed 32-bit size, so we need
* to rely on byte-scattered writes in order to write 16-bit
* elements. The byte_scattered_write message requires every
* written 16-bit value to be 32-bit aligned (stride == 2).
*/
emit_byte_scattered_write(bld, surf_index, offset_reg,
write_src,
1 /* dims */, 1,
bit_size,
BRW_PREDICATE_NONE);
} else {
assert(num_components * type_size <= 16);
4327 assert((num_components * type_size) % 4 == 0);
4328 assert(offset_reg.file != BRW_IMMEDIATE_VALUE ||
4329 offset_reg.ud % 4 == 0);
4330 unsigned num_slots = (num_components * type_size) / 4;
emit_untyped_write(bld, surf_index, offset_reg,
write_src,
1 /* dims */, num_slots,
4335 BRW_PREDICATE_NONE);
}

/* Clear the bits in the writemask that we just wrote, then try
* again to see if more channels are left.
*/
writemask &= (15 << (first_component + num_components));
}
4346 case nir_intrinsic_store_output: {
4347 fs_reg src = get_nir_src(instr->src[0]);
4349 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
4350 assert(const_offset && "Indirect output stores not allowed");
4352 unsigned num_components = instr->num_components;
4353 unsigned first_component = nir_intrinsic_component(instr);
4354 if (nir_src_bit_size(instr->src[0]) == 64) {
4355 src = shuffle_for_32bit_write(bld, src, 0, num_components);
4356 num_components *= 2;
4359 fs_reg new_dest = retype(offset(outputs[instr->const_index[0]], bld,
4360 4 * const_offset->u32[0]), src.type);
4361 for (unsigned j = 0; j < num_components; j++) {
4362 bld.MOV(offset(new_dest, bld, j + first_component),
4363 offset(src, bld, j));
case nir_intrinsic_ssbo_atomic_add:
nir_emit_ssbo_atomic(bld, get_op_for_atomic_add(instr, 2), instr);
break;
case nir_intrinsic_ssbo_atomic_imin:
nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
break;
case nir_intrinsic_ssbo_atomic_umin:
nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
break;
case nir_intrinsic_ssbo_atomic_imax:
nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
break;
case nir_intrinsic_ssbo_atomic_umax:
nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
break;
case nir_intrinsic_ssbo_atomic_and:
nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
break;
case nir_intrinsic_ssbo_atomic_or:
nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
break;
case nir_intrinsic_ssbo_atomic_xor:
nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
break;
case nir_intrinsic_ssbo_atomic_exchange:
nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
break;
case nir_intrinsic_ssbo_atomic_comp_swap:
nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
break;
case nir_intrinsic_ssbo_atomic_fmin:
nir_emit_ssbo_atomic_float(bld, BRW_AOP_FMIN, instr);
break;
case nir_intrinsic_ssbo_atomic_fmax:
nir_emit_ssbo_atomic_float(bld, BRW_AOP_FMAX, instr);
break;
case nir_intrinsic_ssbo_atomic_fcomp_swap:
nir_emit_ssbo_atomic_float(bld, BRW_AOP_FCMPWR, instr);
break;
4408 case nir_intrinsic_get_buffer_size: {
4409 nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
4410 unsigned ssbo_index = const_uniform_block ? const_uniform_block->u32[0] : 0;
/* A resinfo sampler message is used to get the buffer size. The
* SIMD8 writeback message consists of four registers and the SIMD16
* writeback message consists of eight destination registers (two per
* component). Because we are only interested in the first channel of
* the first returned component, where resinfo returns the buffer size
* for SURFTYPE_BUFFER, we can just use the SIMD8 variant regardless of
* the dispatch width.
*/
4420 const fs_builder ubld = bld.exec_all().group(8, 0);
4421 fs_reg src_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4422 fs_reg ret_payload = ubld.vgrf(BRW_REGISTER_TYPE_UD, 4);
4425 ubld.MOV(src_payload, brw_imm_d(0));
4427 const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
4428 fs_inst *inst = ubld.emit(SHADER_OPCODE_GET_BUFFER_SIZE, ret_payload,
4429 src_payload, brw_imm_ud(index));
inst->header_size = 0;
inst->mlen = 1;
inst->size_written = 4 * REG_SIZE;
/* SKL PRM, vol07, 3D Media GPGPU Engine, Bounds Checking and Faulting:
*
* "Out-of-bounds checking is always performed at a DWord granularity. If
* any part of the DWord is out-of-bounds then the whole DWord is
* considered out-of-bounds."
*
* This implies that types with a size smaller than 4 bytes need to be
* padded if they don't complete the last dword of the buffer. But as we
* need to maintain the original size, we need to reverse the padding
* calculation to return the correct size and thus the correct number of
* elements of an unsized array. As we stored the needed padding in the
* last two bits of the surface size, we calculate the original
* buffer_size here by reversing the surface_size calculation:
*
* surface_size = isl_align(buffer_size, 4) +
* (isl_align(buffer_size, 4) - buffer_size)
*
* buffer_size = (surface_size & ~3) - (surface_size & 3)
*/
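/* For example, buffer_size == 6: isl_align(6, 4) == 8 and the padding
* is 8 - 6 == 2, so surface_size == 10. Reversing:
* (10 & ~3) - (10 & 3) == 8 - 2 == 6, the original size.
*/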
4454 fs_reg size_aligned4 = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4455 fs_reg size_padding = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4456 fs_reg buffer_size = ubld.vgrf(BRW_REGISTER_TYPE_UD);
4458 ubld.AND(size_padding, ret_payload, brw_imm_ud(3));
4459 ubld.AND(size_aligned4, ret_payload, brw_imm_ud(~3));
4460 ubld.ADD(buffer_size, size_aligned4, negate(size_padding));
4462 bld.MOV(retype(dest, ret_payload.type), component(buffer_size, 0));
4464 brw_mark_surface_used(prog_data, index);
4468 case nir_intrinsic_load_subgroup_invocation:
4469 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
4470 nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION]);
4473 case nir_intrinsic_load_subgroup_eq_mask:
4474 case nir_intrinsic_load_subgroup_ge_mask:
4475 case nir_intrinsic_load_subgroup_gt_mask:
4476 case nir_intrinsic_load_subgroup_le_mask:
4477 case nir_intrinsic_load_subgroup_lt_mask:
4478 unreachable("not reached");
4480 case nir_intrinsic_vote_any: {
4481 const fs_builder ubld = bld.exec_all().group(1, 0);
/* The any/all predicates do not consider channel enables. To prevent
* dead channels from affecting the result, we initialize the flag with
* the identity value for the logical operation.
*/
4487 if (dispatch_width == 32) {
4488 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
brw_imm_ud(0));
} else {
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0));
}
4494 bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
4496 /* For some reason, the any/all predicates don't work properly with
4497 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4498 * doesn't read the correct subset of the flag register and you end up
4499 * getting garbage in the second half. Work around this by using a pair
4500 * of 1-wide MOVs and scattering the result.
4502 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4503 ubld.MOV(res1, brw_imm_d(0));
4504 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ANY8H :
4505 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ANY16H :
4506 BRW_PREDICATE_ALIGN1_ANY32H,
4507 ubld.MOV(res1, brw_imm_d(-1)));
4509 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4512 case nir_intrinsic_vote_all: {
4513 const fs_builder ubld = bld.exec_all().group(1, 0);
/* The any/all predicates do not consider channel enables. To prevent
* dead channels from affecting the result, we initialize the flag with
* the identity value for the logical operation.
*/
4519 if (dispatch_width == 32) {
4520 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
4521 ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
4522 brw_imm_ud(0xffffffff));
} else {
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
}
4526 bld.CMP(bld.null_reg_d(), get_nir_src(instr->src[0]), brw_imm_d(0), BRW_CONDITIONAL_NZ);
4528 /* For some reason, the any/all predicates don't work properly with
4529 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4530 * doesn't read the correct subset of the flag register and you end up
4531 * getting garbage in the second half. Work around this by using a pair
4532 * of 1-wide MOVs and scattering the result.
4534 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4535 ubld.MOV(res1, brw_imm_d(0));
4536 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
4537 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
4538 BRW_PREDICATE_ALIGN1_ALL32H,
4539 ubld.MOV(res1, brw_imm_d(-1)));
4541 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4544 case nir_intrinsic_vote_feq:
4545 case nir_intrinsic_vote_ieq: {
4546 fs_reg value = get_nir_src(instr->src[0]);
4547 if (instr->intrinsic == nir_intrinsic_vote_feq) {
4548 const unsigned bit_size = nir_src_bit_size(instr->src[0]);
4549 value.type = brw_reg_type_from_bit_size(bit_size, BRW_REGISTER_TYPE_F);
4552 fs_reg uniformized = bld.emit_uniformize(value);
4553 const fs_builder ubld = bld.exec_all().group(1, 0);
/* The any/all predicates do not consider channel enables. To prevent
* dead channels from affecting the result, we initialize the flag with
* the identity value for the logical operation.
*/
4559 if (dispatch_width == 32) {
4560 /* For SIMD32, we use a UD type so we fill both f0.0 and f0.1. */
4561 ubld.MOV(retype(brw_flag_reg(0, 0), BRW_REGISTER_TYPE_UD),
4562 brw_imm_ud(0xffffffff));
} else {
ubld.MOV(brw_flag_reg(0, 0), brw_imm_uw(0xffff));
}
4566 bld.CMP(bld.null_reg_d(), value, uniformized, BRW_CONDITIONAL_Z);
4568 /* For some reason, the any/all predicates don't work properly with
4569 * SIMD32. In particular, it appears that a SEL with a QtrCtrl of 2H
4570 * doesn't read the correct subset of the flag register and you end up
4571 * getting garbage in the second half. Work around this by using a pair
4572 * of 1-wide MOVs and scattering the result.
4574 fs_reg res1 = ubld.vgrf(BRW_REGISTER_TYPE_D);
4575 ubld.MOV(res1, brw_imm_d(0));
4576 set_predicate(dispatch_width == 8 ? BRW_PREDICATE_ALIGN1_ALL8H :
4577 dispatch_width == 16 ? BRW_PREDICATE_ALIGN1_ALL16H :
4578 BRW_PREDICATE_ALIGN1_ALL32H,
4579 ubld.MOV(res1, brw_imm_d(-1)));
4581 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), component(res1, 0));
4585 case nir_intrinsic_ballot: {
4586 const fs_reg value = retype(get_nir_src(instr->src[0]),
4587 BRW_REGISTER_TYPE_UD);
4588 struct brw_reg flag = brw_flag_reg(0, 0);
4589 /* FIXME: For SIMD32 programs, this causes us to stomp on f0.1 as well
4590 * as f0.0. This is a problem for fragment programs as we currently use
4591 * f0.1 for discards. Fortunately, we don't support SIMD32 fragment
* programs yet so this isn't a problem. When we do, something will
* have to change.
*/
4595 if (dispatch_width == 32)
4596 flag.type = BRW_REGISTER_TYPE_UD;
4598 bld.exec_all().group(1, 0).MOV(flag, brw_imm_ud(0u));
4599 bld.CMP(bld.null_reg_ud(), value, brw_imm_ud(0u), BRW_CONDITIONAL_NZ);
4601 if (instr->dest.ssa.bit_size > 32) {
4602 dest.type = BRW_REGISTER_TYPE_UQ;
} else {
dest.type = BRW_REGISTER_TYPE_UD;
}
4606 bld.MOV(dest, flag);
4610 case nir_intrinsic_read_invocation: {
4611 const fs_reg value = get_nir_src(instr->src[0]);
4612 const fs_reg invocation = get_nir_src(instr->src[1]);
4613 fs_reg tmp = bld.vgrf(value.type);
4615 bld.exec_all().emit(SHADER_OPCODE_BROADCAST, tmp, value,
4616 bld.emit_uniformize(invocation));
4618 bld.MOV(retype(dest, value.type), fs_reg(component(tmp, 0)));
4622 case nir_intrinsic_read_first_invocation: {
4623 const fs_reg value = get_nir_src(instr->src[0]);
4624 bld.MOV(retype(dest, value.type), bld.emit_uniformize(value));
4628 case nir_intrinsic_shuffle: {
4629 const fs_reg value = get_nir_src(instr->src[0]);
4630 const fs_reg index = get_nir_src(instr->src[1]);
4632 bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, index);
4636 case nir_intrinsic_first_invocation: {
4637 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD);
4638 bld.exec_all().emit(SHADER_OPCODE_FIND_LIVE_CHANNEL, tmp);
4639 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
4640 fs_reg(component(tmp, 0)));
4644 case nir_intrinsic_quad_broadcast: {
4645 const fs_reg value = get_nir_src(instr->src[0]);
4646 nir_const_value *index = nir_src_as_const_value(instr->src[1]);
4647 assert(nir_src_bit_size(instr->src[1]) == 32);
4649 bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, retype(dest, value.type),
4650 value, brw_imm_ud(index->u32[0]), brw_imm_ud(4));
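/* With a cluster size of 4, every channel of a 2x2 quad receives the
* value held by the quad's channel selected by the constant index
* (0 through 3).
*/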
4654 case nir_intrinsic_quad_swap_horizontal: {
4655 const fs_reg value = get_nir_src(instr->src[0]);
4656 const fs_reg tmp = bld.vgrf(value.type);
4657 const fs_builder ubld = bld.exec_all().group(dispatch_width / 2, 0);
4659 const fs_reg src_left = horiz_stride(value, 2);
4660 const fs_reg src_right = horiz_stride(horiz_offset(value, 1), 2);
4661 const fs_reg tmp_left = horiz_stride(tmp, 2);
4662 const fs_reg tmp_right = horiz_stride(horiz_offset(tmp, 1), 2);
/* From the Cherryview PRM Vol. 7, "Register Region Restrictions":
*
* "When source or destination datatype is 64b or operation is
* integer DWord multiply, regioning in Align1 must follow
* these rules:
*
* [...]
*
* 3. Source and Destination offset must be the same, except
* the case of scalar source."
*
4675 * In order to work around this, we have to emit two 32-bit MOVs instead
4676 * of a single 64-bit MOV to do the shuffle.
4678 if (type_sz(value.type) > 4 &&
4679 (devinfo->is_cherryview || gen_device_info_is_9lp(devinfo))) {
4680 ubld.MOV(subscript(tmp_left, BRW_REGISTER_TYPE_D, 0),
4681 subscript(src_right, BRW_REGISTER_TYPE_D, 0));
4682 ubld.MOV(subscript(tmp_left, BRW_REGISTER_TYPE_D, 1),
4683 subscript(src_right, BRW_REGISTER_TYPE_D, 1));
4684 ubld.MOV(subscript(tmp_right, BRW_REGISTER_TYPE_D, 0),
4685 subscript(src_left, BRW_REGISTER_TYPE_D, 0));
4686 ubld.MOV(subscript(tmp_right, BRW_REGISTER_TYPE_D, 1),
4687 subscript(src_left, BRW_REGISTER_TYPE_D, 1));
} else {
ubld.MOV(tmp_left, src_right);
ubld.MOV(tmp_right, src_left);
}
4692 bld.MOV(retype(dest, value.type), tmp);
4696 case nir_intrinsic_quad_swap_vertical: {
4697 const fs_reg value = get_nir_src(instr->src[0]);
4698 if (nir_src_bit_size(instr->src[0]) == 32) {
4699 /* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
4700 const fs_reg tmp = bld.vgrf(value.type);
4701 const fs_builder ubld = bld.exec_all();
4702 ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
4703 brw_imm_ud(BRW_SWIZZLE4(2,3,0,1)));
4704 bld.MOV(retype(dest, value.type), tmp);
} else {
/* For larger data types, we have to either emit dispatch_width many
* MOVs or else fall back to doing indirects.
*/
4709 fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
bld.XOR(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
brw_imm_w(0x2));
4712 bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
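/* XOR-ing the subgroup invocation with 2 swaps the two rows of each
* 2x2 quad (channels 0<->2 and 1<->3), which is exactly the vertical
* swap.
*/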
4717 case nir_intrinsic_quad_swap_diagonal: {
4718 const fs_reg value = get_nir_src(instr->src[0]);
4719 if (nir_src_bit_size(instr->src[0]) == 32) {
4720 /* For 32-bit, we can use a SIMD4x2 instruction to do this easily */
4721 const fs_reg tmp = bld.vgrf(value.type);
4722 const fs_builder ubld = bld.exec_all();
4723 ubld.emit(SHADER_OPCODE_QUAD_SWIZZLE, tmp, value,
4724 brw_imm_ud(BRW_SWIZZLE4(3,2,1,0)));
4725 bld.MOV(retype(dest, value.type), tmp);
} else {
/* For larger data types, we have to either emit dispatch_width many
* MOVs or else fall back to doing indirects.
*/
4730 fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
bld.XOR(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
brw_imm_w(0x3));
4733 bld.emit(SHADER_OPCODE_SHUFFLE, retype(dest, value.type), value, idx);
4738 case nir_intrinsic_reduce: {
4739 fs_reg src = get_nir_src(instr->src[0]);
4740 nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
4741 unsigned cluster_size = nir_intrinsic_cluster_size(instr);
4742 if (cluster_size == 0 || cluster_size > dispatch_width)
4743 cluster_size = dispatch_width;
4745 /* Figure out the source type */
4746 src.type = brw_type_for_nir_type(devinfo,
4747 (nir_alu_type)(nir_op_infos[redop].input_types[0] |
4748 nir_src_bit_size(instr->src[0])));
4750 fs_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
4751 opcode brw_op = brw_op_for_nir_reduction_op(redop);
4752 brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
/* Set up a register for all of our scratching around and initialize it
* to the reduction operation's identity value.
*/
4757 fs_reg scan = bld.vgrf(src.type);
4758 bld.exec_all().emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
4760 bld.emit_scan(brw_op, scan, cluster_size, cond_mod);
4762 dest.type = src.type;
4763 if (cluster_size * type_sz(src.type) >= REG_SIZE * 2) {
/* In this case, the CLUSTER_BROADCAST instruction isn't needed
* because the distance between clusters is at least 2 GRFs, so we
* don't need the weird striding of the CLUSTER_BROADCAST instruction
* and can just do regular MOVs.
*/
4769 assert((cluster_size * type_sz(src.type)) % (REG_SIZE * 2) == 0);
4770 const unsigned groups =
4771 (dispatch_width * type_sz(src.type)) / (REG_SIZE * 2);
4772 const unsigned group_size = dispatch_width / groups;
4773 for (unsigned i = 0; i < groups; i++) {
4774 const unsigned cluster = (i * group_size) / cluster_size;
4775 const unsigned comp = cluster * cluster_size + (cluster_size - 1);
4776 bld.group(group_size, i).MOV(horiz_offset(dest, i * group_size),
4777 component(scan, comp));
}
} else {
bld.emit(SHADER_OPCODE_CLUSTER_BROADCAST, dest, scan,
4781 brw_imm_ud(cluster_size - 1), brw_imm_ud(cluster_size));
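/* For example, with cluster_size == 4 in SIMD16 the scan leaves each
* cluster's reduction in its last channel (3, 7, 11 and 15), and
* CLUSTER_BROADCAST with source component cluster_size - 1 copies that
* channel to every channel of its cluster.
*/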
4786 case nir_intrinsic_inclusive_scan:
4787 case nir_intrinsic_exclusive_scan: {
4788 fs_reg src = get_nir_src(instr->src[0]);
4789 nir_op redop = (nir_op)nir_intrinsic_reduction_op(instr);
4791 /* Figure out the source type */
4792 src.type = brw_type_for_nir_type(devinfo,
4793 (nir_alu_type)(nir_op_infos[redop].input_types[0] |
4794 nir_src_bit_size(instr->src[0])));
4796 fs_reg identity = brw_nir_reduction_op_identity(bld, redop, src.type);
4797 opcode brw_op = brw_op_for_nir_reduction_op(redop);
4798 brw_conditional_mod cond_mod = brw_cond_mod_for_nir_reduction_op(redop);
/* Set up a register for all of our scratching around and initialize it
* to the reduction operation's identity value.
*/
4803 fs_reg scan = bld.vgrf(src.type);
4804 const fs_builder allbld = bld.exec_all();
4805 allbld.emit(SHADER_OPCODE_SEL_EXEC, scan, src, identity);
4807 if (instr->intrinsic == nir_intrinsic_exclusive_scan) {
4808 /* Exclusive scan is a bit harder because we have to do an annoying
4809 * shift of the contents before we can begin. To make things worse,
* we can't do this with a normal stride; we have to use indirects.
*/
4812 fs_reg shifted = bld.vgrf(src.type);
4813 fs_reg idx = bld.vgrf(BRW_REGISTER_TYPE_W);
allbld.ADD(idx, nir_system_values[SYSTEM_VALUE_SUBGROUP_INVOCATION],
brw_imm_w(-1));
4816 allbld.emit(SHADER_OPCODE_SHUFFLE, shifted, scan, idx);
allbld.group(1, 0).MOV(component(shifted, 0), identity);
scan = shifted;
}
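/* For example, in SIMD8 idx becomes invocation - 1, so the SHUFFLE
* makes channel i read channel i - 1 of the scan register; channel 0
* is then overwritten with the identity value, producing the shifted
* vector an exclusive scan starts from.
*/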
4821 bld.emit_scan(brw_op, scan, dispatch_width, cond_mod);
4823 bld.MOV(retype(dest, src.type), scan);
4827 case nir_intrinsic_begin_fragment_shader_ordering:
4828 case nir_intrinsic_begin_invocation_interlock: {
4829 const fs_builder ubld = bld.group(8, 0);
4830 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
ubld.emit(SHADER_OPCODE_INTERLOCK, tmp)->size_written = 2 * REG_SIZE;
break;
}
4838 case nir_intrinsic_end_invocation_interlock: {
/* We don't need to do anything here */
break;
}

default:
unreachable("unknown intrinsic");
}
}
void
fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
4850 int op, nir_intrinsic_instr *instr)
4852 if (stage == MESA_SHADER_FRAGMENT)
4853 brw_wm_prog_data(prog_data)->has_side_effects = true;
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);

fs_reg surface;
nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
4861 if (const_surface) {
4862 unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
4863 const_surface->u32[0];
4864 surface = brw_imm_ud(surf_index);
4865 brw_mark_surface_used(prog_data, surf_index);
} else {
surface = vgrf(glsl_type::uint_type);
4868 bld.ADD(surface, get_nir_src(instr->src[0]),
4869 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
/* Assume this may touch any SSBO. This is the same as we do for
* other UBO/SSBO accesses with a non-constant surface index.
*/
4874 brw_mark_surface_used(prog_data,
4875 stage_prog_data->binding_table.ssbo_start +
4876 nir->info.num_ssbos - 1);
}

fs_reg offset = get_nir_src(instr->src[1]);
fs_reg data1, data2;
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
4882 data1 = get_nir_src(instr->src[2]);
4884 if (op == BRW_AOP_CMPWR)
4885 data2 = get_nir_src(instr->src[3]);
4887 /* Emit the actual atomic operation */
fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
data1, data2,
1 /* dims */, 1 /* rsize */,
op,
BRW_PREDICATE_NONE);
4894 dest.type = atomic_result.type;
4895 bld.MOV(dest, atomic_result);
}

void
fs_visitor::nir_emit_ssbo_atomic_float(const fs_builder &bld,
4900 int op, nir_intrinsic_instr *instr)
4902 if (stage == MESA_SHADER_FRAGMENT)
4903 brw_wm_prog_data(prog_data)->has_side_effects = true;
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);

fs_reg surface;
nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
4911 if (const_surface) {
4912 unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
4913 const_surface->u32[0];
4914 surface = brw_imm_ud(surf_index);
4915 brw_mark_surface_used(prog_data, surf_index);
} else {
surface = vgrf(glsl_type::uint_type);
4918 bld.ADD(surface, get_nir_src(instr->src[0]),
4919 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
/* Assume this may touch any SSBO. This is the same as we do for
* other UBO/SSBO accesses with a non-constant surface index.
*/
4924 brw_mark_surface_used(prog_data,
4925 stage_prog_data->binding_table.ssbo_start +
4926 nir->info.num_ssbos - 1);
}

fs_reg offset = get_nir_src(instr->src[1]);
fs_reg data1 = get_nir_src(instr->src[2]);
fs_reg data2;
if (op == BRW_AOP_FCMPWR)
4933 data2 = get_nir_src(instr->src[3]);
4935 /* Emit the actual atomic operation */
fs_reg atomic_result = emit_untyped_atomic_float(bld, surface, offset,
data1, data2,
1 /* dims */, 1 /* rsize */,
op,
BRW_PREDICATE_NONE);
4942 dest.type = atomic_result.type;
4943 bld.MOV(dest, atomic_result);
}

void
fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
4948 int op, nir_intrinsic_instr *instr)
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);

fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
fs_reg data1, data2;
if (op != BRW_AOP_INC && op != BRW_AOP_DEC && op != BRW_AOP_PREDEC)
4958 data1 = get_nir_src(instr->src[1]);
4960 if (op == BRW_AOP_CMPWR)
4961 data2 = get_nir_src(instr->src[2]);
/* Get the offset */
fs_reg offset;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
offset = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
} else {
offset = vgrf(glsl_type::uint_type);
bld.ADD(offset,
retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
brw_imm_ud(instr->const_index[0]));
}
/* Emit the actual atomic operation */
fs_reg atomic_result = emit_untyped_atomic(bld, surface, offset,
data1, data2,
1 /* dims */, 1 /* rsize */,
op,
BRW_PREDICATE_NONE);
4981 dest.type = atomic_result.type;
4982 bld.MOV(dest, atomic_result);
}

void
fs_visitor::nir_emit_shared_atomic_float(const fs_builder &bld,
4987 int op, nir_intrinsic_instr *instr)
fs_reg dest;
if (nir_intrinsic_infos[instr->intrinsic].has_dest)
dest = get_nir_dest(instr->dest);

fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
4995 fs_reg data1 = get_nir_src(instr->src[1]);
fs_reg data2;
if (op == BRW_AOP_FCMPWR)
4998 data2 = get_nir_src(instr->src[2]);
/* Get the offset */
fs_reg offset;
nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
if (const_offset) {
offset = brw_imm_ud(instr->const_index[0] + const_offset->u32[0]);
} else {
offset = vgrf(glsl_type::uint_type);
bld.ADD(offset,
retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
brw_imm_ud(instr->const_index[0]));
}
/* Emit the actual atomic operation */
fs_reg atomic_result = emit_untyped_atomic_float(bld, surface, offset,
data1, data2,
1 /* dims */, 1 /* rsize */,
op,
BRW_PREDICATE_NONE);
5018 dest.type = atomic_result.type;
5019 bld.MOV(dest, atomic_result);
}

void
fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
{
5025 unsigned texture = instr->texture_index;
5026 unsigned sampler = instr->sampler_index;
5028 fs_reg srcs[TEX_LOGICAL_NUM_SRCS];
5030 srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture);
5031 srcs[TEX_LOGICAL_SRC_SAMPLER] = brw_imm_ud(sampler);
5033 int lod_components = 0;
5035 /* The hardware requires a LOD for buffer textures */
5036 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
5037 srcs[TEX_LOGICAL_SRC_LOD] = brw_imm_d(0);
5039 uint32_t header_bits = 0;
5040 for (unsigned i = 0; i < instr->num_srcs; i++) {
5041 fs_reg src = get_nir_src(instr->src[i].src);
5042 switch (instr->src[i].src_type) {
5043 case nir_tex_src_bias:
5044 srcs[TEX_LOGICAL_SRC_LOD] =
5045 retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
5047 case nir_tex_src_comparator:
5048 srcs[TEX_LOGICAL_SRC_SHADOW_C] = retype(src, BRW_REGISTER_TYPE_F);
5050 case nir_tex_src_coord:
5051 switch (instr->op) {
case nir_texop_txf:
case nir_texop_txf_ms:
5054 case nir_texop_txf_ms_mcs:
5055 case nir_texop_samples_identical:
5056 srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_D);
break;
default:
srcs[TEX_LOGICAL_SRC_COORDINATE] = retype(src, BRW_REGISTER_TYPE_F);
break;
}
break;
5063 case nir_tex_src_ddx:
5064 srcs[TEX_LOGICAL_SRC_LOD] = retype(src, BRW_REGISTER_TYPE_F);
5065 lod_components = nir_tex_instr_src_size(instr, i);
5067 case nir_tex_src_ddy:
5068 srcs[TEX_LOGICAL_SRC_LOD2] = retype(src, BRW_REGISTER_TYPE_F);
5070 case nir_tex_src_lod:
switch (instr->op) {
case nir_texop_txs:
srcs[TEX_LOGICAL_SRC_LOD] =
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_UD);
break;
case nir_texop_txf:
srcs[TEX_LOGICAL_SRC_LOD] =
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_D);
break;
default:
srcs[TEX_LOGICAL_SRC_LOD] =
retype(get_nir_src_imm(instr->src[i].src), BRW_REGISTER_TYPE_F);
break;
}
break;
5086 case nir_tex_src_ms_index:
5087 srcs[TEX_LOGICAL_SRC_SAMPLE_INDEX] = retype(src, BRW_REGISTER_TYPE_UD);
5090 case nir_tex_src_offset: {
5091 nir_const_value *const_offset =
5092 nir_src_as_const_value(instr->src[i].src);
unsigned offset_bits = 0;
if (const_offset &&
brw_texture_offset(const_offset->i32,
nir_tex_instr_src_size(instr, i),
&offset_bits)) {
header_bits |= offset_bits;
} else {
srcs[TEX_LOGICAL_SRC_TG4_OFFSET] =
retype(src, BRW_REGISTER_TYPE_D);
}
break;
}
5106 case nir_tex_src_projector:
5107 unreachable("should be lowered");
5109 case nir_tex_src_texture_offset: {
5110 /* Figure out the highest possible texture index and mark it as used */
5111 uint32_t max_used = texture + instr->texture_array_size - 1;
5112 if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
5113 max_used += stage_prog_data->binding_table.gather_texture_start;
} else {
max_used += stage_prog_data->binding_table.texture_start;
}
5117 brw_mark_surface_used(prog_data, max_used);
5119 /* Emit code to evaluate the actual indexing expression */
5120 fs_reg tmp = vgrf(glsl_type::uint_type);
5121 bld.ADD(tmp, src, brw_imm_ud(texture));
5122 srcs[TEX_LOGICAL_SRC_SURFACE] = bld.emit_uniformize(tmp);
5126 case nir_tex_src_sampler_offset: {
5127 /* Emit code to evaluate the actual indexing expression */
5128 fs_reg tmp = vgrf(glsl_type::uint_type);
5129 bld.ADD(tmp, src, brw_imm_ud(sampler));
5130 srcs[TEX_LOGICAL_SRC_SAMPLER] = bld.emit_uniformize(tmp);
5134 case nir_tex_src_ms_mcs:
5135 assert(instr->op == nir_texop_txf_ms);
5136 srcs[TEX_LOGICAL_SRC_MCS] = retype(src, BRW_REGISTER_TYPE_D);
5139 case nir_tex_src_plane: {
5140 nir_const_value *const_plane =
5141 nir_src_as_const_value(instr->src[i].src);
5142 const uint32_t plane = const_plane->u32[0];
5143 const uint32_t texture_index =
5144 instr->texture_index +
5145 stage_prog_data->binding_table.plane_start[plane] -
5146 stage_prog_data->binding_table.texture_start;
5148 srcs[TEX_LOGICAL_SRC_SURFACE] = brw_imm_ud(texture_index);
5153 unreachable("unknown texture source");
5157 if (srcs[TEX_LOGICAL_SRC_MCS].file == BAD_FILE &&
5158 (instr->op == nir_texop_txf_ms ||
5159 instr->op == nir_texop_samples_identical)) {
5160 if (devinfo->gen >= 7 &&
5161 key_tex->compressed_multisample_layout_mask & (1 << texture)) {
5162 srcs[TEX_LOGICAL_SRC_MCS] =
5163 emit_mcs_fetch(srcs[TEX_LOGICAL_SRC_COORDINATE],
5164 instr->coord_components,
5165 srcs[TEX_LOGICAL_SRC_SURFACE]);
} else {
srcs[TEX_LOGICAL_SRC_MCS] = brw_imm_ud(0u);
}
}
5171 srcs[TEX_LOGICAL_SRC_COORD_COMPONENTS] = brw_imm_d(instr->coord_components);
5172 srcs[TEX_LOGICAL_SRC_GRAD_COMPONENTS] = brw_imm_d(lod_components);
enum opcode opcode;
switch (instr->op) {
case nir_texop_tex:
opcode = (stage == MESA_SHADER_FRAGMENT ? SHADER_OPCODE_TEX_LOGICAL :
SHADER_OPCODE_TXL_LOGICAL);
break;
case nir_texop_txb:
opcode = FS_OPCODE_TXB_LOGICAL;
break;
case nir_texop_txl:
opcode = SHADER_OPCODE_TXL_LOGICAL;
break;
case nir_texop_txd:
opcode = SHADER_OPCODE_TXD_LOGICAL;
break;
case nir_texop_txf:
opcode = SHADER_OPCODE_TXF_LOGICAL;
break;
case nir_texop_txf_ms:
if ((key_tex->msaa_16 & (1 << sampler)))
opcode = SHADER_OPCODE_TXF_CMS_W_LOGICAL;
else
opcode = SHADER_OPCODE_TXF_CMS_LOGICAL;
break;
case nir_texop_txf_ms_mcs:
opcode = SHADER_OPCODE_TXF_MCS_LOGICAL;
break;
case nir_texop_query_levels:
case nir_texop_txs:
opcode = SHADER_OPCODE_TXS_LOGICAL;
break;
case nir_texop_lod:
opcode = SHADER_OPCODE_LOD_LOGICAL;
break;
case nir_texop_tg4:
if (srcs[TEX_LOGICAL_SRC_TG4_OFFSET].file != BAD_FILE)
opcode = SHADER_OPCODE_TG4_OFFSET_LOGICAL;
else
opcode = SHADER_OPCODE_TG4_LOGICAL;
break;
case nir_texop_texture_samples:
opcode = SHADER_OPCODE_SAMPLEINFO_LOGICAL;
break;
5217 case nir_texop_samples_identical: {
5218 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
5220 /* If mcs is an immediate value, it means there is no MCS. In that case
5221 * just return false.
5223 if (srcs[TEX_LOGICAL_SRC_MCS].file == BRW_IMMEDIATE_VALUE) {
5224 bld.MOV(dst, brw_imm_ud(0u));
5225 } else if ((key_tex->msaa_16 & (1 << sampler))) {
5226 fs_reg tmp = vgrf(glsl_type::uint_type);
5227 bld.OR(tmp, srcs[TEX_LOGICAL_SRC_MCS],
5228 offset(srcs[TEX_LOGICAL_SRC_MCS], bld, 1));
5229 bld.CMP(dst, tmp, brw_imm_ud(0u), BRW_CONDITIONAL_EQ);
} else {
bld.CMP(dst, srcs[TEX_LOGICAL_SRC_MCS], brw_imm_ud(0u),
5232 BRW_CONDITIONAL_EQ);
5237 unreachable("unknown texture opcode");
5240 if (instr->op == nir_texop_tg4) {
5241 if (instr->component == 1 &&
5242 key_tex->gather_channel_quirk_mask & (1 << texture)) {
5243 /* gather4 sampler is broken for green channel on RG32F --
* we must ask for blue instead.
*/
5246 header_bits |= 2 << 16;
} else {
header_bits |= instr->component << 16;
}
5252 fs_reg dst = bld.vgrf(brw_type_for_nir_type(devinfo, instr->dest_type), 4);
5253 fs_inst *inst = bld.emit(opcode, dst, srcs, ARRAY_SIZE(srcs));
5254 inst->offset = header_bits;
5256 const unsigned dest_size = nir_tex_instr_dest_size(instr);
5257 if (devinfo->gen >= 9 &&
5258 instr->op != nir_texop_tg4 && instr->op != nir_texop_query_levels) {
5259 unsigned write_mask = instr->dest.is_ssa ?
nir_ssa_def_components_read(&instr->dest.ssa) :
5261 (1 << dest_size) - 1;
5262 assert(write_mask != 0); /* dead code should have been eliminated */
5263 inst->size_written = util_last_bit(write_mask) *
5264 inst->dst.component_size(inst->exec_size);
} else {
inst->size_written = 4 * inst->dst.component_size(inst->exec_size);
}
5269 if (srcs[TEX_LOGICAL_SRC_SHADOW_C].file != BAD_FILE)
5270 inst->shadow_compare = true;
5272 if (instr->op == nir_texop_tg4 && devinfo->gen == 6)
5273 emit_gen6_gather_wa(key_tex->gen6_gather_wa[texture], dst);
fs_reg nir_dest[4];
for (unsigned i = 0; i < dest_size; i++)
5277 nir_dest[i] = offset(dst, bld, i);
5279 if (instr->op == nir_texop_query_levels) {
5280 /* # levels is in .w */
5281 nir_dest[0] = offset(dst, bld, 3);
5282 } else if (instr->op == nir_texop_txs &&
5283 dest_size >= 3 && devinfo->gen < 7) {
5284 /* Gen4-6 return 0 instead of 1 for single layer surfaces. */
5285 fs_reg depth = offset(dst, bld, 2);
5286 nir_dest[2] = vgrf(glsl_type::int_type);
5287 bld.emit_minmax(nir_dest[2], depth, brw_imm_d(1), BRW_CONDITIONAL_GE);
}

bld.LOAD_PAYLOAD(get_nir_dest(instr->dest), nir_dest, dest_size, 0);
}

void
fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
{
5296 switch (instr->type) {
5297 case nir_jump_break:
bld.emit(BRW_OPCODE_BREAK);
break;
5300 case nir_jump_continue:
bld.emit(BRW_OPCODE_CONTINUE);
break;
case nir_jump_return:
default:
unreachable("unknown jump");
}
}
* This helper takes a source register and un/shuffles it into the destination
* register.
*
5313 * If source type size is smaller than destination type size the operation
5314 * needed is a component shuffle. The opposite case would be an unshuffle. If
5315 * source/destination type size is equal a shuffle is done that would be
5316 * equivalent to a simple MOV.
* For example, if the source is a 16-bit type and the destination is 32-bit:
* a 3-component .xyz 16-bit vector on SIMD8 would look like this:
5321 * |x1|x2|x3|x4|x5|x6|x7|x8|y1|y2|y3|y4|y5|y6|y7|y8|
5322 * |z1|z2|z3|z4|z5|z6|z7|z8| | | | | | | | |
5324 * This helper will return the following 2 32-bit components with the 16-bit
5327 * |x1 y1|x2 y2|x3 y3|x4 y4|x5 y5|x6 y6|x7 y7|x8 y8|
5328 * |z1 |z2 |z3 |z4 |z5 |z6 |z7 |z8 |
5330 * For unshuffle, the example would be the opposite, a 64-bit type source
* and a 32-bit destination: a 2-component .xy 64-bit vector on SIMD8
* would look like this:
*
5334 * | x1l x1h | x2l x2h | x3l x3h | x4l x4h |
5335 * | x5l x5h | x6l x6h | x7l x7h | x8l x8h |
5336 * | y1l y1h | y2l y2h | y3l y3h | y4l y4h |
5337 * | y5l y5h | y6l y6h | y7l y7h | y8l y8h |
5339 * The returned result would be the following 4 32-bit components unshuffled:
5341 * | x1l | x2l | x3l | x4l | x5l | x6l | x7l | x8l |
5342 * | x1h | x2h | x3h | x4h | x5h | x6h | x7h | x8h |
5343 * | y1l | y2l | y3l | y4l | y5l | y6l | y7l | y8l |
5344 * | y1h | y2h | y3h | y4h | y5h | y6h | y7h | y8h |
* This helper assumes:
*
* - The source and destination registers must not overlap.
* - Component units are measured in terms of the smaller type between
*   source and destination, because we are un/shuffling the smaller
*   components from/into the bigger ones.
* - The first_component parameter allows skipping source components.
*/
static void
shuffle_src_to_dst(const fs_builder &bld,
fs_reg dst,
fs_reg src,
5356 uint32_t first_component,
5357 uint32_t components)
5359 if (type_sz(src.type) == type_sz(dst.type)) {
5360 assert(!regions_overlap(dst,
5361 type_sz(dst.type) * bld.dispatch_width() * components,
5362 offset(src, bld, first_component),
5363 type_sz(src.type) * bld.dispatch_width() * components));
5364 for (unsigned i = 0; i < components; i++) {
5365 bld.MOV(retype(offset(dst, bld, i), src.type),
5366 offset(src, bld, i + first_component));
5368 } else if (type_sz(src.type) < type_sz(dst.type)) {
5369 /* Source is shuffled into destination */
5370 unsigned size_ratio = type_sz(dst.type) / type_sz(src.type);
5371 assert(!regions_overlap(dst,
5372 type_sz(dst.type) * bld.dispatch_width() *
5373 DIV_ROUND_UP(components, size_ratio),
5374 offset(src, bld, first_component),
5375 type_sz(src.type) * bld.dispatch_width() * components));
5377 brw_reg_type shuffle_type =
5378 brw_reg_type_from_bit_size(8 * type_sz(src.type),
5379 BRW_REGISTER_TYPE_D);
5380 for (unsigned i = 0; i < components; i++) {
5381 fs_reg shuffle_component_i =
5382 subscript(offset(dst, bld, i / size_ratio),
5383 shuffle_type, i % size_ratio);
5384 bld.MOV(shuffle_component_i,
5385 retype(offset(src, bld, i + first_component), shuffle_type));
}
} else {
/* Source is unshuffled into destination */
5389 unsigned size_ratio = type_sz(src.type) / type_sz(dst.type);
5390 assert(!regions_overlap(dst,
5391 type_sz(dst.type) * bld.dispatch_width() * components,
5392 offset(src, bld, first_component / size_ratio),
5393 type_sz(src.type) * bld.dispatch_width() *
5394 DIV_ROUND_UP(components + (first_component % size_ratio),
size_ratio)));

brw_reg_type shuffle_type =
5398 brw_reg_type_from_bit_size(8 * type_sz(dst.type),
5399 BRW_REGISTER_TYPE_D);
5400 for (unsigned i = 0; i < components; i++) {
5401 fs_reg shuffle_component_i =
5402 subscript(offset(src, bld, (first_component + i) / size_ratio),
5403 shuffle_type, (first_component + i) % size_ratio);
5404 bld.MOV(retype(offset(dst, bld, i), shuffle_type),
5405 shuffle_component_i);
}
}
}

void
shuffle_from_32bit_read(const fs_builder &bld,
const fs_reg &dst,
const fs_reg &src,
5414 uint32_t first_component,
5415 uint32_t components)
5417 assert(type_sz(src.type) == 4);
5419 /* This function takes components in units of the destination type while
* shuffle_src_to_dst takes components in units of the smallest type.
*/
5422 if (type_sz(dst.type) > 4) {
5423 assert(type_sz(dst.type) == 8);
first_component *= 2;
components *= 2;
}
5428 shuffle_src_to_dst(bld, dst, src, first_component, components);
}

fs_reg
shuffle_for_32bit_write(const fs_builder &bld,
const fs_reg &src,
5434 uint32_t first_component,
5435 uint32_t components)
5437 fs_reg dst = bld.vgrf(BRW_REGISTER_TYPE_D,
DIV_ROUND_UP(components * type_sz(src.type), 4));
5439 /* This function takes components in units of the source type while
* shuffle_src_to_dst takes components in units of the smallest type.
*/
5442 if (type_sz(src.type) > 4) {
5443 assert(type_sz(src.type) == 8);
first_component *= 2;
components *= 2;
}
shuffle_src_to_dst(bld, dst, src, first_component, components);

return dst;
}
fs_reg
setup_imm_df(const fs_builder &bld, double v)
{
5456 const struct gen_device_info *devinfo = bld.shader->devinfo;
5457 assert(devinfo->gen >= 7);
5459 if (devinfo->gen >= 8)
5460 return brw_imm_df(v);
/* gen7.5 does not support DF immediates straightforwardly, but the DIM
* instruction allows us to set a 64-bit immediate value.
*/
5465 if (devinfo->is_haswell) {
5466 const fs_builder ubld = bld.exec_all().group(1, 0);
5467 fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_DF, 1);
5468 ubld.DIM(dst, brw_imm_df(v));
5469 return component(dst, 0);
}

/* gen7 does not support DF immediates, so we generate a 64-bit constant by
* writing the low 32 bits of the constant to suboffset 0 of a VGRF and
* writing the high 32 bits to suboffset 4 and then applying a stride of 0.
*
* Alternatively, we could also produce a normal VGRF (without stride 0)
* by writing to all the channels in the VGRF; however, that would hit the
* gen7 bug where we have to split writes that span more than 1 register
* into instructions with a width of 4 (otherwise the write to the second
* register written runs into an execmask hardware bug), which isn't very
* nice.
*/
union {
double d;
struct {
uint32_t i1;
uint32_t i2;
};
} di;

di.d = v;
5493 const fs_builder ubld = bld.exec_all().group(1, 0);
5494 const fs_reg tmp = ubld.vgrf(BRW_REGISTER_TYPE_UD, 2);
5495 ubld.MOV(tmp, brw_imm_ud(di.i1));
5496 ubld.MOV(horiz_offset(tmp, 1), brw_imm_ud(di.i2));
5498 return component(retype(tmp, BRW_REGISTER_TYPE_DF), 0);
}

fs_reg
setup_imm_b(const fs_builder &bld, int8_t v)
{
5504 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_B);
bld.MOV(tmp, brw_imm_w(v));
return tmp;
}
fs_reg
setup_imm_ub(const fs_builder &bld, uint8_t v)
{
5512 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UB);
bld.MOV(tmp, brw_imm_uw(v));
return tmp;
}