2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
25 #include "main/shaderimage.h"
27 #include "brw_fs_surface_builder.h"
29 #include "brw_program.h"
32 using namespace brw::surface_access;
35 fs_visitor::emit_nir_code()
37 /* emit the arrays used for inputs and outputs - load/store intrinsics will
38 * be converted to reads/writes of these arrays
43 nir_emit_system_values();
45 /* get the main function and emit it */
46 nir_foreach_function(nir, function) {
47 assert(strcmp(function->name, "main") == 0);
48 assert(function->impl);
49 nir_emit_impl(function->impl);
54 fs_visitor::nir_setup_inputs()
56 if (stage != MESA_SHADER_FRAGMENT)
59 nir_inputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_inputs);
61 nir_foreach_variable(var, &nir->inputs) {
62 fs_reg input = offset(nir_inputs, bld, var->data.driver_location);
65 if (var->data.location == VARYING_SLOT_POS) {
66 reg = *emit_fragcoord_interpolation(var->data.pixel_center_integer,
67 var->data.origin_upper_left);
68 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
70 } else if (var->data.location == VARYING_SLOT_LAYER) {
71 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_LAYER, 1), 3);
72 reg.type = BRW_REGISTER_TYPE_D;
73 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
74 } else if (var->data.location == VARYING_SLOT_VIEWPORT) {
75 struct brw_reg reg = suboffset(interp_reg(VARYING_SLOT_VIEWPORT, 2), 3);
76 reg.type = BRW_REGISTER_TYPE_D;
77 bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
79 int location = var->data.location;
80 emit_general_interpolation(&input, var->name, var->type,
81 (glsl_interp_qualifier) var->data.interpolation,
82 &location, var->data.centroid,
89 fs_visitor::nir_setup_single_output_varying(fs_reg *reg,
90 const glsl_type *type,
93 if (type->is_array() || type->is_matrix()) {
94 const struct glsl_type *elem_type = glsl_get_array_element(type);
95 const unsigned length = glsl_get_length(type);
97 for (unsigned i = 0; i < length; i++) {
98 nir_setup_single_output_varying(reg, elem_type, location);
100 } else if (type->is_record()) {
101 for (unsigned i = 0; i < type->length; i++) {
102 const struct glsl_type *field_type = type->fields.structure[i].type;
103 nir_setup_single_output_varying(reg, field_type, location);
106 assert(type->is_scalar() || type->is_vector());
107 this->outputs[*location] = *reg;
108 this->output_components[*location] = type->vector_elements;
109 *reg = offset(*reg, bld, 4);
115 fs_visitor::nir_setup_outputs()
117 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
119 nir_outputs = bld.vgrf(BRW_REGISTER_TYPE_F, nir->num_outputs);
121 nir_foreach_variable(var, &nir->outputs) {
122 fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);
125 case MESA_SHADER_VERTEX:
126 case MESA_SHADER_TESS_EVAL:
127 case MESA_SHADER_GEOMETRY: {
128 unsigned location = var->data.location;
129 nir_setup_single_output_varying(&reg, var->type, &location);
132 case MESA_SHADER_FRAGMENT:
133 if (var->data.index > 0) {
134 assert(var->data.location == FRAG_RESULT_DATA0);
135 assert(var->data.index == 1);
136 this->dual_src_output = reg;
137 this->do_dual_src = true;
138 } else if (var->data.location == FRAG_RESULT_COLOR) {
139 /* Writing gl_FragColor outputs to all color regions. */
140 for (unsigned int i = 0; i < MAX2(key->nr_color_regions, 1); i++) {
141 this->outputs[i] = reg;
142 this->output_components[i] = 4;
144 } else if (var->data.location == FRAG_RESULT_DEPTH) {
145 this->frag_depth = reg;
146 } else if (var->data.location == FRAG_RESULT_STENCIL) {
147 this->frag_stencil = reg;
148 } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
149 this->sample_mask = reg;
151 int vector_elements = var->type->without_array()->vector_elements;
153 /* gl_FragData or a user-defined FS output */
154 assert(var->data.location >= FRAG_RESULT_DATA0 &&
155 var->data.location < FRAG_RESULT_DATA0+BRW_MAX_DRAW_BUFFERS);
157 /* General color output. */
158 for (unsigned int i = 0; i < MAX2(1, var->type->length); i++) {
159 int output = var->data.location - FRAG_RESULT_DATA0 + i;
160 this->outputs[output] = offset(reg, bld, vector_elements * i);
161 this->output_components[output] = vector_elements;
166 unreachable("unhandled shader stage");
172 fs_visitor::nir_setup_uniforms()
174 if (dispatch_width != 8)
177 uniforms = nir->num_uniforms / 4;
179 nir_foreach_variable(var, &nir->uniforms) {
180 /* UBO's and atomics don't take up space in the uniform file */
181 if (var->interface_type != NULL || var->type->contains_atomic())
184 if (type_size_scalar(var->type) > 0)
185 param_size[var->data.driver_location / 4] = type_size_scalar(var->type);
190 emit_system_values_block(nir_block *block, void *void_visitor)
192 fs_visitor *v = (fs_visitor *)void_visitor;
195 nir_foreach_instr(block, instr) {
196 if (instr->type != nir_instr_type_intrinsic)
199 nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
200 switch (intrin->intrinsic) {
201 case nir_intrinsic_load_vertex_id:
202 unreachable("should be lowered by lower_vertex_id().");
204 case nir_intrinsic_load_vertex_id_zero_base:
205 assert(v->stage == MESA_SHADER_VERTEX);
206 reg = &v->nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
207 if (reg->file == BAD_FILE)
208 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE);
211 case nir_intrinsic_load_base_vertex:
212 assert(v->stage == MESA_SHADER_VERTEX);
213 reg = &v->nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
214 if (reg->file == BAD_FILE)
215 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_VERTEX);
218 case nir_intrinsic_load_instance_id:
219 assert(v->stage == MESA_SHADER_VERTEX);
220 reg = &v->nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
221 if (reg->file == BAD_FILE)
222 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_INSTANCE_ID);
225 case nir_intrinsic_load_base_instance:
226 assert(v->stage == MESA_SHADER_VERTEX);
227 reg = &v->nir_system_values[SYSTEM_VALUE_BASE_INSTANCE];
228 if (reg->file == BAD_FILE)
229 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_BASE_INSTANCE);
232 case nir_intrinsic_load_draw_id:
233 assert(v->stage == MESA_SHADER_VERTEX);
234 reg = &v->nir_system_values[SYSTEM_VALUE_DRAW_ID];
235 if (reg->file == BAD_FILE)
236 *reg = *v->emit_vs_system_value(SYSTEM_VALUE_DRAW_ID);
239 case nir_intrinsic_load_invocation_id:
240 assert(v->stage == MESA_SHADER_GEOMETRY);
241 reg = &v->nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
242 if (reg->file == BAD_FILE) {
243 const fs_builder abld = v->bld.annotate("gl_InvocationID", NULL);
244 fs_reg g1(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
245 fs_reg iid = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
246 abld.SHR(iid, g1, brw_imm_ud(27u));
251 case nir_intrinsic_load_sample_pos:
252 assert(v->stage == MESA_SHADER_FRAGMENT);
253 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
254 if (reg->file == BAD_FILE)
255 *reg = *v->emit_samplepos_setup();
258 case nir_intrinsic_load_sample_id:
259 assert(v->stage == MESA_SHADER_FRAGMENT);
260 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_ID];
261 if (reg->file == BAD_FILE)
262 *reg = *v->emit_sampleid_setup();
265 case nir_intrinsic_load_sample_mask_in:
266 assert(v->stage == MESA_SHADER_FRAGMENT);
267 assert(v->devinfo->gen >= 7);
268 reg = &v->nir_system_values[SYSTEM_VALUE_SAMPLE_MASK_IN];
269 if (reg->file == BAD_FILE)
270 *reg = fs_reg(retype(brw_vec8_grf(v->payload.sample_mask_in_reg, 0),
271 BRW_REGISTER_TYPE_D));
274 case nir_intrinsic_load_local_invocation_id:
275 assert(v->stage == MESA_SHADER_COMPUTE);
276 reg = &v->nir_system_values[SYSTEM_VALUE_LOCAL_INVOCATION_ID];
277 if (reg->file == BAD_FILE)
278 *reg = *v->emit_cs_local_invocation_id_setup();
281 case nir_intrinsic_load_work_group_id:
282 assert(v->stage == MESA_SHADER_COMPUTE);
283 reg = &v->nir_system_values[SYSTEM_VALUE_WORK_GROUP_ID];
284 if (reg->file == BAD_FILE)
285 *reg = *v->emit_cs_work_group_id_setup();
288 case nir_intrinsic_load_helper_invocation:
289 assert(v->stage == MESA_SHADER_FRAGMENT);
290 reg = &v->nir_system_values[SYSTEM_VALUE_HELPER_INVOCATION];
291 if (reg->file == BAD_FILE) {
292 const fs_builder abld =
293 v->bld.annotate("gl_HelperInvocation", NULL);
295 /* On Gen6+ (gl_HelperInvocation is only exposed on Gen7+) the
296 * pixel mask is in g1.7 of the thread payload.
298 * We move the per-channel pixel enable bit to the low bit of each
299 * channel by shifting the byte containing the pixel mask by the
300 * vector immediate 0x76543210UV.
302 * The region of <1,8,0> reads only 1 byte (the pixel masks for
303 * subspans 0 and 1) in SIMD8 and an additional byte (the pixel
304 * masks for 2 and 3) in SIMD16.
306 fs_reg shifted = abld.vgrf(BRW_REGISTER_TYPE_UW, 1);
308 stride(byte_offset(retype(brw_vec1_grf(1, 0),
309 BRW_REGISTER_TYPE_UB), 28),
311 brw_imm_uv(0x76543210));
313 /* A set bit in the pixel mask means the channel is enabled, but
314 * that is the opposite of gl_HelperInvocation so we need to invert
317 * The negate source-modifier bit of logical instructions on Gen8+
318 * performs 1's complement negation, so we can use that instead of
321 fs_reg inverted = negate(shifted);
322 if (v->devinfo->gen < 8) {
323 inverted = abld.vgrf(BRW_REGISTER_TYPE_UW);
324 abld.NOT(inverted, shifted);
327 /* We then resolve the 0/1 result to 0/~0 boolean values by ANDing
328 * with 1 and negating.
330 fs_reg anded = abld.vgrf(BRW_REGISTER_TYPE_UD, 1);
331 abld.AND(anded, inverted, brw_imm_uw(1));
333 fs_reg dst = abld.vgrf(BRW_REGISTER_TYPE_D, 1);
334 abld.MOV(dst, negate(retype(anded, BRW_REGISTER_TYPE_D)));
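/* Illustrative example of the sequence above: if the pixel mask byte in
 * g1.7 is 0b00001101, channel n receives (mask >> n) & 1, so channels 0,
 * 2 and 3 read 1 (enabled) and the rest read 0.  After the invert, the
 * AND with 1 and the final negate, enabled channels hold 0 (false) and
 * disabled channels hold ~0 (true), which is exactly gl_HelperInvocation.
 */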
348 fs_visitor::nir_emit_system_values()
350 nir_system_values = ralloc_array(mem_ctx, fs_reg, SYSTEM_VALUE_MAX);
351 for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
352 nir_system_values[i] = fs_reg();
355 nir_foreach_function(nir, function) {
356 assert(strcmp(function->name, "main") == 0);
357 assert(function->impl);
358 nir_foreach_block(function->impl, emit_system_values_block, this);
363 fs_visitor::nir_emit_impl(nir_function_impl *impl)
365 nir_locals = ralloc_array(mem_ctx, fs_reg, impl->reg_alloc);
366 for (unsigned i = 0; i < impl->reg_alloc; i++) {
367 nir_locals[i] = fs_reg();
370 foreach_list_typed(nir_register, reg, node, &impl->registers) {
371 unsigned array_elems =
372 reg->num_array_elems == 0 ? 1 : reg->num_array_elems;
373 unsigned size = array_elems * reg->num_components;
374 nir_locals[reg->index] = bld.vgrf(BRW_REGISTER_TYPE_F, size);
377 nir_ssa_values = reralloc(mem_ctx, nir_ssa_values, fs_reg,
380 nir_emit_cf_list(&impl->body);
384 fs_visitor::nir_emit_cf_list(exec_list *list)
386 exec_list_validate(list);
387 foreach_list_typed(nir_cf_node, node, node, list) {
388 switch (node->type) {
390 nir_emit_if(nir_cf_node_as_if(node));
393 case nir_cf_node_loop:
394 nir_emit_loop(nir_cf_node_as_loop(node));
397 case nir_cf_node_block:
398 nir_emit_block(nir_cf_node_as_block(node));
402 unreachable("Invalid CFG node block");
408 fs_visitor::nir_emit_if(nir_if *if_stmt)
410 /* first, put the condition into f0 */
411 fs_inst *inst = bld.MOV(bld.null_reg_d(),
412 retype(get_nir_src(if_stmt->condition),
413 BRW_REGISTER_TYPE_D));
414 inst->conditional_mod = BRW_CONDITIONAL_NZ;
416 bld.IF(BRW_PREDICATE_NORMAL);
418 nir_emit_cf_list(&if_stmt->then_list);
420 /* note: if the else is empty, dead CF elimination will remove it */
421 bld.emit(BRW_OPCODE_ELSE);
423 nir_emit_cf_list(&if_stmt->else_list);
425 bld.emit(BRW_OPCODE_ENDIF);
429 fs_visitor::nir_emit_loop(nir_loop *loop)
431 bld.emit(BRW_OPCODE_DO);
433 nir_emit_cf_list(&loop->body);
435 bld.emit(BRW_OPCODE_WHILE);
439 fs_visitor::nir_emit_block(nir_block *block)
441 nir_foreach_instr(block, instr) {
442 nir_emit_instr(instr);
447 fs_visitor::nir_emit_instr(nir_instr *instr)
449 const fs_builder abld = bld.annotate(NULL, instr);
451 switch (instr->type) {
452 case nir_instr_type_alu:
453 nir_emit_alu(abld, nir_instr_as_alu(instr));
456 case nir_instr_type_intrinsic:
458 case MESA_SHADER_VERTEX:
459 nir_emit_vs_intrinsic(abld, nir_instr_as_intrinsic(instr));
461 case MESA_SHADER_TESS_EVAL:
462 nir_emit_tes_intrinsic(abld, nir_instr_as_intrinsic(instr));
464 case MESA_SHADER_GEOMETRY:
465 nir_emit_gs_intrinsic(abld, nir_instr_as_intrinsic(instr));
467 case MESA_SHADER_FRAGMENT:
468 nir_emit_fs_intrinsic(abld, nir_instr_as_intrinsic(instr));
470 case MESA_SHADER_COMPUTE:
471 nir_emit_cs_intrinsic(abld, nir_instr_as_intrinsic(instr));
474 unreachable("unsupported shader stage");
478 case nir_instr_type_tex:
479 nir_emit_texture(abld, nir_instr_as_tex(instr));
482 case nir_instr_type_load_const:
483 nir_emit_load_const(abld, nir_instr_as_load_const(instr));
486 case nir_instr_type_ssa_undef:
487 nir_emit_undef(abld, nir_instr_as_ssa_undef(instr));
490 case nir_instr_type_jump:
491 nir_emit_jump(abld, nir_instr_as_jump(instr));
495 unreachable("unknown instruction type");
500 fs_visitor::optimize_frontfacing_ternary(nir_alu_instr *instr,
501 const fs_reg &result)
503 if (!instr->src[0].src.is_ssa ||
504 instr->src[0].src.ssa->parent_instr->type != nir_instr_type_intrinsic)
507 nir_intrinsic_instr *src0 =
508 nir_instr_as_intrinsic(instr->src[0].src.ssa->parent_instr);
510 if (src0->intrinsic != nir_intrinsic_load_front_face)
513 nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);
514 if (!value1 || fabsf(value1->f[0]) != 1.0f)
517 nir_const_value *value2 = nir_src_as_const_value(instr->src[2].src);
518 if (!value2 || fabsf(value2->f[0]) != 1.0f)
521 fs_reg tmp = vgrf(glsl_type::int_type);
523 if (devinfo->gen >= 6) {
524 /* Bit 15 of g0.0 is 0 if the polygon is front facing. */
525 fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));
527 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
529 * or(8) tmp.1<2>W g0.0<0,1,0>W 0x00003f80W
530 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
532 * and negate g0.0<0,1,0>W for (gl_FrontFacing ? -1.0 : 1.0).
534 * This negation looks like it's safe in practice, because bits 0:4 will
535 * surely be TRIANGLES
538 if (value1->f[0] == -1.0f) {
542 tmp.type = BRW_REGISTER_TYPE_W;
543 tmp.subreg_offset = 2;
546 bld.OR(tmp, g0, brw_imm_uw(0x3f80));
548 tmp.type = BRW_REGISTER_TYPE_D;
549 tmp.subreg_offset = 0;
552 /* Bit 31 of g1.6 is 0 if the polygon is front facing. */
553 fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));
555 /* For (gl_FrontFacing ? 1.0 : -1.0), emit:
557 * or(8) tmp<1>D g1.6<0,1,0>D 0x3f800000D
558 * and(8) dst<1>D tmp<8,8,1>D 0xbf800000D
560 * and negate g1.6<0,1,0>D for (gl_FrontFacing ? -1.0 : 1.0).
562 * This negation looks like it's safe in practice, because bits 0:4 will
563 * surely be TRIANGLES
566 if (value1->f[0] == -1.0f) {
570 bld.OR(tmp, g1_6, brw_imm_d(0x3f800000));
572 bld.AND(retype(result, BRW_REGISTER_TYPE_D), tmp, brw_imm_d(0xbf800000));
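/* Illustration of why the OR/AND pair yields exactly +/-1.0f: 0x3f800000
 * is the IEEE-754 bit pattern of 1.0f and 0xbf800000 is -1.0f.  The OR
 * plants the 1.0f pattern, the front-facing bit (bit 15 of g0.0 on Gen6+,
 * bit 31 of g1.6 earlier) supplies the sign, and the AND masks away
 * everything else, leaving 0x3f800000 or 0xbf800000.
 */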
578 fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr)
580 struct brw_wm_prog_key *fs_key = (struct brw_wm_prog_key *) this->key;
583 fs_reg result = get_nir_dest(instr->dest.dest);
584 result.type = brw_type_for_nir_type(nir_op_infos[instr->op].output_type);
587 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
588 op[i] = get_nir_src(instr->src[i].src);
589 op[i].type = brw_type_for_nir_type(nir_op_infos[instr->op].input_types[i]);
590 op[i].abs = instr->src[i].abs;
591 op[i].negate = instr->src[i].negate;
594 /* We get a bunch of mov's out of the from_ssa pass and they may still
595 * be vectorized. We'll handle them as a special-case. We'll also
596 * handle vecN here because it's basically the same thing.
604 fs_reg temp = result;
605 bool need_extra_copy = false;
606 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
607 if (!instr->src[i].src.is_ssa &&
608 instr->dest.dest.reg.reg == instr->src[i].src.reg.reg) {
609 need_extra_copy = true;
610 temp = bld.vgrf(result.type, 4);
615 for (unsigned i = 0; i < 4; i++) {
616 if (!(instr->dest.write_mask & (1 << i)))
619 if (instr->op == nir_op_imov || instr->op == nir_op_fmov) {
620 inst = bld.MOV(offset(temp, bld, i),
621 offset(op[0], bld, instr->src[0].swizzle[i]));
623 inst = bld.MOV(offset(temp, bld, i),
624 offset(op[i], bld, instr->src[i].swizzle[0]));
626 inst->saturate = instr->dest.saturate;
629 /* In this case the source and destination registers were the same,
630 * so we need to insert an extra set of moves in order to deal with
633 if (need_extra_copy) {
634 for (unsigned i = 0; i < 4; i++) {
635 if (!(instr->dest.write_mask & (1 << i)))
638 bld.MOV(offset(result, bld, i), offset(temp, bld, i));
647 /* At this point, we have dealt with any instruction that operates on
648 * more than a single channel. Therefore, we can just adjust the source
649 * and destination registers for that channel and emit the instruction.
651 unsigned channel = 0;
652 if (nir_op_infos[instr->op].output_size == 0) {
653 /* Since NIR is doing the scalarizing for us, we should only ever see
654 * vectorized operations with a single channel.
656 assert(_mesa_bitcount(instr->dest.write_mask) == 1);
657 channel = ffs(instr->dest.write_mask) - 1;
659 result = offset(result, bld, channel);
662 for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
663 assert(nir_op_infos[instr->op].input_sizes[i] < 2);
664 op[i] = offset(op[i], bld, instr->src[i].swizzle[channel]);
670 inst = bld.MOV(result, op[0]);
671 inst->saturate = instr->dest.saturate;
676 bld.MOV(result, op[0]);
680 /* AND(val, 0x80000000) gives the sign bit.
682 * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
685 bld.CMP(bld.null_reg_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
687 fs_reg result_int = retype(result, BRW_REGISTER_TYPE_UD);
688 op[0].type = BRW_REGISTER_TYPE_UD;
689 result.type = BRW_REGISTER_TYPE_UD;
690 bld.AND(result_int, op[0], brw_imm_ud(0x80000000u));
692 inst = bld.OR(result_int, result_int, brw_imm_ud(0x3f800000u));
693 inst->predicate = BRW_PREDICATE_NORMAL;
694 if (instr->dest.saturate) {
695 inst = bld.MOV(result, result);
696 inst->saturate = true;
702 /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
703 * -> non-negative val generates 0x00000000.
704 * Predicated OR sets 1 if val is positive.
706 bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G);
707 bld.ASR(result, op[0], brw_imm_d(31));
708 inst = bld.OR(result, result, brw_imm_d(1));
709 inst->predicate = BRW_PREDICATE_NORMAL;
713 inst = bld.emit(SHADER_OPCODE_RCP, result, op[0]);
714 inst->saturate = instr->dest.saturate;
718 inst = bld.emit(SHADER_OPCODE_EXP2, result, op[0]);
719 inst->saturate = instr->dest.saturate;
723 inst = bld.emit(SHADER_OPCODE_LOG2, result, op[0]);
724 inst->saturate = instr->dest.saturate;
728 inst = bld.emit(SHADER_OPCODE_SIN, result, op[0]);
729 inst->saturate = instr->dest.saturate;
733 inst = bld.emit(SHADER_OPCODE_COS, result, op[0]);
734 inst->saturate = instr->dest.saturate;
738 if (fs_key->high_quality_derivatives) {
739 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
741 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
743 inst->saturate = instr->dest.saturate;
745 case nir_op_fddx_fine:
746 inst = bld.emit(FS_OPCODE_DDX_FINE, result, op[0]);
747 inst->saturate = instr->dest.saturate;
749 case nir_op_fddx_coarse:
750 inst = bld.emit(FS_OPCODE_DDX_COARSE, result, op[0]);
751 inst->saturate = instr->dest.saturate;
754 if (fs_key->high_quality_derivatives) {
755 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
756 brw_imm_d(fs_key->render_to_fbo));
758 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
759 brw_imm_d(fs_key->render_to_fbo));
761 inst->saturate = instr->dest.saturate;
763 case nir_op_fddy_fine:
764 inst = bld.emit(FS_OPCODE_DDY_FINE, result, op[0],
765 brw_imm_d(fs_key->render_to_fbo));
766 inst->saturate = instr->dest.saturate;
768 case nir_op_fddy_coarse:
769 inst = bld.emit(FS_OPCODE_DDY_COARSE, result, op[0],
770 brw_imm_d(fs_key->render_to_fbo));
771 inst->saturate = instr->dest.saturate;
776 inst = bld.ADD(result, op[0], op[1]);
777 inst->saturate = instr->dest.saturate;
781 inst = bld.MUL(result, op[0], op[1]);
782 inst->saturate = instr->dest.saturate;
786 bld.MUL(result, op[0], op[1]);
789 case nir_op_imul_high:
790 case nir_op_umul_high:
791 bld.emit(SHADER_OPCODE_MULH, result, op[0], op[1]);
796 bld.emit(SHADER_OPCODE_INT_QUOTIENT, result, op[0], op[1]);
799 case nir_op_uadd_carry:
800 unreachable("Should have been lowered by carry_to_arith().");
802 case nir_op_usub_borrow:
803 unreachable("Should have been lowered by borrow_to_arith().");
806 bld.emit(SHADER_OPCODE_INT_REMAINDER, result, op[0], op[1]);
812 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_L);
818 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_GE);
823 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_Z);
828 bld.CMP(result, op[0], op[1], BRW_CONDITIONAL_NZ);
832 if (devinfo->gen >= 8) {
833 op[0] = resolve_source_modifiers(op[0]);
835 bld.NOT(result, op[0]);
838 if (devinfo->gen >= 8) {
839 op[0] = resolve_source_modifiers(op[0]);
840 op[1] = resolve_source_modifiers(op[1]);
842 bld.XOR(result, op[0], op[1]);
845 if (devinfo->gen >= 8) {
846 op[0] = resolve_source_modifiers(op[0]);
847 op[1] = resolve_source_modifiers(op[1]);
849 bld.OR(result, op[0], op[1]);
852 if (devinfo->gen >= 8) {
853 op[0] = resolve_source_modifiers(op[0]);
854 op[1] = resolve_source_modifiers(op[1]);
856 bld.AND(result, op[0], op[1]);
862 case nir_op_ball_fequal2:
863 case nir_op_ball_iequal2:
864 case nir_op_ball_fequal3:
865 case nir_op_ball_iequal3:
866 case nir_op_ball_fequal4:
867 case nir_op_ball_iequal4:
868 case nir_op_bany_fnequal2:
869 case nir_op_bany_inequal2:
870 case nir_op_bany_fnequal3:
871 case nir_op_bany_inequal3:
872 case nir_op_bany_fnequal4:
873 case nir_op_bany_inequal4:
874 unreachable("Lowered by nir_lower_alu_reductions");
876 case nir_op_fnoise1_1:
877 case nir_op_fnoise1_2:
878 case nir_op_fnoise1_3:
879 case nir_op_fnoise1_4:
880 case nir_op_fnoise2_1:
881 case nir_op_fnoise2_2:
882 case nir_op_fnoise2_3:
883 case nir_op_fnoise2_4:
884 case nir_op_fnoise3_1:
885 case nir_op_fnoise3_2:
886 case nir_op_fnoise3_3:
887 case nir_op_fnoise3_4:
888 case nir_op_fnoise4_1:
889 case nir_op_fnoise4_2:
890 case nir_op_fnoise4_3:
891 case nir_op_fnoise4_4:
892 unreachable("not reached: should be handled by lower_noise");
895 unreachable("not reached: should be handled by ldexp_to_arith()");
898 inst = bld.emit(SHADER_OPCODE_SQRT, result, op[0]);
899 inst->saturate = instr->dest.saturate;
903 inst = bld.emit(SHADER_OPCODE_RSQ, result, op[0]);
904 inst->saturate = instr->dest.saturate;
909 bld.MOV(result, negate(op[0]));
913 bld.CMP(result, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ);
916 bld.CMP(result, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
920 inst = bld.RNDZ(result, op[0]);
921 inst->saturate = instr->dest.saturate;
925 op[0].negate = !op[0].negate;
926 fs_reg temp = vgrf(glsl_type::float_type);
927 bld.RNDD(temp, op[0]);
929 inst = bld.MOV(result, temp);
930 inst->saturate = instr->dest.saturate;
934 inst = bld.RNDD(result, op[0]);
935 inst->saturate = instr->dest.saturate;
938 inst = bld.FRC(result, op[0]);
939 inst->saturate = instr->dest.saturate;
941 case nir_op_fround_even:
942 inst = bld.RNDE(result, op[0]);
943 inst->saturate = instr->dest.saturate;
949 if (devinfo->gen >= 6) {
950 inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
951 inst->conditional_mod = BRW_CONDITIONAL_L;
953 bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_L);
954 inst = bld.SEL(result, op[0], op[1]);
955 inst->predicate = BRW_PREDICATE_NORMAL;
957 inst->saturate = instr->dest.saturate;
963 if (devinfo->gen >= 6) {
964 inst = bld.emit(BRW_OPCODE_SEL, result, op[0], op[1]);
965 inst->conditional_mod = BRW_CONDITIONAL_GE;
967 bld.CMP(bld.null_reg_d(), op[0], op[1], BRW_CONDITIONAL_GE);
968 inst = bld.SEL(result, op[0], op[1]);
969 inst->predicate = BRW_PREDICATE_NORMAL;
971 inst->saturate = instr->dest.saturate;
974 case nir_op_pack_snorm_2x16:
975 case nir_op_pack_snorm_4x8:
976 case nir_op_pack_unorm_2x16:
977 case nir_op_pack_unorm_4x8:
978 case nir_op_unpack_snorm_2x16:
979 case nir_op_unpack_snorm_4x8:
980 case nir_op_unpack_unorm_2x16:
981 case nir_op_unpack_unorm_4x8:
982 case nir_op_unpack_half_2x16:
983 case nir_op_pack_half_2x16:
984 unreachable("not reached: should be handled by lower_packing_builtins");
986 case nir_op_unpack_half_2x16_split_x:
987 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_X, result, op[0]);
988 inst->saturate = instr->dest.saturate;
990 case nir_op_unpack_half_2x16_split_y:
991 inst = bld.emit(FS_OPCODE_UNPACK_HALF_2x16_SPLIT_Y, result, op[0]);
992 inst->saturate = instr->dest.saturate;
996 inst = bld.emit(SHADER_OPCODE_POW, result, op[0], op[1]);
997 inst->saturate = instr->dest.saturate;
1000 case nir_op_bitfield_reverse:
1001 bld.BFREV(result, op[0]);
1004 case nir_op_bit_count:
1005 bld.CBIT(result, op[0]);
1008 case nir_op_ufind_msb:
1009 case nir_op_ifind_msb: {
1010 bld.FBH(retype(result, BRW_REGISTER_TYPE_UD), op[0]);
1012 /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
1013 * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
1014 * subtract the result from 31 to convert the MSB count into an LSB count.
1016 bld.CMP(bld.null_reg_d(), result, brw_imm_d(-1), BRW_CONDITIONAL_NZ);
1018 inst = bld.ADD(result, result, brw_imm_d(31));
1019 inst->predicate = BRW_PREDICATE_NORMAL;
1020 inst->src[0].negate = true;
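/* Worked example: findMSB(0x00000002) should return 1.  FBH reports the
 * set bit as 30 counting down from bit 31, and the predicated
 * "31 - result" above converts that back to the LSB-relative answer 1.
 * When the input is 0, FBH returns 0xFFFFFFFF, the ADD is skipped, and
 * the -1 result GLSL expects is left in place.
 */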
1024 case nir_op_find_lsb:
1025 bld.FBL(result, op[0]);
1028 case nir_op_ubitfield_extract:
1029 case nir_op_ibitfield_extract:
1030 bld.BFE(result, op[2], op[1], op[0]);
1033 bld.BFI1(result, op[0], op[1]);
1036 bld.BFI2(result, op[0], op[1], op[2]);
1039 case nir_op_bitfield_insert:
1040 unreachable("not reached: should be handled by "
1041 "lower_instructions::bitfield_insert_to_bfm_bfi");
1044 bld.SHL(result, op[0], op[1]);
1047 bld.ASR(result, op[0], op[1]);
1050 bld.SHR(result, op[0], op[1]);
1053 case nir_op_pack_half_2x16_split:
1054 bld.emit(FS_OPCODE_PACK_HALF_2x16_SPLIT, result, op[0], op[1]);
1058 inst = bld.MAD(result, op[2], op[1], op[0]);
1059 inst->saturate = instr->dest.saturate;
1063 inst = bld.LRP(result, op[0], op[1], op[2]);
1064 inst->saturate = instr->dest.saturate;
1068 if (optimize_frontfacing_ternary(instr, result))
1071 bld.CMP(bld.null_reg_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ);
1072 inst = bld.SEL(result, op[1], op[2]);
1073 inst->predicate = BRW_PREDICATE_NORMAL;
1077 unreachable("unhandled instruction");
1080 /* If we need to do a boolean resolve, replace the result with -(x & 1)
1081 * to sign extend the low bit to 0/~0
1083 if (devinfo->gen <= 5 &&
1084 (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) == BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
1085 fs_reg masked = vgrf(glsl_type::int_type);
1086 bld.AND(masked, result, brw_imm_d(1));
1087 masked.negate = true;
1088 bld.MOV(retype(result, BRW_REGISTER_TYPE_D), masked);
1093 fs_visitor::nir_emit_load_const(const fs_builder &bld,
1094 nir_load_const_instr *instr)
1096 fs_reg reg = bld.vgrf(BRW_REGISTER_TYPE_D, instr->def.num_components);
1098 for (unsigned i = 0; i < instr->def.num_components; i++)
1099 bld.MOV(offset(reg, bld, i), brw_imm_d(instr->value.i[i]));
1101 nir_ssa_values[instr->def.index] = reg;
1105 fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
1107 nir_ssa_values[instr->def.index] = bld.vgrf(BRW_REGISTER_TYPE_D,
1108 instr->def.num_components);
1112 fs_visitor::get_nir_src(nir_src src)
1116 reg = nir_ssa_values[src.ssa->index];
1118 /* We don't handle indirects on locals */
1119 assert(src.reg.indirect == NULL);
1120 reg = offset(nir_locals[src.reg.reg->index], bld,
1121 src.reg.base_offset * src.reg.reg->num_components);
1124 /* to avoid floating-point denorm flushing problems, set the type by
1125 * default to D - instructions that need floating point semantics will set
1126 * this to F if they need to
1128 return retype(reg, BRW_REGISTER_TYPE_D);
1132 fs_visitor::get_nir_dest(nir_dest dest)
1135 nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
1136 dest.ssa.num_components);
1137 return nir_ssa_values[dest.ssa.index];
1139 /* We don't handle indirects on locals */
1140 assert(dest.reg.indirect == NULL);
1141 return offset(nir_locals[dest.reg.reg->index], bld,
1142 dest.reg.base_offset * dest.reg.reg->num_components);
1147 fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
1149 fs_reg image(UNIFORM, deref->var->data.driver_location / 4,
1150 BRW_REGISTER_TYPE_UD);
1152 for (const nir_deref *tail = &deref->deref; tail->child;
1153 tail = tail->child) {
1154 const nir_deref_array *deref_array = nir_deref_as_array(tail->child);
1155 assert(tail->child->deref_type == nir_deref_type_array);
1156 const unsigned size = glsl_get_length(tail->type);
1157 const unsigned element_size = type_size_scalar(deref_array->deref.type);
1158 const unsigned base = MIN2(deref_array->base_offset, size - 1);
1159 image = offset(image, bld, base * element_size);
1161 if (deref_array->deref_array_type == nir_deref_array_type_indirect) {
1162 fs_reg tmp = vgrf(glsl_type::int_type);
1164 if (devinfo->gen == 7 && !devinfo->is_haswell) {
1165 /* IVB hangs when trying to access an invalid surface index with
1166 * the dataport. According to the spec "if the index used to
1167 * select an individual element is negative or greater than or
1168 * equal to the size of the array, the results of the operation
1169 * are undefined but may not lead to termination" -- which is one
1170 * of the possible outcomes of the hang. Clamp the index to
1171 * prevent access outside of the array bounds.
1173 bld.emit_minmax(tmp, retype(get_nir_src(deref_array->indirect),
1174 BRW_REGISTER_TYPE_UD),
1175 brw_imm_ud(size - base - 1), BRW_CONDITIONAL_L);
1177 bld.MOV(tmp, get_nir_src(deref_array->indirect));
1180 bld.MUL(tmp, tmp, brw_imm_ud(element_size * 4));
1182 bld.ADD(*image.reladdr, *image.reladdr, tmp);
1184 image.reladdr = new(mem_ctx) fs_reg(tmp);
1192 fs_visitor::emit_percomp(const fs_builder &bld, const fs_inst &inst,
1195 for (unsigned i = 0; i < 4; i++) {
1196 if (!((wr_mask >> i) & 1))
1199 fs_inst *new_inst = new(mem_ctx) fs_inst(inst);
1200 new_inst->dst = offset(new_inst->dst, bld, i);
1201 for (unsigned j = 0; j < new_inst->sources; j++)
1202 if (new_inst->src[j].file == VGRF)
1203 new_inst->src[j] = offset(new_inst->src[j], bld, i);
1210 * Get the matching channel register datatype for an image intrinsic of the
1211 * specified GLSL image type.
1214 get_image_base_type(const glsl_type *type)
1216 switch ((glsl_base_type)type->sampler_type) {
1217 case GLSL_TYPE_UINT:
1218 return BRW_REGISTER_TYPE_UD;
1220 return BRW_REGISTER_TYPE_D;
1221 case GLSL_TYPE_FLOAT:
1222 return BRW_REGISTER_TYPE_F;
1224 unreachable("Not reached.");
1229 * Get the appropriate atomic op for an image atomic intrinsic.
1232 get_image_atomic_op(nir_intrinsic_op op, const glsl_type *type)
1235 case nir_intrinsic_image_atomic_add:
1237 case nir_intrinsic_image_atomic_min:
1238 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1239 BRW_AOP_IMIN : BRW_AOP_UMIN);
1240 case nir_intrinsic_image_atomic_max:
1241 return (get_image_base_type(type) == BRW_REGISTER_TYPE_D ?
1242 BRW_AOP_IMAX : BRW_AOP_UMAX);
1243 case nir_intrinsic_image_atomic_and:
1245 case nir_intrinsic_image_atomic_or:
1247 case nir_intrinsic_image_atomic_xor:
1249 case nir_intrinsic_image_atomic_exchange:
1251 case nir_intrinsic_image_atomic_comp_swap:
1252 return BRW_AOP_CMPWR;
1254 unreachable("Not reachable.");
1259 emit_pixel_interpolater_send(const fs_builder &bld,
1264 glsl_interp_qualifier interpolation)
1270 if (src.file == BAD_FILE) {
1272 payload = bld.vgrf(BRW_REGISTER_TYPE_F, 1);
1276 mlen = 2 * bld.dispatch_width() / 8;
1279 inst = bld.emit(opcode, dst, payload, desc);
1281 /* 2 floats per slot returned */
1282 inst->regs_written = 2 * bld.dispatch_width() / 8;
1283 inst->pi_noperspective = interpolation == INTERP_QUALIFIER_NOPERSPECTIVE;
1289 * Computes 1 << x, given a D/UD register containing some value x.
1292 intexp2(const fs_builder &bld, const fs_reg &x)
1294 assert(x.type == BRW_REGISTER_TYPE_UD || x.type == BRW_REGISTER_TYPE_D);
1296 fs_reg result = bld.vgrf(x.type, 1);
1297 fs_reg one = bld.vgrf(x.type, 1);
1299 bld.MOV(one, retype(brw_imm_d(1), one.type));
1300 bld.SHL(result, one, x);
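/* For example, a source register holding 5 yields 1 << 5 == 32, in the
 * same D/UD type as x.  Only the low five bits of x matter, since the
 * hardware SHL ignores higher shift bits.
 */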
1305 fs_visitor::emit_gs_end_primitive(const nir_src &vertex_count_nir_src)
1307 assert(stage == MESA_SHADER_GEOMETRY);
1309 struct brw_gs_prog_data *gs_prog_data =
1310 (struct brw_gs_prog_data *) prog_data;
1312 /* We can only do EndPrimitive() functionality when the control data
1313 * consists of cut bits. Fortunately, the only time it isn't is when the
1314 * output type is points, in which case EndPrimitive() is a no-op.
1316 if (gs_prog_data->control_data_format !=
1317 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_CUT) {
1321 /* Cut bits use one bit per vertex. */
1322 assert(gs_compile->control_data_bits_per_vertex == 1);
1324 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1325 vertex_count.type = BRW_REGISTER_TYPE_UD;
1327 /* Cut bit n should be set to 1 if EndPrimitive() was called after emitting
1328 * vertex n, 0 otherwise. So all we need to do here is mark bit
1329 * (vertex_count - 1) % 32 in the cut_bits register to indicate that
1330 * EndPrimitive() was called after emitting vertex (vertex_count - 1);
1331 * vec4_gs_visitor::emit_control_data_bits() will take care of the rest.
1333 * Note that if EndPrimitive() is called before emitting any vertices, this
1334 * will cause us to set bit 31 of the control_data_bits register to 1.
1335 * That's fine because:
1337 * - If max_vertices < 32, then vertex number 31 (zero-based) will never be
1338 * output, so the hardware will ignore cut bit 31.
1340 * - If max_vertices == 32, then vertex number 31 is guaranteed to be the
1341 * last vertex, so setting cut bit 31 has no effect (since the primitive
1342 * is automatically ended when the GS terminates).
1344 * - If max_vertices > 32, then the ir_emit_vertex visitor will reset the
1345 * control_data_bits register to 0 when the first vertex is emitted.
1348 const fs_builder abld = bld.annotate("end primitive");
1350 /* control_data_bits |= 1 << ((vertex_count - 1) % 32) */
1351 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1352 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
1353 fs_reg mask = intexp2(abld, prev_count);
1354 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1355 * attention to the lower 5 bits of its second source argument, so on this
1356 * architecture, 1 << (vertex_count - 1) is equivalent to 1 <<
1357 * ((vertex_count - 1) % 32).
1359 abld.OR(this->control_data_bits, this->control_data_bits, mask);
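/* A worked example of the above: calling EndPrimitive() right after
 * emitting vertex 3 (vertex_count == 3) gives prev_count == 2 and
 * mask == 1 << 2, marking the cut bit for that vertex.  With
 * vertex_count == 33, prev_count == 32 and the SHL's low-5-bit behaviour
 * makes this 1 << (32 % 32) == 1 << 0, i.e. bit 0 of the next 32-bit
 * batch.
 */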
1363 fs_visitor::emit_gs_control_data_bits(const fs_reg &vertex_count)
1365 assert(stage == MESA_SHADER_GEOMETRY);
1366 assert(gs_compile->control_data_bits_per_vertex != 0);
1368 struct brw_gs_prog_data *gs_prog_data =
1369 (struct brw_gs_prog_data *) prog_data;
1371 const fs_builder abld = bld.annotate("emit control data bits");
1372 const fs_builder fwa_bld = bld.exec_all();
1374 /* We use a single UD register to accumulate control data bits (32 bits
1375 * for each of the SIMD8 channels). So we need to write a DWord (32 bits)
1378 * Unfortunately, the URB_WRITE_SIMD8 message uses 128-bit (OWord) offsets.
1379 * We have to select a 128-bit group via the Global and Per-Slot Offsets, then
1380 * use the Channel Mask phase to enable/disable which DWord within that
1381 * group to write. (Remember, different SIMD8 channels may have emitted
1382 * different numbers of vertices, so we may need per-slot offsets.)
1384 * Channel masking presents an annoying problem: we may have to replicate
1385 * the data up to 4 times:
1387 * Msg = Handles, Per-Slot Offsets, Channel Masks, Data, Data, Data, Data.
1389 * To avoid penalizing shaders that emit a small number of vertices, we
1390 * can avoid these sometimes: if the size of the control data header is
1391 * <= 128 bits, then there is only 1 OWord. All SIMD8 channels will land
1392 * in the same 128-bit group, so we can skip per-slot offsets.
1394 * Similarly, if the control data header is <= 32 bits, there is only one
1395 * DWord, so we can skip channel masks.
1397 enum opcode opcode = SHADER_OPCODE_URB_WRITE_SIMD8;
1399 fs_reg channel_mask, per_slot_offset;
1401 if (gs_compile->control_data_header_size_bits > 32) {
1402 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED;
1403 channel_mask = vgrf(glsl_type::uint_type);
1406 if (gs_compile->control_data_header_size_bits > 128) {
1407 opcode = SHADER_OPCODE_URB_WRITE_SIMD8_MASKED_PER_SLOT;
1408 per_slot_offset = vgrf(glsl_type::uint_type);
1411 /* Figure out which DWord we're trying to write to using the formula:
1413 * dword_index = (vertex_count - 1) * bits_per_vertex / 32
1415 * Since bits_per_vertex is a power of two, and is known at compile
1416 * time, this can be optimized to:
1418 * dword_index = (vertex_count - 1) >> (6 - log2(bits_per_vertex))
1420 if (opcode != SHADER_OPCODE_URB_WRITE_SIMD8) {
1421 fs_reg dword_index = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1422 fs_reg prev_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1423 abld.ADD(prev_count, vertex_count, brw_imm_ud(0xffffffffu));
1424 unsigned log2_bits_per_vertex =
1425 _mesa_fls(gs_compile->control_data_bits_per_vertex);
1426 abld.SHR(dword_index, prev_count, brw_imm_ud(6u - log2_bits_per_vertex));
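/* Sanity check of the shift above (assuming _mesa_fls() returns the
 * one-based index of the highest set bit): with bits_per_vertex == 2 and
 * vertex_count == 33, prev_count == 32 and _mesa_fls(2) == 2, so
 * dword_index = 32 >> 4 == 2, matching (32 * 2) / 32 == 2 from the
 * original formula.
 */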
1428 if (per_slot_offset.file != BAD_FILE) {
1429 /* Set the per-slot offset to dword_index / 4, so that we'll write to
1430 * the appropriate OWord within the control data header.
1432 abld.SHR(per_slot_offset, dword_index, brw_imm_ud(2u));
1435 /* Set the channel masks to 1 << (dword_index % 4), so that we'll
1436 * write to the appropriate DWORD within the OWORD.
1438 fs_reg channel = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1439 fwa_bld.AND(channel, dword_index, brw_imm_ud(3u));
1440 channel_mask = intexp2(fwa_bld, channel);
1441 /* Then the channel masks need to be in bits 23:16. */
1442 fwa_bld.SHL(channel_mask, channel_mask, brw_imm_ud(16u));
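/* For example, dword_index == 6 gives per_slot_offset == 1 (the second
 * OWord) and channel == (6 & 3) == 2, so channel_mask == 1 << 2 shifted
 * into bits 23:16 becomes 0x00040000, enabling only DWord 2 of that
 * OWord.
 */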
1445 /* Store the control data bits in the message payload and send it. */
1447 if (channel_mask.file != BAD_FILE)
1448 mlen += 4; /* channel masks, plus 3 extra copies of the data */
1449 if (per_slot_offset.file != BAD_FILE)
1452 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, mlen);
1453 fs_reg *sources = ralloc_array(mem_ctx, fs_reg, mlen);
1455 sources[i++] = fs_reg(retype(brw_vec8_grf(1, 0), BRW_REGISTER_TYPE_UD));
1456 if (per_slot_offset.file != BAD_FILE)
1457 sources[i++] = per_slot_offset;
1458 if (channel_mask.file != BAD_FILE)
1459 sources[i++] = channel_mask;
1461 sources[i++] = this->control_data_bits;
1464 abld.LOAD_PAYLOAD(payload, sources, mlen, mlen);
1465 fs_inst *inst = abld.emit(opcode, reg_undef, payload);
1467 /* We need to increment Global Offset by 256-bits to make room for
1468 * Broadwell's extra "Vertex Count" payload at the beginning of the
1469 * URB entry. Since this is an OWord message, Global Offset is counted
1470 * in 128-bit units, so we must set it to 2.
1472 if (gs_prog_data->static_vertex_count == -1)
1477 fs_visitor::set_gs_stream_control_data_bits(const fs_reg &vertex_count,
1480 /* control_data_bits |= stream_id << ((2 * (vertex_count - 1)) % 32) */
1482 /* Note: we are calling this *before* increasing vertex_count, so
1483 * this->vertex_count == vertex_count - 1 in the formula above.
1486 /* Stream mode uses 2 bits per vertex */
1487 assert(gs_compile->control_data_bits_per_vertex == 2);
1489 /* Must be a valid stream */
1490 assert(stream_id >= 0 && stream_id < MAX_VERTEX_STREAMS);
1492 /* Control data bits are initialized to 0 so we don't have to set any
1493 * bits when sending vertices to stream 0.
1498 const fs_builder abld = bld.annotate("set stream control data bits", NULL);
1500 /* reg::sid = stream_id */
1501 fs_reg sid = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1502 abld.MOV(sid, brw_imm_ud(stream_id));
1504 /* reg:shift_count = 2 * (vertex_count - 1) */
1505 fs_reg shift_count = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1506 abld.SHL(shift_count, vertex_count, brw_imm_ud(1u));
1508 /* Note: we're relying on the fact that the GEN SHL instruction only pays
1509 * attention to the lower 5 bits of its second source argument, so on this
1510 * architecture, stream_id << 2 * (vertex_count - 1) is equivalent to
1511 * stream_id << ((2 * (vertex_count - 1)) % 32).
1513 fs_reg mask = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1514 abld.SHL(mask, sid, shift_count);
1515 abld.OR(this->control_data_bits, this->control_data_bits, mask);
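/* For example, if the vertex_count register holds 5 and stream_id is 2,
 * shift_count == 10 and mask == 2 << 10, so bits 11:10 of
 * control_data_bits record stream 2 for that vertex.
 */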
1519 fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
1522 assert(stage == MESA_SHADER_GEOMETRY);
1524 struct brw_gs_prog_data *gs_prog_data =
1525 (struct brw_gs_prog_data *) prog_data;
1527 fs_reg vertex_count = get_nir_src(vertex_count_nir_src);
1528 vertex_count.type = BRW_REGISTER_TYPE_UD;
1530 /* Haswell and later hardware ignores the "Render Stream Select" bits
1531 * from the 3DSTATE_STREAMOUT packet when the SOL stage is disabled,
1532 * and instead sends all primitives down the pipeline for rasterization.
1533 * If the SOL stage is enabled, "Render Stream Select" is honored and
1534 * primitives bound to non-zero streams are discarded after stream output.
1536 * Since the only purpose of primitives sent to non-zero streams is to
1537 * be recorded by transform feedback, we can simply discard all geometry
1538 * bound to these streams when transform feedback is disabled.
1540 if (stream_id > 0 && !nir->info.has_transform_feedback_varyings)
1543 /* If we're outputting 32 control data bits or less, then we can wait
1544 * until the shader is over to output them all. Otherwise we need to
1545 * output them as we go. Now is the time to do it, since we're about to
1546 * output the vertex_count'th vertex, so it's guaranteed that the
1547 * control data bits associated with the (vertex_count - 1)th vertex are
1550 if (gs_compile->control_data_header_size_bits > 32) {
1551 const fs_builder abld =
1552 bld.annotate("emit vertex: emit control data bits");
1554 /* Only emit control data bits if we've finished accumulating a batch
1555 * of 32 bits. This is the case when:
1557 * (vertex_count * bits_per_vertex) % 32 == 0
1559 * (in other words, when the last 5 bits of vertex_count *
1560 * bits_per_vertex are 0). Assuming bits_per_vertex == 2^n for some
1561 * integer n (which is always the case, since bits_per_vertex is
1562 * always 1 or 2), this is equivalent to requiring that the last 5-n
1563 * bits of vertex_count are 0:
1565 * vertex_count & (2^(5-n) - 1) == 0
1567 * 2^(5-n) == 2^5 / 2^n == 32 / bits_per_vertex, so this is
1570 * vertex_count & (32 / bits_per_vertex - 1) == 0
1572 * TODO: If vertex_count is an immediate, we could do some of this math
1573 * at compile time...
1576 abld.AND(bld.null_reg_d(), vertex_count,
1577 brw_imm_ud(32u / gs_compile->control_data_bits_per_vertex - 1u));
1578 inst->conditional_mod = BRW_CONDITIONAL_Z;
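/* For example, with bits_per_vertex == 2 the immediate above is 15, so
 * the Z flag (and the IF below) fires whenever vertex_count is a multiple
 * of 16, i.e. whenever another full 32 bits of control data have been
 * accumulated; the inner vertex_count != 0 check handles the
 * start-of-shader case.
 */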
1580 abld.IF(BRW_PREDICATE_NORMAL);
1581 /* If vertex_count is 0, then no control data bits have been
1582 * accumulated yet, so we can skip emitting them.
1584 abld.CMP(bld.null_reg_d(), vertex_count, brw_imm_ud(0u),
1585 BRW_CONDITIONAL_NEQ);
1586 abld.IF(BRW_PREDICATE_NORMAL);
1587 emit_gs_control_data_bits(vertex_count);
1588 abld.emit(BRW_OPCODE_ENDIF);
1590 /* Reset control_data_bits to 0 so we can start accumulating a new
1593 * Note: in the case where vertex_count == 0, this neutralizes the
1594 * effect of any call to EndPrimitive() that the shader may have
1595 * made before outputting its first vertex.
1597 inst = abld.MOV(this->control_data_bits, brw_imm_ud(0u));
1598 inst->force_writemask_all = true;
1599 abld.emit(BRW_OPCODE_ENDIF);
1602 emit_urb_writes(vertex_count);
1604 /* In stream mode we have to set control data bits for all vertices
1605 * unless we have disabled control data bits completely (which we do
1606 * do for GL_POINTS outputs that don't use streams).
1608 if (gs_compile->control_data_header_size_bits > 0 &&
1609 gs_prog_data->control_data_format ==
1610 GEN7_GS_CONTROL_DATA_FORMAT_GSCTL_SID) {
1611 set_gs_stream_control_data_bits(vertex_count, stream_id);
1616 fs_visitor::emit_gs_input_load(const fs_reg &dst,
1617 const nir_src &vertex_src,
1618 unsigned base_offset,
1619 const nir_src &offset_src,
1620 unsigned num_components)
1622 struct brw_gs_prog_data *gs_prog_data = (struct brw_gs_prog_data *) prog_data;
1624 nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
1625 nir_const_value *offset_const = nir_src_as_const_value(offset_src);
1626 const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
1628 /* Offset 0 is the VUE header, which contains VARYING_SLOT_LAYER [.y],
1629 * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w]. Only
1630 * gl_PointSize is available as a GS input, however, so it must be that.
1632 const bool is_point_size = (base_offset == 0);
1634 if (offset_const != NULL && vertex_const != NULL &&
1635 4 * (base_offset + offset_const->u[0]) < push_reg_count) {
1636 int imm_offset = (base_offset + offset_const->u[0]) * 4 +
1637 vertex_const->u[0] * push_reg_count;
1638 /* This input was pushed into registers. */
1639 if (is_point_size) {
1640 /* gl_PointSize comes in .w */
1641 assert(imm_offset == 0);
1642 bld.MOV(dst, fs_reg(ATTR, imm_offset + 3, dst.type));
1644 for (unsigned i = 0; i < num_components; i++) {
1645 bld.MOV(offset(dst, bld, i),
1646 fs_reg(ATTR, imm_offset + i, dst.type));
1650 /* Resort to the pull model. Ensure the VUE handles are provided. */
1651 gs_prog_data->base.include_vue_handles = true;
1653 unsigned first_icp_handle = gs_prog_data->include_primitive_id ? 3 : 2;
1657 /* The vertex index is constant; just select the proper URB handle. */
1659 retype(brw_vec8_grf(first_icp_handle + vertex_const->i[0], 0),
1660 BRW_REGISTER_TYPE_UD);
1662 /* The vertex index is non-constant. We need to use indirect
1663 * addressing to fetch the proper URB handle.
1665 * First, we start with the sequence <7, 6, 5, 4, 3, 2, 1, 0>
1666 * indicating that channel <n> should read the handle from
1667 * DWord <n>. We convert that to bytes by multiplying by 4.
1669 * Next, we convert the vertex index to bytes by multiplying
1670 * by 32 (shifting by 5), and add the two together. This is
1671 * the final indirect byte offset.
1673 fs_reg sequence = bld.vgrf(BRW_REGISTER_TYPE_W, 1);
1674 fs_reg channel_offsets = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1675 fs_reg vertex_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1676 fs_reg icp_offset_bytes = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1677 icp_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1679 /* sequence = <7, 6, 5, 4, 3, 2, 1, 0> */
1680 bld.MOV(sequence, fs_reg(brw_imm_v(0x76543210)));
1681 /* channel_offsets = 4 * sequence = <28, 24, 20, 16, 12, 8, 4, 0> */
1682 bld.SHL(channel_offsets, sequence, brw_imm_ud(2u));
1683 /* Convert vertex_index to bytes (multiply by 32) */
1684 bld.SHL(vertex_offset_bytes,
1685 retype(get_nir_src(vertex_src), BRW_REGISTER_TYPE_UD),
1687 bld.ADD(icp_offset_bytes, vertex_offset_bytes, channel_offsets);
1689 /* Use first_icp_handle as the base offset. There is one register
1690 * of URB handles per vertex, so inform the register allocator that
1691 * we might read up to nir->info.gs.vertices_in registers.
1693 bld.emit(SHADER_OPCODE_MOV_INDIRECT, icp_handle,
1694 fs_reg(brw_vec8_grf(first_icp_handle, 0)),
1695 fs_reg(icp_offset_bytes),
1696 brw_imm_ud(nir->info.gs.vertices_in * REG_SIZE));
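/* Putting the pieces together (illustrative): for SIMD channel 3 reading
 * vertex 2, channel_offsets holds 12 and vertex_offset_bytes holds 64, so
 * icp_offset_bytes is 76 -- DWord 3 of the URB-handle register for the
 * third vertex (first_icp_handle + 2).
 */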
1701 /* Constant indexing - use global offset. */
1702 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
1703 inst->offset = base_offset + offset_const->u[0];
1704 inst->base_mrf = -1;
1706 inst->regs_written = num_components;
1708 /* Indirect indexing - use per-slot offsets as well. */
1709 const fs_reg srcs[] = { icp_handle, get_nir_src(offset_src) };
1710 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
1711 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
1713 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
1714 inst->offset = base_offset;
1715 inst->base_mrf = -1;
1717 inst->regs_written = num_components;
1720 if (is_point_size) {
1721 /* Read the whole VUE header (because of alignment) and read .w. */
1722 fs_reg tmp = bld.vgrf(dst.type, 4);
1724 inst->regs_written = 4;
1725 bld.MOV(dst, offset(tmp, bld, 3));
1731 fs_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
1733 nir_src *offset_src = nir_get_io_offset_src(instr);
1734 nir_const_value *const_value = nir_src_as_const_value(*offset_src);
1737 /* The only constant offset we should find is 0. brw_nir.c's
1738 * add_const_offset_to_base() will fold other constant offsets
1739 * into instr->const_index[0].
1741 assert(const_value->u[0] == 0);
1745 return get_nir_src(*offset_src);
1749 fs_visitor::nir_emit_vs_intrinsic(const fs_builder &bld,
1750 nir_intrinsic_instr *instr)
1752 assert(stage == MESA_SHADER_VERTEX);
1755 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1756 dest = get_nir_dest(instr->dest);
1758 switch (instr->intrinsic) {
1759 case nir_intrinsic_load_vertex_id:
1760 unreachable("should be lowered by lower_vertex_id()");
1762 case nir_intrinsic_load_vertex_id_zero_base:
1763 case nir_intrinsic_load_base_vertex:
1764 case nir_intrinsic_load_instance_id:
1765 case nir_intrinsic_load_base_instance:
1766 case nir_intrinsic_load_draw_id: {
1767 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1768 fs_reg val = nir_system_values[sv];
1769 assert(val.file != BAD_FILE);
1770 dest.type = val.type;
1776 nir_emit_intrinsic(bld, instr);
1782 fs_visitor::nir_emit_tes_intrinsic(const fs_builder &bld,
1783 nir_intrinsic_instr *instr)
1785 assert(stage == MESA_SHADER_TESS_EVAL);
1786 struct brw_tes_prog_data *tes_prog_data = (struct brw_tes_prog_data *) prog_data;
1789 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1790 dest = get_nir_dest(instr->dest);
1792 switch (instr->intrinsic) {
1793 case nir_intrinsic_load_primitive_id:
1794 bld.MOV(dest, fs_reg(brw_vec1_grf(0, 1)));
1796 case nir_intrinsic_load_tess_coord:
1797 /* gl_TessCoord is part of the payload in g1-3 */
1798 for (unsigned i = 0; i < 3; i++) {
1799 bld.MOV(offset(dest, bld, i), fs_reg(brw_vec8_grf(1 + i, 0)));
1803 case nir_intrinsic_load_tess_level_outer:
1804 /* When the TES reads gl_TessLevelOuter, we ensure that the patch header
1805 * appears as a push-model input. So, we can simply use the ATTR file
1806 * rather than issuing URB read messages. The data is stored in the
1807 * high DWords in reverse order - DWord 7 contains .x, DWord 6 contains
1810 switch (tes_prog_data->domain) {
1811 case BRW_TESS_DOMAIN_QUAD:
1812 for (unsigned i = 0; i < 4; i++)
1813 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 7 - i));
1815 case BRW_TESS_DOMAIN_TRI:
1816 for (unsigned i = 0; i < 3; i++)
1817 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 7 - i));
1819 case BRW_TESS_DOMAIN_ISOLINE:
1820 for (unsigned i = 0; i < 2; i++)
1821 bld.MOV(offset(dest, bld, i), component(fs_reg(ATTR, 0), 7 - i));
1826 case nir_intrinsic_load_tess_level_inner:
1827 /* When the TES reads gl_TessLevelInner, we ensure that the patch header
1828 * appears as a push-model input. So, we can simply use the ATTR file
1829 * rather than issuing URB read messages.
1831 switch (tes_prog_data->domain) {
1832 case BRW_TESS_DOMAIN_QUAD:
1833 bld.MOV(dest, component(fs_reg(ATTR, 0), 3));
1834 bld.MOV(offset(dest, bld, 1), component(fs_reg(ATTR, 0), 2));
1836 case BRW_TESS_DOMAIN_TRI:
1837 bld.MOV(dest, component(fs_reg(ATTR, 0), 4));
1839 case BRW_TESS_DOMAIN_ISOLINE:
1840 /* ignore - value is undefined */
1845 case nir_intrinsic_load_input:
1846 case nir_intrinsic_load_per_vertex_input: {
1847 fs_reg indirect_offset = get_indirect_offset(instr);
1848 unsigned imm_offset = instr->const_index[0];
1851 if (indirect_offset.file == BAD_FILE) {
1852 /* Arbitrarily only push up to 32 vec4 slots worth of data,
1853 * which is 16 registers (since each holds 2 vec4 slots).
1855 const unsigned max_push_slots = 32;
1856 if (imm_offset < max_push_slots) {
1857 fs_reg src = fs_reg(ATTR, imm_offset / 2, dest.type);
1858 for (int i = 0; i < instr->num_components; i++) {
1859 bld.MOV(offset(dest, bld, i),
1860 component(src, 4 * (imm_offset % 2) + i));
1862 tes_prog_data->base.urb_read_length =
1863 MAX2(tes_prog_data->base.urb_read_length,
1864 DIV_ROUND_UP(imm_offset + 1, 2));
1866 /* Replicate the patch handle to all enabled channels */
1867 const fs_reg srcs[] = {
1868 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)
1870 fs_reg patch_handle = bld.vgrf(BRW_REGISTER_TYPE_UD, 1);
1871 bld.LOAD_PAYLOAD(patch_handle, srcs, ARRAY_SIZE(srcs), 0);
1873 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dest, patch_handle);
1875 inst->offset = imm_offset;
1876 inst->base_mrf = -1;
1877 inst->regs_written = instr->num_components;
1880 /* Indirect indexing - use per-slot offsets as well. */
1881 const fs_reg srcs[] = {
1882 retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD),
1885 fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
1886 bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
1888 inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dest, payload);
1890 inst->offset = imm_offset;
1891 inst->base_mrf = -1;
1892 inst->regs_written = instr->num_components;
1897 nir_emit_intrinsic(bld, instr);
1903 fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
1904 nir_intrinsic_instr *instr)
1906 assert(stage == MESA_SHADER_GEOMETRY);
1907 fs_reg indirect_offset;
1910 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1911 dest = get_nir_dest(instr->dest);
1913 switch (instr->intrinsic) {
1914 case nir_intrinsic_load_primitive_id:
1915 assert(stage == MESA_SHADER_GEOMETRY);
1916 assert(((struct brw_gs_prog_data *)prog_data)->include_primitive_id);
1917 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD),
1918 retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
1921 case nir_intrinsic_load_input:
1922 unreachable("load_input intrinsics are invalid for the GS stage");
1924 case nir_intrinsic_load_per_vertex_input:
1925 emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
1926 instr->src[1], instr->num_components);
1929 case nir_intrinsic_emit_vertex_with_counter:
1930 emit_gs_vertex(instr->src[0], instr->const_index[0]);
1933 case nir_intrinsic_end_primitive_with_counter:
1934 emit_gs_end_primitive(instr->src[0]);
1937 case nir_intrinsic_set_vertex_count:
1938 bld.MOV(this->final_gs_vertex_count, get_nir_src(instr->src[0]));
1941 case nir_intrinsic_load_invocation_id: {
1942 fs_reg val = nir_system_values[SYSTEM_VALUE_INVOCATION_ID];
1943 assert(val.file != BAD_FILE);
1944 dest.type = val.type;
1950 nir_emit_intrinsic(bld, instr);
1956 fs_visitor::nir_emit_fs_intrinsic(const fs_builder &bld,
1957 nir_intrinsic_instr *instr)
1959 assert(stage == MESA_SHADER_FRAGMENT);
1960 struct brw_wm_prog_data *wm_prog_data =
1961 (struct brw_wm_prog_data *) prog_data;
1964 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
1965 dest = get_nir_dest(instr->dest);
1967 switch (instr->intrinsic) {
1968 case nir_intrinsic_load_front_face:
1969 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D),
1970 *emit_frontfacing_interpolation());
1973 case nir_intrinsic_load_sample_pos: {
1974 fs_reg sample_pos = nir_system_values[SYSTEM_VALUE_SAMPLE_POS];
1975 assert(sample_pos.file != BAD_FILE);
1976 dest.type = sample_pos.type;
1977 bld.MOV(dest, sample_pos);
1978 bld.MOV(offset(dest, bld, 1), offset(sample_pos, bld, 1));
1982 case nir_intrinsic_load_helper_invocation:
1983 case nir_intrinsic_load_sample_mask_in:
1984 case nir_intrinsic_load_sample_id: {
1985 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
1986 fs_reg val = nir_system_values[sv];
1987 assert(val.file != BAD_FILE);
1988 dest.type = val.type;
1993 case nir_intrinsic_discard:
1994 case nir_intrinsic_discard_if: {
1995 /* We track our discarded pixels in f0.1. By predicating on it, we can
1996 * update just the flag bits that aren't yet discarded. If there's no
1997 * condition, we emit a CMP of g0 != g0, so all currently executing
1998 * channels will get turned off.
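/* For illustration (hypothetical shader, not from this file):
 *
 *    if (cond) discard;
 *
 * reaches this point as nir_intrinsic_discard_if with cond as src[0], so
 * the CMP below updates f0.1 from the condition; a bare discard has no
 * source, hence the always-false g0 != g0 compare that turns off every
 * currently executing channel.
 */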
2001 if (instr->intrinsic == nir_intrinsic_discard_if) {
2002 cmp = bld.CMP(bld.null_reg_f(), get_nir_src(instr->src[0]),
2003 brw_imm_d(0), BRW_CONDITIONAL_Z);
2005 fs_reg some_reg = fs_reg(retype(brw_vec8_grf(0, 0),
2006 BRW_REGISTER_TYPE_UW));
2007 cmp = bld.CMP(bld.null_reg_f(), some_reg, some_reg, BRW_CONDITIONAL_NZ);
2009 cmp->predicate = BRW_PREDICATE_NORMAL;
2010 cmp->flag_subreg = 1;
2012 if (devinfo->gen >= 6) {
2013 emit_discard_jump();
2018 case nir_intrinsic_interp_var_at_centroid:
2019 case nir_intrinsic_interp_var_at_sample:
2020 case nir_intrinsic_interp_var_at_offset: {
2021 /* Handle ARB_gpu_shader5 interpolation intrinsics
2023 * It's worth a quick word of explanation as to why we handle the full
2024 * variable-based interpolation intrinsic rather than a lowered version
2025 * like we do for other inputs. We have to do that because the way
2026 * we set up inputs doesn't allow us to use the already setup inputs for
2027 * interpolation. At the beginning of the shader, we go through all of
2028 * the input variables and do the initial interpolation and put it in
2029 * the nir_inputs array based on its location as determined in
2030 * nir_lower_io. If the input isn't used, dead code cleans up and
2031 * everything works fine. However, when we get to the ARB_gpu_shader5
2032 * interpolation intrinsics, we need to reinterpolate the input
2033 * differently. If we used an intrinsic that just had an index it would
2034 * only give us the offset into the nir_inputs array. However, this is
2035 * useless because that value is post-interpolation and we need
2036 * pre-interpolation. In order to get the actual location of the bits
2037 * we get from the vertex fetching hardware, we need the variable.
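/* As a hypothetical example (not from this file): for
 *
 *    interpolateAtOffset(v_color, off)
 *
 * we have to re-interpolate v_color at the requested offset using the
 * pixel interpolator, which means we need to know which hardware input
 * slot v_color occupies; an index into nir_inputs would only name the
 * already-interpolated value.
 */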
2039 wm_prog_data->pulls_bary = true;
2041 fs_reg dst_xy = bld.vgrf(BRW_REGISTER_TYPE_F, 2);
2042 const glsl_interp_qualifier interpolation =
2043 (glsl_interp_qualifier) instr->variables[0]->var->data.interpolation;
2045 switch (instr->intrinsic) {
2046 case nir_intrinsic_interp_var_at_centroid:
2047 emit_pixel_interpolater_send(bld,
2048 FS_OPCODE_INTERPOLATE_AT_CENTROID,
2055 case nir_intrinsic_interp_var_at_sample: {
2056 nir_const_value *const_sample = nir_src_as_const_value(instr->src[0]);
2059 unsigned msg_data = const_sample->i[0] << 4;
2061 emit_pixel_interpolater_send(bld,
2062 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
2065 brw_imm_ud(msg_data),
2068 const fs_reg sample_src = retype(get_nir_src(instr->src[0]),
2069 BRW_REGISTER_TYPE_UD);
2071 if (nir_src_is_dynamically_uniform(instr->src[0])) {
2072 const fs_reg sample_id = bld.emit_uniformize(sample_src);
2073 const fs_reg msg_data = vgrf(glsl_type::uint_type);
2074 bld.exec_all().group(1, 0)
2075 .SHL(msg_data, sample_id, brw_imm_ud(4u));
2076 emit_pixel_interpolater_send(bld,
2077 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
2083 /* Make a loop that sends a message to the pixel interpolater
2084 * for the sample number in each live channel. If there are
2085 * multiple channels with the same sample number then these
2086 * will be handled simultaneously with a single iteration of the loop.
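/* For example (hypothetical values): if the live channels request
 * samples {1, 3, 1, 3}, emit_uniformize picks one of them (say 1), the
 * CMP enables exactly the channels asking for that sample, one message
 * is sent for them, and the WHILE loops back for the channels still
 * requesting sample 3.
 */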
2089 bld.emit(BRW_OPCODE_DO);
2091 /* Get the next live sample number into sample_id_reg */
2092 const fs_reg sample_id = bld.emit_uniformize(sample_src);
2094 /* Set the flag register so that we can perform the send
2095 * message on all channels that have the same sample number
2097 bld.CMP(bld.null_reg_ud(),
2098 sample_src, sample_id,
2099 BRW_CONDITIONAL_EQ);
2100 const fs_reg msg_data = vgrf(glsl_type::uint_type);
2101 bld.exec_all().group(1, 0)
2102 .SHL(msg_data, sample_id, brw_imm_ud(4u));
2104 emit_pixel_interpolater_send(bld,
2105 FS_OPCODE_INTERPOLATE_AT_SAMPLE,
2110 set_predicate(BRW_PREDICATE_NORMAL, inst);
2112 /* Continue the loop if there are any live channels left */
2113 set_predicate_inv(BRW_PREDICATE_NORMAL,
2115 bld.emit(BRW_OPCODE_WHILE));
2122 case nir_intrinsic_interp_var_at_offset: {
2123 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
2126 unsigned off_x = MIN2((int)(const_offset->f[0] * 16), 7) & 0xf;
2127 unsigned off_y = MIN2((int)(const_offset->f[1] * 16), 7) & 0xf;
2129 emit_pixel_interpolater_send(bld,
2130 FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET,
2133 brw_imm_ud(off_x | (off_y << 4)),
2136 fs_reg src = vgrf(glsl_type::ivec2_type);
2137 fs_reg offset_src = retype(get_nir_src(instr->src[0]),
2138 BRW_REGISTER_TYPE_F);
2139 for (int i = 0; i < 2; i++) {
2140 fs_reg temp = vgrf(glsl_type::float_type);
2141 bld.MUL(temp, offset(offset_src, bld, i), brw_imm_f(16.0f));
2142 fs_reg itemp = vgrf(glsl_type::int_type);
2143 bld.MOV(itemp, temp); /* float to int */
2145 /* Clamp the upper end of the range to +7/16.
2146 * ARB_gpu_shader5 requires that we support a maximum offset
2147 * of +0.5, which isn't representable in an S0.4 value -- if
2148 * we didn't clamp it, we'd end up with -8/16, which is the
2149 * opposite of what the shader author wanted.
2151 * This is legal due to ARB_gpu_shader5's quantization
2154 * "Not all values of <offset> may be supported; x and y
2155 * offsets may be rounded to fixed-point values with the
2156 * number of fraction bits given by the
2157 * implementation-dependent constant
2158 * FRAGMENT_INTERPOLATION_OFFSET_BITS"
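/* Worked example (illustrative numbers): an offset of +0.5 becomes
 * (int)(0.5 * 16) == 8, which does not fit in S0.4 (legal range
 * -8/16 .. +7/16), so the conditional SEL below clamps it to +7,
 * i.e. +7/16; an offset of -0.5 becomes -8 and passes through
 * unchanged.
 */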
2160 set_condmod(BRW_CONDITIONAL_L,
2161 bld.SEL(offset(src, bld, i), itemp, brw_imm_d(7)));
2164 const enum opcode opcode = FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET;
2165 emit_pixel_interpolater_send(bld,
2176 unreachable("Invalid intrinsic");
2179 for (unsigned j = 0; j < instr->num_components; j++) {
2180 fs_reg src = interp_reg(instr->variables[0]->var->data.location, j);
2181 src.type = dest.type;
2183 bld.emit(FS_OPCODE_LINTERP, dest, dst_xy, src);
2184 dest = offset(dest, bld, 1);
2189 nir_emit_intrinsic(bld, instr);
2195 fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
2196 nir_intrinsic_instr *instr)
2198 assert(stage == MESA_SHADER_COMPUTE);
2199 struct brw_cs_prog_data *cs_prog_data =
2200 (struct brw_cs_prog_data *) prog_data;
2203 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2204 dest = get_nir_dest(instr->dest);
2206 switch (instr->intrinsic) {
2207 case nir_intrinsic_barrier:
2209 cs_prog_data->uses_barrier = true;
2212 case nir_intrinsic_load_local_invocation_id:
2213 case nir_intrinsic_load_work_group_id: {
2214 gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
2215 fs_reg val = nir_system_values[sv];
2216 assert(val.file != BAD_FILE);
2217 dest.type = val.type;
2218 for (unsigned i = 0; i < 3; i++)
2219 bld.MOV(offset(dest, bld, i), offset(val, bld, i));
2223 case nir_intrinsic_load_num_work_groups: {
2224 const unsigned surface =
2225 cs_prog_data->binding_table.work_groups_start;
2227 cs_prog_data->uses_num_work_groups = true;
2229 fs_reg surf_index = brw_imm_ud(surface);
2230 brw_mark_surface_used(prog_data, surface);
2232 /* Read the 3 GLuint components of gl_NumWorkGroups */
2233 for (unsigned i = 0; i < 3; i++) {
2234 fs_reg read_result =
2235 emit_untyped_read(bld, surf_index,
2237 1 /* dims */, 1 /* size */,
2238 BRW_PREDICATE_NONE);
2239 read_result.type = dest.type;
2240 bld.MOV(dest, read_result);
2241 dest = offset(dest, bld, 1);
2246 case nir_intrinsic_shared_atomic_add:
2247 nir_emit_shared_atomic(bld, BRW_AOP_ADD, instr);
2249 case nir_intrinsic_shared_atomic_imin:
2250 nir_emit_shared_atomic(bld, BRW_AOP_IMIN, instr);
2252 case nir_intrinsic_shared_atomic_umin:
2253 nir_emit_shared_atomic(bld, BRW_AOP_UMIN, instr);
2255 case nir_intrinsic_shared_atomic_imax:
2256 nir_emit_shared_atomic(bld, BRW_AOP_IMAX, instr);
2258 case nir_intrinsic_shared_atomic_umax:
2259 nir_emit_shared_atomic(bld, BRW_AOP_UMAX, instr);
2261 case nir_intrinsic_shared_atomic_and:
2262 nir_emit_shared_atomic(bld, BRW_AOP_AND, instr);
2264 case nir_intrinsic_shared_atomic_or:
2265 nir_emit_shared_atomic(bld, BRW_AOP_OR, instr);
2267 case nir_intrinsic_shared_atomic_xor:
2268 nir_emit_shared_atomic(bld, BRW_AOP_XOR, instr);
2270 case nir_intrinsic_shared_atomic_exchange:
2271 nir_emit_shared_atomic(bld, BRW_AOP_MOV, instr);
2273 case nir_intrinsic_shared_atomic_comp_swap:
2274 nir_emit_shared_atomic(bld, BRW_AOP_CMPWR, instr);
2278 nir_emit_intrinsic(bld, instr);
2284 fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr)
2287 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2288 dest = get_nir_dest(instr->dest);
2290 switch (instr->intrinsic) {
2291 case nir_intrinsic_atomic_counter_inc:
2292 case nir_intrinsic_atomic_counter_dec:
2293 case nir_intrinsic_atomic_counter_read: {
2294 using namespace surface_access;
2296 /* Get the arguments of the atomic intrinsic. */
2297 const fs_reg offset = get_nir_src(instr->src[0]);
2298 const unsigned surface = (stage_prog_data->binding_table.abo_start +
2299 instr->const_index[0]);
2302 /* Emit a surface read or atomic op. */
2303 switch (instr->intrinsic) {
2304 case nir_intrinsic_atomic_counter_read:
2305 tmp = emit_untyped_read(bld, brw_imm_ud(surface), offset, 1, 1);
2308 case nir_intrinsic_atomic_counter_inc:
2309 tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(),
2310 fs_reg(), 1, 1, BRW_AOP_INC);
2313 case nir_intrinsic_atomic_counter_dec:
2314 tmp = emit_untyped_atomic(bld, brw_imm_ud(surface), offset, fs_reg(),
2315 fs_reg(), 1, 1, BRW_AOP_PREDEC);
2319 unreachable("Unreachable");
2322 /* Assign the result. */
2323 bld.MOV(retype(dest, BRW_REGISTER_TYPE_UD), tmp);
2325 /* Mark the surface as used. */
2326 brw_mark_surface_used(stage_prog_data, surface);
2330 case nir_intrinsic_image_load:
2331 case nir_intrinsic_image_store:
2332 case nir_intrinsic_image_atomic_add:
2333 case nir_intrinsic_image_atomic_min:
2334 case nir_intrinsic_image_atomic_max:
2335 case nir_intrinsic_image_atomic_and:
2336 case nir_intrinsic_image_atomic_or:
2337 case nir_intrinsic_image_atomic_xor:
2338 case nir_intrinsic_image_atomic_exchange:
2339 case nir_intrinsic_image_atomic_comp_swap: {
2340 using namespace image_access;
2342 /* Get the referenced image variable and type. */
2343 const nir_variable *var = instr->variables[0]->var;
2344 const glsl_type *type = var->type->without_array();
2345 const brw_reg_type base_type = get_image_base_type(type);
2347 /* Get some metadata from the image intrinsic. */
2348 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
2349 const unsigned arr_dims = type->sampler_array ? 1 : 0;
2350 const unsigned surf_dims = type->coordinate_components() - arr_dims;
2351 const mesa_format format =
2352 (var->data.image.write_only ? MESA_FORMAT_NONE :
2353 _mesa_get_shader_image_format(var->data.image.format));
2355 /* Get the arguments of the image intrinsic. */
2356 const fs_reg image = get_nir_image_deref(instr->variables[0]);
2357 const fs_reg addr = retype(get_nir_src(instr->src[0]),
2358 BRW_REGISTER_TYPE_UD);
2359 const fs_reg src0 = (info->num_srcs >= 3 ?
2360 retype(get_nir_src(instr->src[2]), base_type) :
2362 const fs_reg src1 = (info->num_srcs >= 4 ?
2363 retype(get_nir_src(instr->src[3]), base_type) :
2367 /* Emit an image load, store or atomic op. */
2368 if (instr->intrinsic == nir_intrinsic_image_load)
2369 tmp = emit_image_load(bld, image, addr, surf_dims, arr_dims, format);
2371 else if (instr->intrinsic == nir_intrinsic_image_store)
2372 emit_image_store(bld, image, addr, src0, surf_dims, arr_dims, format);
2375 tmp = emit_image_atomic(bld, image, addr, src0, src1,
2376 surf_dims, arr_dims, info->dest_components,
2377 get_image_atomic_op(instr->intrinsic, type));
2379 /* Assign the result. */
2380 for (unsigned c = 0; c < info->dest_components; ++c)
2381 bld.MOV(offset(retype(dest, base_type), bld, c),
2382 offset(tmp, bld, c));
2386 case nir_intrinsic_memory_barrier_atomic_counter:
2387 case nir_intrinsic_memory_barrier_buffer:
2388 case nir_intrinsic_memory_barrier_image:
2389 case nir_intrinsic_memory_barrier: {
2390 const fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 16 / dispatch_width);
2391 bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
2396 case nir_intrinsic_group_memory_barrier:
2397 case nir_intrinsic_memory_barrier_shared:
2398 /* We treat these workgroup-level barriers as no-ops. This should be
2399 * safe at present and as long as:
2401 * - Memory access instructions are not subsequently reordered by the
2402 * compiler back-end.
2404 * - All threads from a given compute shader workgroup fit within a
2405 * single subslice and therefore talk to the same HDC shared unit
2406 * which supposedly guarantees ordering and coherency between threads
2407 * from the same workgroup. This may change in the future when we
2408 * start splitting workgroups across multiple subslices.
2410 * - The context is not in fault-and-stream mode, which could cause
2411 * memory transactions (including to SLM) prior to the barrier to be
2412 * replayed after the barrier if a pagefault occurs. This shouldn't
2413 * be a problem up to and including SKL because fault-and-stream is
2414 * not usable due to hardware issues, but that's likely to change in the future.
2419 case nir_intrinsic_shader_clock: {
2420 /* We cannot do anything if there is an event, so ignore it for now */
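/* A sketch of the intent: set_smear(0) and set_smear(1) pick the low and
 * high dwords of the 64-bit timestamp read by get_timestamp(), packing
 * them into the uvec2 result that ARB_shader_clock's clock2x32ARB()
 * expects.
 */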
2421 fs_reg shader_clock = get_timestamp(bld);
2422 const fs_reg srcs[] = { shader_clock.set_smear(0), shader_clock.set_smear(1) };
2424 bld.LOAD_PAYLOAD(dest, srcs, ARRAY_SIZE(srcs), 0);
2428 case nir_intrinsic_image_size: {
2429 /* Get the referenced image variable and type. */
2430 const nir_variable *var = instr->variables[0]->var;
2431 const glsl_type *type = var->type->without_array();
2433 /* Get the size of the image. */
2434 const fs_reg image = get_nir_image_deref(instr->variables[0]);
2435 const fs_reg size = offset(image, bld, BRW_IMAGE_PARAM_SIZE_OFFSET);
2437 /* For 1DArray image types, the array index is stored in the Z component.
2438 * Fix this by swizzling the Z component to the Y component.
2440 const bool is_1d_array_image =
2441 type->sampler_dimensionality == GLSL_SAMPLER_DIM_1D &&
2442 type->sampler_array;
2444 /* For CubeArray images, we should count the number of cubes instead
2445 * of the number of faces. Fix it by dividing the Z component by 6.
2447 const bool is_cube_array_image =
2448 type->sampler_dimensionality == GLSL_SAMPLER_DIM_CUBE &&
2449 type->sampler_array;
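/* Illustrative example: if the hardware reports 12 faces in the Z
 * component of the size vector for a CubeArray image, the INT_QUOTIENT
 * by 6 below returns 2 cubes; for a 1DArray image the layer count sits
 * in the Z component and is copied into component 1 of the destination.
 */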
2451 /* Copy all the components. */
2452 const nir_intrinsic_info *info = &nir_intrinsic_infos[instr->intrinsic];
2453 for (unsigned c = 0; c < info->dest_components; ++c) {
2454 if ((int)c >= type->coordinate_components()) {
2455 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2457 } else if (c == 1 && is_1d_array_image) {
2458 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2459 offset(size, bld, 2));
2460 } else if (c == 2 && is_cube_array_image) {
2461 bld.emit(SHADER_OPCODE_INT_QUOTIENT,
2462 offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2463 offset(size, bld, c), brw_imm_d(6));
2465 bld.MOV(offset(retype(dest, BRW_REGISTER_TYPE_D), bld, c),
2466 offset(size, bld, c));
2473 case nir_intrinsic_image_samples:
2474 /* The driver does not support multi-sampled images. */
2475 bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
2478 case nir_intrinsic_load_uniform: {
2479 /* Offsets are in bytes but they should always be multiples of 4 */
2480 assert(instr->const_index[0] % 4 == 0);
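/* E.g. a constant byte offset of 16 selects UNIFORM element 16 / 4 == 4;
 * the division reflects the 4-byte size of each uniform slot.
 */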
2482 fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
2484 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
2486 /* Offsets are in bytes but they should always be multiples of 4 */
2487 assert(const_offset->u[0] % 4 == 0);
2488 src.reg_offset = const_offset->u[0] / 4;
2490 src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
2493 for (unsigned j = 0; j < instr->num_components; j++) {
2494 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
2499 case nir_intrinsic_load_ubo: {
2500 nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
2504 const unsigned index = stage_prog_data->binding_table.ubo_start +
2506 surf_index = brw_imm_ud(index);
2507 brw_mark_surface_used(prog_data, index);
2509 /* The block index is not a constant. Evaluate the index expression
2510 * per-channel and add the base UBO index; we have to select a value
2511 * from any live channel.
2513 surf_index = vgrf(glsl_type::uint_type);
2514 bld.ADD(surf_index, get_nir_src(instr->src[0]),
2515 brw_imm_ud(stage_prog_data->binding_table.ubo_start));
2516 surf_index = bld.emit_uniformize(surf_index);
2518 /* Assume this may touch any UBO. It would be nice to provide
2519 * a tighter bound, but the array information is already lowered away.
2521 brw_mark_surface_used(prog_data,
2522 stage_prog_data->binding_table.ubo_start +
2523 nir->info.num_ubos - 1);
2526 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
2527 if (const_offset == NULL) {
2528 fs_reg base_offset = retype(get_nir_src(instr->src[1]),
2529 BRW_REGISTER_TYPE_D);
2531 for (int i = 0; i < instr->num_components; i++)
2532 VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
2533 base_offset, i * 4);
2535 fs_reg packed_consts = vgrf(glsl_type::float_type);
2536 packed_consts.type = dest.type;
2538 struct brw_reg const_offset_reg = brw_imm_ud(const_offset->u[0] & ~15);
2539 bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
2540 surf_index, const_offset_reg);
2542 for (unsigned i = 0; i < instr->num_components; i++) {
2543 packed_consts.set_smear(const_offset->u[0] % 16 / 4 + i);
2545 /* The std140 packing rules don't allow vectors to cross 16-byte
2546 * boundaries, and a reg is 32 bytes.
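/* For instance (hypothetical offset): a constant offset of 20 bytes
 * fetches the 16-byte-aligned block at offset 16, and set_smear then
 * picks dword (20 % 16) / 4 + i == 1 + i out of the returned register
 * for each component i.
 */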
2548 assert(packed_consts.subreg_offset < 32);
2550 bld.MOV(dest, packed_consts);
2551 dest = offset(dest, bld, 1);
2557 case nir_intrinsic_load_ssbo: {
2558 assert(devinfo->gen >= 7);
2560 nir_const_value *const_uniform_block =
2561 nir_src_as_const_value(instr->src[0]);
2564 if (const_uniform_block) {
2565 unsigned index = stage_prog_data->binding_table.ssbo_start +
2566 const_uniform_block->u[0];
2567 surf_index = brw_imm_ud(index);
2568 brw_mark_surface_used(prog_data, index);
2570 surf_index = vgrf(glsl_type::uint_type);
2571 bld.ADD(surf_index, get_nir_src(instr->src[0]),
2572 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
2574 /* Assume this may touch any SSBO. It would be nice to provide
2575 * a tighter bound, but the array information is already lowered away.
2577 brw_mark_surface_used(prog_data,
2578 stage_prog_data->binding_table.ssbo_start +
2579 nir->info.num_ssbos - 1);
2583 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
2585 offset_reg = brw_imm_ud(const_offset->u[0]);
2587 offset_reg = get_nir_src(instr->src[1]);
2590 /* Read the vector */
2591 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
2593 instr->num_components,
2594 BRW_PREDICATE_NONE);
2595 read_result.type = dest.type;
2596 for (int i = 0; i < instr->num_components; i++)
2597 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
2602 case nir_intrinsic_load_shared: {
2603 assert(devinfo->gen >= 7);
2605 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
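/* GEN7_BTI_SLM names the binding table index the data port reserves for
 * shared local memory, so the read below goes to SLM without the driver
 * having to set up a surface for it.
 */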
2607 /* Get the offset to read from */
2609 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
2611 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0]);
2613 offset_reg = vgrf(glsl_type::uint_type);
2615 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
2616 brw_imm_ud(instr->const_index[0]));
2619 /* Read the vector */
2620 fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
2622 instr->num_components,
2623 BRW_PREDICATE_NONE);
2624 read_result.type = dest.type;
2625 for (int i = 0; i < instr->num_components; i++)
2626 bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
2631 case nir_intrinsic_store_shared: {
2632 assert(devinfo->gen >= 7);
2635 fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
2638 fs_reg val_reg = get_nir_src(instr->src[0]);
2641 unsigned writemask = instr->const_index[1];
2643 /* Combine groups of consecutive enabled channels in one write
2644 * message. We use ffs to find the first enabled channel and then ffs on
2645 * the bit-inverse, down-shifted writemask to determine the length of
2646 * the block of enabled bits.
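/* Worked example (illustrative writemask): writemask 0b1011 gives
 * first_component == 0 and length == 2 on the first pass, so components
 * 0-1 go out in one message; clearing those bits leaves 0b1000, and the
 * next pass writes component 3 on its own.
 */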
2649 unsigned first_component = ffs(writemask) - 1;
2650 unsigned length = ffs(~(writemask >> first_component)) - 1;
2653 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
2655 offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0] +
2656 4 * first_component);
2658 offset_reg = vgrf(glsl_type::uint_type);
2660 retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
2661 brw_imm_ud(instr->const_index[0] + 4 * first_component));
2664 emit_untyped_write(bld, surf_index, offset_reg,
2665 offset(val_reg, bld, first_component),
2666 1 /* dims */, length,
2667 BRW_PREDICATE_NONE);
2669 /* Clear the bits in the writemask that we just wrote, then try
2670 * again to see if more channels are left.
2672 writemask &= (15 << (first_component + length));
2678 case nir_intrinsic_load_input: {
2680 if (stage == MESA_SHADER_VERTEX) {
2681 src = fs_reg(ATTR, instr->const_index[0], dest.type);
2683 src = offset(retype(nir_inputs, dest.type), bld,
2684 instr->const_index[0]);
2687 nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
2688 assert(const_offset && "Indirect input loads not allowed");
2689 src = offset(src, bld, const_offset->u[0]);
2691 for (unsigned j = 0; j < instr->num_components; j++) {
2692 bld.MOV(offset(dest, bld, j), offset(src, bld, j));
2697 case nir_intrinsic_store_ssbo: {
2698 assert(devinfo->gen >= 7);
2702 nir_const_value *const_uniform_block =
2703 nir_src_as_const_value(instr->src[1]);
2704 if (const_uniform_block) {
2705 unsigned index = stage_prog_data->binding_table.ssbo_start +
2706 const_uniform_block->u[0];
2707 surf_index = brw_imm_ud(index);
2708 brw_mark_surface_used(prog_data, index);
2710 surf_index = vgrf(glsl_type::uint_type);
2711 bld.ADD(surf_index, get_nir_src(instr->src[1]),
2712 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
2714 brw_mark_surface_used(prog_data,
2715 stage_prog_data->binding_table.ssbo_start +
2716 nir->info.num_ssbos - 1);
2720 fs_reg val_reg = get_nir_src(instr->src[0]);
2723 unsigned writemask = instr->const_index[0];
2725 /* Combine groups of consecutive enabled channels in one write
2726 * message. We use ffs to find the first enabled channel and then ffs on
2727 * the bit-inverse, down-shifted writemask to determine the length of
2728 * the block of enabled bits.
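/* E.g. (hypothetical values) writemask 0b0101 with a constant byte
 * offset of 32 produces two single-component messages: component 0 at
 * byte offset 32 + 4*0 == 32 and component 2 at 32 + 4*2 == 40.
 */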
2731 unsigned first_component = ffs(writemask) - 1;
2732 unsigned length = ffs(~(writemask >> first_component)) - 1;
2735 nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
2737 offset_reg = brw_imm_ud(const_offset->u[0] + 4 * first_component);
2739 offset_reg = vgrf(glsl_type::uint_type);
2741 retype(get_nir_src(instr->src[2]), BRW_REGISTER_TYPE_UD),
2742 brw_imm_ud(4 * first_component));
2745 emit_untyped_write(bld, surf_index, offset_reg,
2746 offset(val_reg, bld, first_component),
2747 1 /* dims */, length,
2748 BRW_PREDICATE_NONE);
2750 /* Clear the bits in the writemask that we just wrote, then try
2751 * again to see if more channels are left.
2753 writemask &= (15 << (first_component + length));
2758 case nir_intrinsic_store_output: {
2759 fs_reg src = get_nir_src(instr->src[0]);
2760 fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
2761 instr->const_index[0]);
2763 nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
2764 assert(const_offset && "Indirect output stores not allowed");
2765 new_dest = offset(new_dest, bld, const_offset->u[0]);
2767 for (unsigned j = 0; j < instr->num_components; j++) {
2768 bld.MOV(offset(new_dest, bld, j), offset(src, bld, j));
2773 case nir_intrinsic_ssbo_atomic_add:
2774 nir_emit_ssbo_atomic(bld, BRW_AOP_ADD, instr);
2776 case nir_intrinsic_ssbo_atomic_imin:
2777 nir_emit_ssbo_atomic(bld, BRW_AOP_IMIN, instr);
2779 case nir_intrinsic_ssbo_atomic_umin:
2780 nir_emit_ssbo_atomic(bld, BRW_AOP_UMIN, instr);
2782 case nir_intrinsic_ssbo_atomic_imax:
2783 nir_emit_ssbo_atomic(bld, BRW_AOP_IMAX, instr);
2785 case nir_intrinsic_ssbo_atomic_umax:
2786 nir_emit_ssbo_atomic(bld, BRW_AOP_UMAX, instr);
2788 case nir_intrinsic_ssbo_atomic_and:
2789 nir_emit_ssbo_atomic(bld, BRW_AOP_AND, instr);
2791 case nir_intrinsic_ssbo_atomic_or:
2792 nir_emit_ssbo_atomic(bld, BRW_AOP_OR, instr);
2794 case nir_intrinsic_ssbo_atomic_xor:
2795 nir_emit_ssbo_atomic(bld, BRW_AOP_XOR, instr);
2797 case nir_intrinsic_ssbo_atomic_exchange:
2798 nir_emit_ssbo_atomic(bld, BRW_AOP_MOV, instr);
2800 case nir_intrinsic_ssbo_atomic_comp_swap:
2801 nir_emit_ssbo_atomic(bld, BRW_AOP_CMPWR, instr);
2804 case nir_intrinsic_get_buffer_size: {
2805 nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
2806 unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;
2807 int reg_width = dispatch_width / 8;
2810 fs_reg source = brw_imm_d(0);
2812 int mlen = 1 * reg_width;
2814 /* A resinfo sampler message is used to get the buffer size.
2815 * The SIMD8 writeback message consists of four registers and the
2816 * SIMD16 writeback message consists of 8 destination registers
2817 * (two per component), although we are only interested in the
2818 * first component, where resinfo returns the buffer size.
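/* For example, in SIMD8 (reg_width == 1) the payload is a single
 * register and the writeback occupies 4 * mlen == 4 registers; only the
 * first component of that result, the buffer size, is copied to dest.
 */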
2821 int regs_written = 4 * mlen;
2822 fs_reg src_payload = fs_reg(VGRF, alloc.allocate(mlen),
2823 BRW_REGISTER_TYPE_UD);
2824 bld.LOAD_PAYLOAD(src_payload, &source, 1, 0);
2825 fs_reg buffer_size = fs_reg(VGRF, alloc.allocate(regs_written),
2826 BRW_REGISTER_TYPE_UD);
2827 const unsigned index = prog_data->binding_table.ssbo_start + ssbo_index;
2828 fs_inst *inst = bld.emit(FS_OPCODE_GET_BUFFER_SIZE, buffer_size,
2829 src_payload, brw_imm_ud(index));
2830 inst->header_size = 0;
2832 inst->regs_written = regs_written;
2834 bld.MOV(retype(dest, buffer_size.type), buffer_size);
2836 brw_mark_surface_used(prog_data, index);
2841 unreachable("unknown intrinsic");
2846 fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
2847 int op, nir_intrinsic_instr *instr)
2850 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2851 dest = get_nir_dest(instr->dest);
2854 nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
2855 if (const_surface) {
2856 unsigned surf_index = stage_prog_data->binding_table.ssbo_start +
2857 const_surface->u[0];
2858 surface = brw_imm_ud(surf_index);
2859 brw_mark_surface_used(prog_data, surf_index);
2861 surface = vgrf(glsl_type::uint_type);
2862 bld.ADD(surface, get_nir_src(instr->src[0]),
2863 brw_imm_ud(stage_prog_data->binding_table.ssbo_start));
2865 /* Assume this may touch any SSBO. This is the same we do for other
2866 * UBO/SSBO accesses with non-constant surface.
2868 brw_mark_surface_used(prog_data,
2869 stage_prog_data->binding_table.ssbo_start +
2870 nir->info.num_ssbos - 1);
2873 fs_reg offset = get_nir_src(instr->src[1]);
2874 fs_reg data1 = get_nir_src(instr->src[2]);
2876 if (op == BRW_AOP_CMPWR)
2877 data2 = get_nir_src(instr->src[3]);
2879 /* Emit the actual atomic operation */
2881 fs_reg atomic_result =
2882 surface_access::emit_untyped_atomic(bld, surface, offset,
2884 1 /* dims */, 1 /* rsize */,
2886 BRW_PREDICATE_NONE);
2887 dest.type = atomic_result.type;
2888 bld.MOV(dest, atomic_result);
2892 fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
2893 int op, nir_intrinsic_instr *instr)
2896 if (nir_intrinsic_infos[instr->intrinsic].has_dest)
2897 dest = get_nir_dest(instr->dest);
2899 fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
2900 fs_reg offset = get_nir_src(instr->src[0]);
2901 fs_reg data1 = get_nir_src(instr->src[1]);
2903 if (op == BRW_AOP_CMPWR)
2904 data2 = get_nir_src(instr->src[2]);
2906 /* Emit the actual atomic operation */
2908 fs_reg atomic_result =
2909 surface_access::emit_untyped_atomic(bld, surface, offset,
2911 1 /* dims */, 1 /* rsize */,
2913 BRW_PREDICATE_NONE);
2914 dest.type = atomic_result.type;
2915 bld.MOV(dest, atomic_result);
2919 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
2921 unsigned texture = instr->texture_index;
2922 unsigned sampler = instr->sampler_index;
2923 fs_reg texture_reg(brw_imm_ud(texture));
2924 fs_reg sampler_reg(brw_imm_ud(sampler));
2926 int gather_component = instr->component;
2928 bool is_cube_array = instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
2931 int lod_components = 0;
2932 int UNUSED offset_components = 0;
2934 fs_reg coordinate, shadow_comparitor, lod, lod2, sample_index, mcs, tex_offset;
2936 /* Our hardware requires a LOD for buffer textures */
2937 if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
2940 for (unsigned i = 0; i < instr->num_srcs; i++) {
2941 fs_reg src = get_nir_src(instr->src[i].src);
2942 switch (instr->src[i].src_type) {
2943 case nir_tex_src_bias:
2944 lod = retype(src, BRW_REGISTER_TYPE_F);
2946 case nir_tex_src_comparitor:
2947 shadow_comparitor = retype(src, BRW_REGISTER_TYPE_F);
2949 case nir_tex_src_coord:
2950 switch (instr->op) {
2952 case nir_texop_txf_ms:
2953 case nir_texop_samples_identical:
2954 coordinate = retype(src, BRW_REGISTER_TYPE_D);
2957 coordinate = retype(src, BRW_REGISTER_TYPE_F);
2961 case nir_tex_src_ddx:
2962 lod = retype(src, BRW_REGISTER_TYPE_F);
2963 lod_components = nir_tex_instr_src_size(instr, i);
2965 case nir_tex_src_ddy:
2966 lod2 = retype(src, BRW_REGISTER_TYPE_F);
2968 case nir_tex_src_lod:
2969 switch (instr->op) {
2971 lod = retype(src, BRW_REGISTER_TYPE_UD);
2974 lod = retype(src, BRW_REGISTER_TYPE_D);
2977 lod = retype(src, BRW_REGISTER_TYPE_F);
2981 case nir_tex_src_ms_index:
2982 sample_index = retype(src, BRW_REGISTER_TYPE_UD);
2984 case nir_tex_src_offset:
2985 tex_offset = retype(src, BRW_REGISTER_TYPE_D);
2986 if (instr->is_array)
2987 offset_components = instr->coord_components - 1;
2989 offset_components = instr->coord_components;
2991 case nir_tex_src_projector:
2992 unreachable("should be lowered");
2994 case nir_tex_src_texture_offset: {
2995 /* Figure out the highest possible texture index and mark it as used */
2996 uint32_t max_used = texture + instr->texture_array_size - 1;
2997 if (instr->op == nir_texop_tg4 && devinfo->gen < 8) {
2998 max_used += stage_prog_data->binding_table.gather_texture_start;
3000 max_used += stage_prog_data->binding_table.texture_start;
3002 brw_mark_surface_used(prog_data, max_used);
3004 /* Emit code to evaluate the actual indexing expression */
3005 texture_reg = vgrf(glsl_type::uint_type);
3006 bld.ADD(texture_reg, src, brw_imm_ud(texture));
3007 texture_reg = bld.emit_uniformize(texture_reg);
3011 case nir_tex_src_sampler_offset: {
3012 /* Emit code to evaluate the actual indexing expression */
3013 sampler_reg = vgrf(glsl_type::uint_type);
3014 bld.ADD(sampler_reg, src, brw_imm_ud(sampler));
3015 sampler_reg = bld.emit_uniformize(sampler_reg);
3020 unreachable("unknown texture source");
3024 if (instr->op == nir_texop_txf_ms ||
3025 instr->op == nir_texop_samples_identical) {
3026 if (devinfo->gen >= 7 &&
3027 key_tex->compressed_multisample_layout_mask & (1 << texture)) {
3028 mcs = emit_mcs_fetch(coordinate, instr->coord_components, texture_reg);
3030 mcs = brw_imm_ud(0u);
3034 for (unsigned i = 0; i < 3; i++) {
3035 if (instr->const_offset[i] != 0) {
3036 assert(offset_components == 0);
3037 tex_offset = brw_imm_ud(brw_texture_offset(instr->const_offset, 3));
3042 enum glsl_base_type dest_base_type =
3043 brw_glsl_base_type_for_nir_type (instr->dest_type);
3045 const glsl_type *dest_type =
3046 glsl_type::get_instance(dest_base_type, nir_tex_instr_dest_size(instr),
3049 ir_texture_opcode op;
3050 switch (instr->op) {
3051 case nir_texop_lod: op = ir_lod; break;
3052 case nir_texop_query_levels: op = ir_query_levels; break;
3053 case nir_texop_tex: op = ir_tex; break;
3054 case nir_texop_tg4: op = ir_tg4; break;
3055 case nir_texop_txb: op = ir_txb; break;
3056 case nir_texop_txd: op = ir_txd; break;
3057 case nir_texop_txf: op = ir_txf; break;
3058 case nir_texop_txf_ms: op = ir_txf_ms; break;
3059 case nir_texop_txl: op = ir_txl; break;
3060 case nir_texop_txs: op = ir_txs; break;
3061 case nir_texop_texture_samples: {
3062 fs_reg dst = retype(get_nir_dest(instr->dest), BRW_REGISTER_TYPE_D);
3063 fs_inst *inst = bld.emit(SHADER_OPCODE_SAMPLEINFO, dst,
3064 bld.vgrf(BRW_REGISTER_TYPE_D, 1),
3065 texture_reg, texture_reg);
3067 inst->header_size = 1;
3068 inst->base_mrf = -1;
3071 case nir_texop_samples_identical: op = ir_samples_identical; break;
3073 unreachable("unknown texture opcode");
3076 emit_texture(op, dest_type, coordinate, instr->coord_components,
3077 shadow_comparitor, lod, lod2, lod_components, sample_index,
3078 tex_offset, mcs, gather_component,
3079 is_cube_array, texture, texture_reg, sampler, sampler_reg);
3081 fs_reg dest = get_nir_dest(instr->dest);
3082 dest.type = this->result.type;
3083 unsigned num_components = nir_tex_instr_dest_size(instr);
3084 emit_percomp(bld, fs_inst(BRW_OPCODE_MOV, bld.dispatch_width(),
3085 dest, this->result),
3086 (1 << num_components) - 1);
3090 fs_visitor::nir_emit_jump(const fs_builder &bld, nir_jump_instr *instr)
3092 switch (instr->type) {
3093 case nir_jump_break:
3094 bld.emit(BRW_OPCODE_BREAK);
3096 case nir_jump_continue:
3097 bld.emit(BRW_OPCODE_CONTINUE);
3099 case nir_jump_return:
3101 unreachable("unknown jump");