/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_nir.h"
#include "brw_vec4.h"
#include "brw_vec4_builder.h"
#include "brw_vec4_surface_builder.h"
#include "brw_program.h"

using namespace brw::surface_access;

namespace brw {

void
vec4_visitor::emit_nir_code()
{
   if (nir->num_uniforms > 0)
      nir_setup_uniforms();

   nir_setup_system_values();

   /* get the main function and emit it */
   nir_foreach_function(nir, function) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_emit_impl(function->impl);
   }
}

void
vec4_visitor::nir_setup_system_value_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg *reg;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id().");

   case nir_intrinsic_load_vertex_id_zero_base:
      reg = &nir_system_values[SYSTEM_VALUE_VERTEX_ID_ZERO_BASE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_VERTEX_ID_ZERO_BASE,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_base_vertex:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_VERTEX];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_VERTEX,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_instance_id:
      reg = &nir_system_values[SYSTEM_VALUE_INSTANCE_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_INSTANCE_ID,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_base_instance:
      reg = &nir_system_values[SYSTEM_VALUE_BASE_INSTANCE];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_BASE_INSTANCE,
                                           glsl_type::int_type);
      break;

   case nir_intrinsic_load_draw_id:
      reg = &nir_system_values[SYSTEM_VALUE_DRAW_ID];
      if (reg->file == BAD_FILE)
         *reg = *make_reg_for_system_value(SYSTEM_VALUE_DRAW_ID,
                                           glsl_type::int_type);
      break;

   default:
      break;
   }
}

static bool
setup_system_values_block(nir_block *block, void *void_visitor)
{
   vec4_visitor *v = (vec4_visitor *)void_visitor;

   nir_foreach_instr(block, instr) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);
      v->nir_setup_system_value_intrinsic(intrin);
   }

   return true;
}

void
vec4_visitor::nir_setup_system_values()
{
   nir_system_values = ralloc_array(mem_ctx, dst_reg, SYSTEM_VALUE_MAX);
   for (unsigned i = 0; i < SYSTEM_VALUE_MAX; i++) {
      nir_system_values[i] = dst_reg();
   }

   nir_foreach_function(nir, function) {
      assert(strcmp(function->name, "main") == 0);
      assert(function->impl);
      nir_foreach_block(function->impl, setup_system_values_block, this);
   }
}

void
vec4_visitor::nir_setup_uniforms()
{
   uniforms = nir->num_uniforms / 16;
}

void
vec4_visitor::nir_emit_impl(nir_function_impl *impl)
{
   nir_locals = ralloc_array(mem_ctx, dst_reg, impl->reg_alloc);
   for (unsigned i = 0; i < impl->reg_alloc; i++) {
      nir_locals[i] = dst_reg();
   }

   foreach_list_typed(nir_register, reg, node, &impl->registers) {
      unsigned array_elems =
         reg->num_array_elems == 0 ? 1 : reg->num_array_elems;

      nir_locals[reg->index] = dst_reg(VGRF, alloc.allocate(array_elems));
   }

   nir_ssa_values = ralloc_array(mem_ctx, dst_reg, impl->ssa_alloc);

   nir_emit_cf_list(&impl->body);
}

void
vec4_visitor::nir_emit_cf_list(exec_list *list)
{
   exec_list_validate(list);
   foreach_list_typed(nir_cf_node, node, node, list) {
      switch (node->type) {
      case nir_cf_node_if:
         nir_emit_if(nir_cf_node_as_if(node));
         break;

      case nir_cf_node_loop:
         nir_emit_loop(nir_cf_node_as_loop(node));
         break;

      case nir_cf_node_block:
         nir_emit_block(nir_cf_node_as_block(node));
         break;

      default:
         unreachable("Invalid CFG node block");
      }
   }
}

void
vec4_visitor::nir_emit_if(nir_if *if_stmt)
{
   /* First, put the condition in f0 */
   src_reg condition = get_nir_src(if_stmt->condition, BRW_REGISTER_TYPE_D, 1);
   vec4_instruction *inst = emit(MOV(dst_null_d(), condition));
   inst->conditional_mod = BRW_CONDITIONAL_NZ;

   /* We can just predicate based on the X channel, as the condition only
    * goes on its own line */
   emit(IF(BRW_PREDICATE_ALIGN16_REPLICATE_X));

   nir_emit_cf_list(&if_stmt->then_list);

   /* note: if the else is empty, dead CF elimination will remove it */
   emit(BRW_OPCODE_ELSE);

   nir_emit_cf_list(&if_stmt->else_list);

   emit(BRW_OPCODE_ENDIF);
}

void
vec4_visitor::nir_emit_loop(nir_loop *loop)
{
   emit(BRW_OPCODE_DO);

   nir_emit_cf_list(&loop->body);

   emit(BRW_OPCODE_WHILE);
}

void
vec4_visitor::nir_emit_block(nir_block *block)
{
   nir_foreach_instr(block, instr) {
      nir_emit_instr(instr);
   }
}

void
vec4_visitor::nir_emit_instr(nir_instr *instr)
{
   base_ir = instr;

   switch (instr->type) {
   case nir_instr_type_load_const:
      nir_emit_load_const(nir_instr_as_load_const(instr));
      break;

   case nir_instr_type_intrinsic:
      nir_emit_intrinsic(nir_instr_as_intrinsic(instr));
      break;

   case nir_instr_type_alu:
      nir_emit_alu(nir_instr_as_alu(instr));
      break;

   case nir_instr_type_jump:
      nir_emit_jump(nir_instr_as_jump(instr));
      break;

   case nir_instr_type_tex:
      nir_emit_texture(nir_instr_as_tex(instr));
      break;

   case nir_instr_type_ssa_undef:
      nir_emit_undef(nir_instr_as_ssa_undef(instr));
      break;

   default:
      fprintf(stderr, "VS instruction not yet implemented by NIR->vec4\n");
      break;
   }
}

static dst_reg
dst_reg_for_nir_reg(vec4_visitor *v, nir_register *nir_reg,
                    unsigned base_offset, nir_src *indirect)
{
   dst_reg reg;

   reg = v->nir_locals[nir_reg->index];
   reg = offset(reg, base_offset);
   if (indirect)
      reg.reladdr =
         new(v->mem_ctx) src_reg(v->get_nir_src(*indirect,
                                                BRW_REGISTER_TYPE_D,
                                                1));

   return reg;
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest)
{
   if (dest.is_ssa) {
      dst_reg dst = dst_reg(VGRF, alloc.allocate(1));
      nir_ssa_values[dest.ssa.index] = dst;
      return dst;
   } else {
      return dst_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
                                 dest.reg.indirect);
   }
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, enum brw_reg_type type)
{
   return retype(get_nir_dest(dest), type);
}

dst_reg
vec4_visitor::get_nir_dest(nir_dest dest, nir_alu_type type)
{
   return get_nir_dest(dest, brw_type_for_nir_type(type));
}

src_reg
vec4_visitor::get_nir_src(nir_src src, enum brw_reg_type type,
                          unsigned num_components)
{
   dst_reg reg;

   if (src.is_ssa) {
      assert(src.ssa != NULL);
      reg = nir_ssa_values[src.ssa->index];
   } else {
      reg = dst_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
                                src.reg.indirect);
   }

   reg = retype(reg, type);

   src_reg reg_as_src = src_reg(reg);
   reg_as_src.swizzle = brw_swizzle_for_size(num_components);
   return reg_as_src;
}

src_reg
vec4_visitor::get_nir_src(nir_src src, nir_alu_type type,
                          unsigned num_components)
{
   return get_nir_src(src, brw_type_for_nir_type(type), num_components);
}

src_reg
vec4_visitor::get_nir_src(nir_src src, unsigned num_components)
{
   /* if type is not specified, default to signed int */
   return get_nir_src(src, nir_type_int, num_components);
}

src_reg
vec4_visitor::get_indirect_offset(nir_intrinsic_instr *instr)
{
   nir_src *offset_src = nir_get_io_offset_src(instr);
   nir_const_value *const_value = nir_src_as_const_value(*offset_src);

   if (const_value) {
      /* The only constant offset we should find is 0.  brw_nir.c's
       * add_const_offset_to_base() will fold other constant offsets
       * into instr->const_index[0].
       */
      assert(const_value->u[0] == 0);
      return src_reg();
   }

   return get_nir_src(*offset_src, BRW_REGISTER_TYPE_UD, 1);
}

void
vec4_visitor::nir_emit_load_const(nir_load_const_instr *instr)
{
   dst_reg reg = dst_reg(VGRF, alloc.allocate(1));
   reg.type = BRW_REGISTER_TYPE_D;

   unsigned remaining = brw_writemask_for_size(instr->def.num_components);

   /* @FIXME: consider emitting vector operations to save some MOVs in
    * cases where the components are representable in 8 bits.
    * For now, we emit a MOV for each distinct value.
    */
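   /* Illustrative example (not from the original source): loading
    * vec4(1.0, 1.0, 2.0, 2.0) emits two MOVs, one with writemask .xy for the
    * value 1.0 and one with .zw for 2.0, because the inner loop below folds
    * equal components into a single writemask.
    */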
   for (unsigned i = 0; i < instr->def.num_components; i++) {
      unsigned writemask = 1 << i;

      if ((remaining & writemask) == 0)
         continue;

      for (unsigned j = i; j < instr->def.num_components; j++) {
         if (instr->value.u[i] == instr->value.u[j]) {
            writemask |= 1 << j;
         }
      }

      reg.writemask = writemask;
      emit(MOV(reg, brw_imm_d(instr->value.i[i])));

      remaining &= ~writemask;
   }

   /* Set final writemask */
   reg.writemask = brw_writemask_for_size(instr->def.num_components);

   nir_ssa_values[instr->def.index] = reg;
}

void
vec4_visitor::nir_emit_intrinsic(nir_intrinsic_instr *instr)
{
   dst_reg dest;
   src_reg src;

   switch (instr->intrinsic) {
   case nir_intrinsic_load_input: {
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);

      /* We set EmitNoIndirectInput for VS */
      assert(const_offset);

      src = src_reg(ATTR, instr->const_index[0] + const_offset->u[0],
                    glsl_type::uvec4_type);

      dest = get_nir_dest(instr->dest, src.type);
      dest.writemask = brw_writemask_for_size(instr->num_components);

      emit(MOV(dest, src));
      break;
   }

   case nir_intrinsic_store_output: {
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      assert(const_offset);

      int varying = instr->const_index[0] + const_offset->u[0];

      src = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_F,
                        instr->num_components);

      output_reg[varying] = dst_reg(src);
      break;
   }

   case nir_intrinsic_get_buffer_size: {
      nir_const_value *const_uniform_block = nir_src_as_const_value(instr->src[0]);
      unsigned ssbo_index = const_uniform_block ? const_uniform_block->u[0] : 0;

      const unsigned index =
         prog_data->base.binding_table.ssbo_start + ssbo_index;
      dst_reg result_dst = get_nir_dest(instr->dest);
      vec4_instruction *inst = new(mem_ctx)
         vec4_instruction(VS_OPCODE_GET_BUFFER_SIZE, result_dst);

      inst->base_mrf = 2;
      inst->mlen = 1; /* always at least one */
      inst->src[1] = brw_imm_ud(index);

      /* MRF for the first parameter */
      src_reg lod = brw_imm_d(0);
      int param_base = inst->base_mrf;
      int writemask = WRITEMASK_X;
      emit(MOV(dst_reg(MRF, param_base, glsl_type::int_type, writemask), lod));

      emit(inst);

      brw_mark_surface_used(&prog_data->base, index);
      break;
   }

   case nir_intrinsic_store_ssbo: {
      assert(devinfo->gen >= 7);

      /* Block index */
      src_reg surf_index;
      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[1]);
      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[1], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info.num_ssbos - 1);
      }

      /* Offset */
      src_reg offset_reg;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
      if (const_offset) {
         offset_reg = brw_imm_ud(const_offset->u[0]);
      } else {
         offset_reg = get_nir_src(instr->src[2], 1);
      }

      /* Value */
      src_reg val_reg = get_nir_src(instr->src[0], 4);

      /* Writemask */
      unsigned write_mask = instr->const_index[0];

      /* IvyBridge does not have a native SIMD4x2 untyped write message so untyped
       * writes will use SIMD8 mode. In order to hide this and keep symmetry across
       * typed and untyped messages and across hardware platforms, the
       * current implementation of the untyped messages will transparently convert
       * the SIMD4x2 payload into an equivalent SIMD8 payload by transposing it
       * and enabling only channel X on the SEND instruction.
       *
       * The above works well for full vector writes, but not for partial writes
       * where we want to write some channels and not others, like when we have
       * code such as v.xyw = vec3(1,2,4). Because the untyped write messages are
       * quite restrictive with regards to the channel enables we can configure in
       * the message descriptor (not all combinations are allowed) we cannot simply
       * implement these scenarios with a single message while keeping the
       * aforementioned symmetry in the implementation. For now we have decided that
       * it is better to keep the symmetry to reduce complexity, so in situations
       * such as the one described we end up emitting two untyped write messages
       * (one for xy and another for w).
       *
       * The code below packs consecutive channels into a single write message,
       * detects gaps in the vector write and if needed, sends a second message
       * with the remaining channels. If in the future we decide that we want to
       * emit a single message at the expense of losing the symmetry in the
       * implementation we can:
       *
       * 1) For IvyBridge: Only use the red channel of the untyped write SIMD8
       *    message payload. In this mode we can write up to 8 offsets and dwords
       *    to the red channel only (for the two vec4s in the SIMD4x2 execution)
       *    and select which of the 8 channels carry data to write by setting the
       *    appropriate writemask in the dst register of the SEND instruction.
       *    It would require writing a new generator opcode specifically for
       *    IvyBridge since we would need to prepare a SIMD8 payload that could
       *    use any channel, not just X.
       *
       * 2) For Haswell+: Simply send a single write message but set the writemask
       *    on the dst of the SEND instruction to select the channels we want to
       *    write. It would require modifying the current messages to receive
       *    and honor the writemask provided.
       */
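      /* Worked example (illustrative): a partial write such as
       * v.xyw = vec3(1,2,4) has write_mask = XYW. The loop below first packs
       * x and y into one untyped write message at the base offset, then,
       * after accounting for the skipped z channel, emits a second message
       * that writes w at offset + 12 bytes.
       */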

      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      int swizzle[4] = { 0, 0, 0, 0};
      int num_channels = 0;
      unsigned skipped_channels = 0;
      int num_components = instr->num_components;
      for (int i = 0; i < num_components; i++) {
         /* Check if this channel needs to be written. If so, record the
          * channel we need to take the data from in the swizzle array
          */
         int component_mask = 1 << i;
         int write_test = write_mask & component_mask;
         if (write_test)
            swizzle[num_channels++] = i;

         /* If we don't have to write this channel it means we have a gap in the
          * vector, so write the channels we accumulated until now, if any. Do
          * the same if this was the last component in the vector.
          */
         if (!write_test || i == num_components - 1) {
            if (num_channels > 0) {
               /* We have channels to write, so update the offset we need to
                * write at to skip the channels we skipped, if any.
                */
               if (skipped_channels > 0) {
                  if (offset_reg.file == IMM) {
                     offset_reg.ud += 4 * skipped_channels;
                  } else {
                     emit(ADD(dst_reg(offset_reg), offset_reg,
                              brw_imm_ud(4 * skipped_channels)));
                  }
               }

               /* Swizzle the data register so we take the data from the channels
                * we need to write and send the write message. This will write
                * num_channels consecutive dwords starting at offset.
                */
               val_reg.swizzle =
                  BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
               emit_untyped_write(bld, surf_index, offset_reg, val_reg,
                                  1 /* dims */, num_channels /* size */,
                                  BRW_PREDICATE_NONE);

               /* If we have to do a second write we will have to update the
                * offset so that we jump over the channels we have just written
                * now.
                */
               skipped_channels = num_channels;

               /* Restart the count for the next write message */
               num_channels = 0;
            }

            /* We did not write the current channel, so increase skipped count */
            skipped_channels++;
         }
      }

      break;
   }

   case nir_intrinsic_load_ssbo: {
      assert(devinfo->gen >= 7);

      src_reg surf_index;
      nir_const_value *const_uniform_block =
         nir_src_as_const_value(instr->src[0]);

      if (const_uniform_block) {
         unsigned index = prog_data->base.binding_table.ssbo_start +
                          const_uniform_block->u[0];
         surf_index = brw_imm_ud(index);

         brw_mark_surface_used(&prog_data->base, index);
      } else {
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], 1),
                  brw_imm_ud(prog_data->base.binding_table.ssbo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any SSBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ssbo_start +
                               nir->info.num_ssbos - 1);
      }

      src_reg offset_reg;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      if (const_offset) {
         offset_reg = brw_imm_ud(const_offset->u[0]);
      } else {
         offset_reg = get_nir_src(instr->src[1], 1);
      }

      /* Read the vector */
      const vec4_builder bld = vec4_builder(this).at_end()
                               .annotate(current_annotation, base_ir);

      src_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
                                              1 /* dims */, 4 /* size*/,
                                              BRW_PREDICATE_NONE);
      dst_reg dest = get_nir_dest(instr->dest);
      read_result.type = dest.type;
      read_result.swizzle = brw_swizzle_for_size(instr->num_components);
      emit(MOV(dest, read_result));

      break;
   }

   case nir_intrinsic_ssbo_atomic_add:
      nir_emit_ssbo_atomic(BRW_AOP_ADD, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imin:
      nir_emit_ssbo_atomic(BRW_AOP_IMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umin:
      nir_emit_ssbo_atomic(BRW_AOP_UMIN, instr);
      break;
   case nir_intrinsic_ssbo_atomic_imax:
      nir_emit_ssbo_atomic(BRW_AOP_IMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_umax:
      nir_emit_ssbo_atomic(BRW_AOP_UMAX, instr);
      break;
   case nir_intrinsic_ssbo_atomic_and:
      nir_emit_ssbo_atomic(BRW_AOP_AND, instr);
      break;
   case nir_intrinsic_ssbo_atomic_or:
      nir_emit_ssbo_atomic(BRW_AOP_OR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_xor:
      nir_emit_ssbo_atomic(BRW_AOP_XOR, instr);
      break;
   case nir_intrinsic_ssbo_atomic_exchange:
      nir_emit_ssbo_atomic(BRW_AOP_MOV, instr);
      break;
   case nir_intrinsic_ssbo_atomic_comp_swap:
      nir_emit_ssbo_atomic(BRW_AOP_CMPWR, instr);
      break;

   case nir_intrinsic_load_vertex_id:
      unreachable("should be lowered by lower_vertex_id()");

   case nir_intrinsic_load_vertex_id_zero_base:
   case nir_intrinsic_load_base_vertex:
   case nir_intrinsic_load_instance_id:
   case nir_intrinsic_load_base_instance:
   case nir_intrinsic_load_draw_id:
   case nir_intrinsic_load_invocation_id:
   case nir_intrinsic_load_tess_level_inner:
   case nir_intrinsic_load_tess_level_outer: {
      gl_system_value sv = nir_system_value_from_intrinsic(instr->intrinsic);
      src_reg val = src_reg(nir_system_values[sv]);
      assert(val.file != BAD_FILE);
      dest = get_nir_dest(instr->dest, val.type);
      emit(MOV(dest, val));
      break;
   }

   case nir_intrinsic_load_uniform: {
      /* Offsets are in bytes but they should always be multiples of 16 */
      assert(instr->const_index[0] % 16 == 0);
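
      /* E.g. (illustrative): a uniform at byte offset 32 lives in UNIFORM
       * register 32 / 16 = 2, since each uniform register holds one vec4.
       */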
      dest = get_nir_dest(instr->dest);

      src = src_reg(dst_reg(UNIFORM, instr->const_index[0] / 16));
      src.type = dest.type;

      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
      if (const_offset) {
         /* Offsets are in bytes but they should always be multiples of 16 */
         assert(const_offset->u[0] % 16 == 0);
         src.reg_offset = const_offset->u[0] / 16;

         emit(MOV(dest, src));
      } else {
         src_reg indirect = get_nir_src(instr->src[0], BRW_REGISTER_TYPE_UD, 1);

         emit(SHADER_OPCODE_MOV_INDIRECT, dest, src,
              indirect, brw_imm_ud(instr->const_index[1]));
      }
      break;
   }

   case nir_intrinsic_atomic_counter_read:
   case nir_intrinsic_atomic_counter_inc:
   case nir_intrinsic_atomic_counter_dec: {
      unsigned surf_index = prog_data->base.binding_table.abo_start +
         (unsigned) instr->const_index[0];
      src_reg offset = get_nir_src(instr->src[0], nir_type_int,
                                   instr->num_components);
      dest = get_nir_dest(instr->dest);

      switch (instr->intrinsic) {
      case nir_intrinsic_atomic_counter_inc:
         emit_untyped_atomic(BRW_AOP_INC, surf_index, dest, offset,
                             src_reg(), src_reg());
         break;
      case nir_intrinsic_atomic_counter_dec:
         emit_untyped_atomic(BRW_AOP_PREDEC, surf_index, dest, offset,
                             src_reg(), src_reg());
         break;
      case nir_intrinsic_atomic_counter_read:
         emit_untyped_surface_read(surf_index, dest, offset);
         break;
      default:
         unreachable("Unreachable");
      }

      brw_mark_surface_used(stage_prog_data, surf_index);
      break;
   }

   case nir_intrinsic_load_ubo: {
      nir_const_value *const_block_index = nir_src_as_const_value(instr->src[0]);
      src_reg surf_index;

      dest = get_nir_dest(instr->dest);

      if (const_block_index) {
         /* The block index is a constant, so just emit the binding table entry
          * as an immediate.
          */
         const unsigned index = prog_data->base.binding_table.ubo_start +
                                const_block_index->u[0];
         surf_index = brw_imm_ud(index);
         brw_mark_surface_used(&prog_data->base, index);
      } else {
         /* The block index is not a constant. Evaluate the index expression
          * per-channel and add the base UBO index; we have to select a value
          * from any live channel.
          */
         surf_index = src_reg(this, glsl_type::uint_type);
         emit(ADD(dst_reg(surf_index), get_nir_src(instr->src[0], nir_type_int,
                                                   instr->num_components),
                  brw_imm_ud(prog_data->base.binding_table.ubo_start)));
         surf_index = emit_uniformize(surf_index);

         /* Assume this may touch any UBO. It would be nice to provide
          * a tighter bound, but the array information is already lowered away.
          */
         brw_mark_surface_used(&prog_data->base,
                               prog_data->base.binding_table.ubo_start +
                               nir->info.num_ubos - 1);
      }

      src_reg offset;
      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
      if (const_offset) {
         offset = brw_imm_ud(const_offset->u[0] & ~15);
      } else {
         offset = get_nir_src(instr->src[1], nir_type_int, 1);
      }

      src_reg packed_consts = src_reg(this, glsl_type::vec4_type);
      packed_consts.type = dest.type;

      emit_pull_constant_load_reg(dst_reg(packed_consts),
                                  surf_index,
                                  offset,
                                  NULL, NULL /* before_block/inst */);

      packed_consts.swizzle = brw_swizzle_for_size(instr->num_components);
      if (const_offset) {
         packed_consts.swizzle += BRW_SWIZZLE4(const_offset->u[0] % 16 / 4,
                                               const_offset->u[0] % 16 / 4,
                                               const_offset->u[0] % 16 / 4,
                                               const_offset->u[0] % 16 / 4);
      }
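
      /* E.g. (illustrative): a scalar load at byte offset 8 selects
       * component 8 % 16 / 4 = 2 of the pulled vec4, i.e. swizzle .zzzz.
       */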
      emit(MOV(dest, packed_consts));
      break;
   }

   case nir_intrinsic_memory_barrier: {
      const vec4_builder bld =
         vec4_builder(this).at_end().annotate(current_annotation, base_ir);
      const dst_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
      bld.emit(SHADER_OPCODE_MEMORY_FENCE, tmp)
         .regs_written = 2;
      break;
   }

   case nir_intrinsic_shader_clock: {
      /* We cannot do anything if there is an event, so ignore it for now */
      const src_reg shader_clock = get_timestamp();
      const enum brw_reg_type type = brw_type_for_base_type(glsl_type::uvec2_type);

      dest = get_nir_dest(instr->dest, type);
      emit(MOV(dest, shader_clock));
      break;
   }

   default:
      unreachable("Unknown intrinsic");
   }
}

void
vec4_visitor::nir_emit_ssbo_atomic(int op, nir_intrinsic_instr *instr)
{
   dst_reg dest;
   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
      dest = get_nir_dest(instr->dest);

   src_reg surface;
   nir_const_value *const_surface = nir_src_as_const_value(instr->src[0]);
   if (const_surface) {
      unsigned surf_index = prog_data->base.binding_table.ssbo_start +
                            const_surface->u[0];
      surface = brw_imm_ud(surf_index);
      brw_mark_surface_used(&prog_data->base, surf_index);
   } else {
      surface = src_reg(this, glsl_type::uint_type);
      emit(ADD(dst_reg(surface), get_nir_src(instr->src[0]),
               brw_imm_ud(prog_data->base.binding_table.ssbo_start)));

      /* Assume this may touch any SSBO. This is the same as we do for other
       * UBO/SSBO accesses with a non-constant surface.
       */
      brw_mark_surface_used(&prog_data->base,
                            prog_data->base.binding_table.ssbo_start +
                            nir->info.num_ssbos - 1);
   }

   src_reg offset = get_nir_src(instr->src[1], 1);
   src_reg data1 = get_nir_src(instr->src[2], 1);
   src_reg data2;
   if (op == BRW_AOP_CMPWR)
      data2 = get_nir_src(instr->src[3], 1);

   /* Emit the actual atomic operation */
   const vec4_builder bld =
      vec4_builder(this).at_end().annotate(current_annotation, base_ir);

   src_reg atomic_result =
      surface_access::emit_untyped_atomic(bld, surface, offset,
                                          data1, data2,
                                          1 /* dims */, 1 /* rsize */,
                                          op,
                                          BRW_PREDICATE_NONE);
   dest.type = atomic_result.type;
   bld.MOV(dest, atomic_result);
}

static unsigned
brw_swizzle_for_nir_swizzle(uint8_t swizzle[4])
{
   return BRW_SWIZZLE4(swizzle[0], swizzle[1], swizzle[2], swizzle[3]);
}

static enum brw_conditional_mod
brw_conditional_for_nir_comparison(nir_op op)
{
   switch (op) {
   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
      return BRW_CONDITIONAL_L;

   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
      return BRW_CONDITIONAL_GE;

   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      return BRW_CONDITIONAL_Z;

   case nir_op_fne:
   case nir_op_ine:
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      return BRW_CONDITIONAL_NZ;

   default:
      unreachable("not reached: bad operation for comparison");
   }
}

bool
vec4_visitor::optimize_predicate(nir_alu_instr *instr,
                                 enum brw_predicate *predicate)
{
   if (!instr->src[0].src.is_ssa ||
       instr->src[0].src.ssa->parent_instr->type != nir_instr_type_alu)
      return false;

   nir_alu_instr *cmp_instr =
      nir_instr_as_alu(instr->src[0].src.ssa->parent_instr);

   switch (cmp_instr->op) {
   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4:
      *predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   default:
      return false;
   }

   unsigned size_swizzle =
      brw_swizzle_for_size(nir_op_infos[cmp_instr->op].input_sizes[0]);
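
   /* E.g. (illustrative): for bany_inequal3 the operands are vec3s, so
    * size_swizzle is XYZZ; composing it below replicates channel Z into W so
    * the unused fourth channel cannot affect the ANY4H/ALL4H predicate.
    */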
   src_reg op[2];
   assert(nir_op_infos[cmp_instr->op].num_inputs == 2);
   for (unsigned i = 0; i < 2; i++) {
      op[i] = get_nir_src(cmp_instr->src[i].src,
                          nir_op_infos[cmp_instr->op].input_types[i], 4);
      unsigned base_swizzle =
         brw_swizzle_for_nir_swizzle(cmp_instr->src[i].swizzle);
      op[i].swizzle = brw_compose_swizzle(size_swizzle, base_swizzle);
      op[i].abs = cmp_instr->src[i].abs;
      op[i].negate = cmp_instr->src[i].negate;
   }

   emit(CMP(dst_null_d(), op[0], op[1],
            brw_conditional_for_nir_comparison(cmp_instr->op)));

   return true;
}

void
vec4_visitor::nir_emit_alu(nir_alu_instr *instr)
{
   vec4_instruction *inst;

   dst_reg dst = get_nir_dest(instr->dest.dest,
                              nir_op_infos[instr->op].output_type);
   dst.writemask = instr->dest.write_mask;

   src_reg op[4];
   for (unsigned i = 0; i < nir_op_infos[instr->op].num_inputs; i++) {
      op[i] = get_nir_src(instr->src[i].src,
                          nir_op_infos[instr->op].input_types[i], 4);
      op[i].swizzle = brw_swizzle_for_nir_swizzle(instr->src[i].swizzle);
      op[i].abs = instr->src[i].abs;
      op[i].negate = instr->src[i].negate;
   }

   switch (instr->op) {
   case nir_op_imov:
   case nir_op_fmov:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_vec2:
   case nir_op_vec3:
   case nir_op_vec4:
      unreachable("not reached: should be handled by lower_vec_to_movs()");

   case nir_op_i2f:
   case nir_op_u2f:
      inst = emit(MOV(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_f2i:
   case nir_op_f2u:
      inst = emit(MOV(dst, op[0]));
      break;

   case nir_op_fadd:
      /* fall through */
   case nir_op_iadd:
      inst = emit(ADD(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmul:
      inst = emit(MUL(dst, op[0], op[1]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_imul: {
      if (devinfo->gen < 8) {
         nir_const_value *value0 = nir_src_as_const_value(instr->src[0].src);
         nir_const_value *value1 = nir_src_as_const_value(instr->src[1].src);

         /* For integer multiplication, the MUL uses the low 16 bits of one of
          * the operands (src0 through SNB, src1 on IVB and later). The MACH
          * accumulates in the contribution of the upper 16 bits of that
          * operand. If we can determine that one of the args is in the low
          * 16 bits, though, we can just emit a single MUL.
          */
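         /* E.g. (illustrative): for a * 7 the constant fits in 16 bits, so a
          * single MUL suffices; the operand ordering below only matters
          * because the 16-bit operand must be src0 on Gen6 and earlier but
          * src1 on Gen7 and later.
          */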
         if (value0 && value0->u[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[0], op[1]));
            else
               emit(MUL(dst, op[1], op[0]));
         } else if (value1 && value1->u[0] < (1 << 16)) {
            if (devinfo->gen < 7)
               emit(MUL(dst, op[1], op[0]));
            else
               emit(MUL(dst, op[0], op[1]));
         } else {
            struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

            emit(MUL(acc, op[0], op[1]));
            emit(MACH(dst_null_d(), op[0], op[1]));
            emit(MOV(dst, src_reg(acc)));
         }
      } else {
         emit(MUL(dst, op[0], op[1]));
      }
      break;
   }

   case nir_op_imul_high:
   case nir_op_umul_high: {
      struct brw_reg acc = retype(brw_acc_reg(8), dst.type);

      if (devinfo->gen >= 8)
         emit(MUL(acc, op[0], retype(op[1], BRW_REGISTER_TYPE_UW)));
      else
         emit(MUL(acc, op[0], op[1]));

      emit(MACH(dst, op[0], op[1]));
      break;
   }

   case nir_op_frcp:
      inst = emit_math(SHADER_OPCODE_RCP, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fexp2:
      inst = emit_math(SHADER_OPCODE_EXP2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flog2:
      inst = emit_math(SHADER_OPCODE_LOG2, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fsin: {
      src_reg tmp = src_reg(this, glsl_type::vec4_type);
      inst = emit_math(SHADER_OPCODE_SIN, dst_reg(tmp), op[0]);
      if (instr->dest.saturate) {
         inst->dst = dst;
         inst->saturate = true;
      } else {
         emit(MUL(dst, tmp, brw_imm_f(0.99997)));
      }
      break;
   }

   case nir_op_fcos: {
      src_reg tmp = src_reg(this, glsl_type::vec4_type);
      inst = emit_math(SHADER_OPCODE_COS, dst_reg(tmp), op[0]);
      if (instr->dest.saturate) {
         inst->dst = dst;
         inst->saturate = true;
      } else {
         emit(MUL(dst, tmp, brw_imm_f(0.99997)));
      }
      break;
   }

   case nir_op_idiv:
   case nir_op_udiv:
      emit_math(SHADER_OPCODE_INT_QUOTIENT, dst, op[0], op[1]);
      break;

   case nir_op_umod:
   case nir_op_irem:
      /* According to the sign table for INT DIV in the Ivy Bridge PRM, it
       * appears that our hardware just does the right thing for signed
       * remainder.
       */
      emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);
      break;

   case nir_op_imod: {
      /* Get a regular C-style remainder. If a % b == 0, set the predicate. */
      inst = emit_math(SHADER_OPCODE_INT_REMAINDER, dst, op[0], op[1]);

      /* Math instructions don't support conditional mod */
      inst = emit(MOV(dst_null_d(), src_reg(dst)));
      inst->conditional_mod = BRW_CONDITIONAL_NZ;

      /* Now, we need to determine if signs of the sources are different.
       * When we XOR the sources, the top bit is 0 if they are the same and 1
       * if they are different. We can then use a conditional modifier to
       * turn that into a predicate. This leads us to an XOR.l instruction.
       */
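      /* Worked example (illustrative): for 7 % -3 the hardware remainder is
       * 1 and the sign bits of the sources differ, so the predicated ADD
       * below yields 1 + (-3) = -2, the GLSL-style modulus.
       */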
      src_reg tmp = src_reg(this, glsl_type::ivec4_type);
      inst = emit(XOR(dst_reg(tmp), op[0], op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->conditional_mod = BRW_CONDITIONAL_L;

      /* If the result of the initial remainder operation is non-zero and the
       * two sources have different signs, add in a copy of op[1] to get the
       * final integer modulus value.
       */
      inst = emit(ADD(dst, src_reg(dst), op[1]));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;
   }

   case nir_op_ldexp:
      unreachable("not reached: should be handled by ldexp_to_arith()");

   case nir_op_fsqrt:
      inst = emit_math(SHADER_OPCODE_SQRT, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_frsq:
      inst = emit_math(SHADER_OPCODE_RSQ, dst, op[0]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fpow:
      inst = emit_math(SHADER_OPCODE_POW, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_uadd_carry: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(ADDC(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_usub_borrow: {
      struct brw_reg acc = retype(brw_acc_reg(8), BRW_REGISTER_TYPE_UD);

      emit(SUBB(dst_null_ud(), op[0], op[1]));
      emit(MOV(dst, src_reg(acc)));
      break;
   }

   case nir_op_ftrunc:
      inst = emit(RNDZ(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fceil: {
      src_reg tmp = src_reg(this, glsl_type::float_type);
      tmp.swizzle =
         brw_swizzle_for_size(instr->src[0].src.is_ssa ?
                              instr->src[0].src.ssa->num_components :
                              instr->src[0].src.reg.reg->num_components);

      op[0].negate = !op[0].negate;
      emit(RNDD(dst_reg(tmp), op[0]));
      tmp.negate = true;
      inst = emit(MOV(dst, tmp));
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_ffloor:
      inst = emit(RNDD(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_ffract:
      inst = emit(FRC(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fround_even:
      inst = emit(RNDE(dst, op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fquantize2f16: {
      /* See also vec4_visitor::emit_pack_half_2x16() */
      src_reg tmp16 = src_reg(this, glsl_type::uvec4_type);
      src_reg tmp32 = src_reg(this, glsl_type::vec4_type);
      src_reg zero = src_reg(this, glsl_type::vec4_type);

      /* Check for denormal */
      src_reg abs_src0 = op[0];
      abs_src0.abs = true;
      emit(CMP(dst_null_f(), abs_src0, brw_imm_f(ldexpf(1.0, -14)),
               BRW_CONDITIONAL_L));
      /* Get the appropriately signed zero */
      emit(AND(retype(dst_reg(zero), BRW_REGISTER_TYPE_UD),
               retype(op[0], BRW_REGISTER_TYPE_UD),
               brw_imm_ud(0x80000000)));
      /* Do the actual F32 -> F16 -> F32 conversion */
      emit(F32TO16(dst_reg(tmp16), op[0]));
      emit(F16TO32(dst_reg(tmp32), tmp16));
      /* Select that or zero based on normal status */
      inst = emit(BRW_OPCODE_SEL, dst, zero, tmp32);
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->predicate_inverse = true;
      inst->saturate = instr->dest.saturate;
      break;
   }

   case nir_op_fmin:
   case nir_op_imin:
   case nir_op_umin:
      inst = emit_minmax(BRW_CONDITIONAL_L, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fmax:
   case nir_op_imax:
   case nir_op_umax:
      inst = emit_minmax(BRW_CONDITIONAL_GE, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fddx:
   case nir_op_fddx_coarse:
   case nir_op_fddx_fine:
   case nir_op_fddy:
   case nir_op_fddy_coarse:
   case nir_op_fddy_fine:
      unreachable("derivatives are not valid in vertex shaders");

   case nir_op_flt:
   case nir_op_ilt:
   case nir_op_ult:
   case nir_op_fge:
   case nir_op_ige:
   case nir_op_uge:
   case nir_op_feq:
   case nir_op_ieq:
   case nir_op_fne:
   case nir_op_ine:
      emit(CMP(dst, op[0], op[1],
               brw_conditional_for_nir_comparison(instr->op)));
      break;

   case nir_op_ball_fequal2:
   case nir_op_ball_iequal2:
   case nir_op_ball_fequal3:
   case nir_op_ball_iequal3:
   case nir_op_ball_fequal4:
   case nir_op_ball_iequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));
      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ALL4H;
      break;
   }

   case nir_op_bany_fnequal2:
   case nir_op_bany_inequal2:
   case nir_op_bany_fnequal3:
   case nir_op_bany_inequal3:
   case nir_op_bany_fnequal4:
   case nir_op_bany_inequal4: {
      unsigned swiz =
         brw_swizzle_for_size(nir_op_infos[instr->op].input_sizes[0]);

      emit(CMP(dst_null_d(), swizzle(op[0], swiz), swizzle(op[1], swiz),
               brw_conditional_for_nir_comparison(instr->op)));

      emit(MOV(dst, brw_imm_d(0)));
      inst = emit(MOV(dst, brw_imm_d(~0)));
      inst->predicate = BRW_PREDICATE_ALIGN16_ANY4H;
      break;
   }

   case nir_op_inot:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
      }
      emit(NOT(dst, op[0]));
      break;

   case nir_op_ixor:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(XOR(dst, op[0], op[1]));
      break;

   case nir_op_ior:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(OR(dst, op[0], op[1]));
      break;

   case nir_op_iand:
      if (devinfo->gen >= 8) {
         op[0] = resolve_source_modifiers(op[0]);
         op[1] = resolve_source_modifiers(op[1]);
      }
      emit(AND(dst, op[0], op[1]));
      break;

   case nir_op_b2i:
   case nir_op_b2f:
      emit(MOV(dst, negate(op[0])));
      break;

   case nir_op_f2b:
      emit(CMP(dst, op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_i2b:
      emit(CMP(dst, op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
      break;

   case nir_op_fnoise1_1:
   case nir_op_fnoise1_2:
   case nir_op_fnoise1_3:
   case nir_op_fnoise1_4:
   case nir_op_fnoise2_1:
   case nir_op_fnoise2_2:
   case nir_op_fnoise2_3:
   case nir_op_fnoise2_4:
   case nir_op_fnoise3_1:
   case nir_op_fnoise3_2:
   case nir_op_fnoise3_3:
   case nir_op_fnoise3_4:
   case nir_op_fnoise4_1:
   case nir_op_fnoise4_2:
   case nir_op_fnoise4_3:
   case nir_op_fnoise4_4:
      unreachable("not reached: should be handled by lower_noise");

   case nir_op_unpack_half_2x16_split_x:
   case nir_op_unpack_half_2x16_split_y:
   case nir_op_pack_half_2x16_split:
      unreachable("not reached: should not occur in vertex shader");

   case nir_op_unpack_snorm_2x16:
   case nir_op_unpack_unorm_2x16:
   case nir_op_pack_snorm_2x16:
   case nir_op_pack_unorm_2x16:
      unreachable("not reached: should be handled by lower_packing_builtins");

   case nir_op_pack_uvec4_to_uint:
      unreachable("not reached");

   case nir_op_pack_uvec2_to_uint: {
      dst_reg tmp1 = dst_reg(this, glsl_type::uint_type);
      tmp1.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_YYYY;
      emit(SHL(tmp1, op[0], src_reg(brw_imm_ud(16u))));

      dst_reg tmp2 = dst_reg(this, glsl_type::uint_type);
      tmp2.writemask = WRITEMASK_X;
      op[0].swizzle = BRW_SWIZZLE_XXXX;
      emit(AND(tmp2, op[0], src_reg(brw_imm_ud(0xffffu))));

      emit(OR(dst, src_reg(tmp1), src_reg(tmp2)));
      break;
   }

   case nir_op_unpack_half_2x16:
      /* NIR does not guarantee a meaningful swizzle outside the bounds of a
       * vector, and emit_unpack_half_2x16 uses the source operand in an
       * operation with WRITEMASK_Y. Since our source operand has size 1, that
       * read stale data and caused Piglit regressions, so we replicate the
       * swizzle of the first component across the remaining components. In
       * the vec4_visitor IR code path this is not needed because the operand
       * already has the correct swizzle.
       */
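      /* E.g. (illustrative): a single-component source with swizzle .y
       * becomes .yyyy, so the WRITEMASK_Y channel used internally still
       * reads valid data.
       */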
      op[0].swizzle = brw_compose_swizzle(BRW_SWIZZLE_XXXX, op[0].swizzle);
      emit_unpack_half_2x16(dst, op[0]);
      break;

   case nir_op_pack_half_2x16:
      emit_pack_half_2x16(dst, op[0]);
      break;

   case nir_op_unpack_unorm_4x8:
      emit_unpack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_unorm_4x8:
      emit_pack_unorm_4x8(dst, op[0]);
      break;

   case nir_op_unpack_snorm_4x8:
      emit_unpack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_pack_snorm_4x8:
      emit_pack_snorm_4x8(dst, op[0]);
      break;

   case nir_op_bitfield_reverse:
      emit(BFREV(dst, op[0]));
      break;

   case nir_op_bit_count:
      emit(CBIT(dst, op[0]));
      break;

   case nir_op_ufind_msb:
   case nir_op_ifind_msb: {
      emit(FBH(retype(dst, BRW_REGISTER_TYPE_UD), op[0]));

      /* FBH counts from the MSB side, while GLSL's findMSB() wants the count
       * from the LSB side. If FBH didn't return an error (0xFFFFFFFF), then
       * subtract the result from 31 to convert the MSB count into an LSB count.
       */
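      /* E.g. (illustrative): for input 0x00010000, FBH returns 15 (counted
       * from the MSB), and 31 - 15 = 16 is the bit index findMSB() expects.
       */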
      src_reg src(dst);
      emit(CMP(dst_null_d(), src, brw_imm_d(-1), BRW_CONDITIONAL_NZ));

      inst = emit(ADD(dst, src, brw_imm_d(31)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      inst->src[0].negate = true;
      break;
   }

   case nir_op_find_lsb:
      emit(FBL(dst, op[0]));
      break;

   case nir_op_ubitfield_extract:
   case nir_op_ibitfield_extract:
      unreachable("should have been lowered");

   case nir_op_bfe:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFE(dst, op[2], op[1], op[0]));
      break;

   case nir_op_bfm:
      emit(BFI1(dst, op[0], op[1]));
      break;

   case nir_op_bfi:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      emit(BFI2(dst, op[0], op[1], op[2]));
      break;

   case nir_op_bitfield_insert:
      unreachable("not reached: should have been lowered");

   case nir_op_fsign:
      /* AND(val, 0x80000000) gives the sign bit.
       *
       * Predicated OR ORs 1.0 (0x3f800000) with the sign bit if val is not
       * zero.
       */
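      /* E.g. (illustrative): val = -2.5 keeps sign bit 0x80000000 and the
       * predicated OR merges in 0x3f800000, giving 0xbf800000 == -1.0f;
       * val == 0.0 fails the CMP, so the OR is skipped and the result
       * stays 0.0.
       */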
      emit(CMP(dst_null_f(), op[0], brw_imm_f(0.0f), BRW_CONDITIONAL_NZ));

      op[0].type = BRW_REGISTER_TYPE_UD;
      dst.type = BRW_REGISTER_TYPE_UD;
      emit(AND(dst, op[0], brw_imm_ud(0x80000000u)));

      inst = emit(OR(dst, src_reg(dst), brw_imm_ud(0x3f800000u)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      dst.type = BRW_REGISTER_TYPE_F;

      if (instr->dest.saturate) {
         inst = emit(MOV(dst, src_reg(dst)));
         inst->saturate = true;
      }
      break;

   case nir_op_isign:
      /* ASR(val, 31) -> negative val generates 0xffffffff (signed -1).
       *               -> non-negative val generates 0x00000000.
       * Predicated OR sets 1 if val is positive.
       */
      emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_G));
      emit(ASR(dst, op[0], brw_imm_d(31)));
      inst = emit(OR(dst, src_reg(dst), brw_imm_d(1)));
      inst->predicate = BRW_PREDICATE_NORMAL;
      break;

   case nir_op_ishl:
      emit(SHL(dst, op[0], op[1]));
      break;

   case nir_op_ishr:
      emit(ASR(dst, op[0], op[1]));
      break;

   case nir_op_ushr:
      emit(SHR(dst, op[0], op[1]));
      break;

   case nir_op_ffma:
      op[0] = fix_3src_operand(op[0]);
      op[1] = fix_3src_operand(op[1]);
      op[2] = fix_3src_operand(op[2]);

      inst = emit(MAD(dst, op[2], op[1], op[0]));
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_flrp:
      inst = emit_lrp(dst, op[0], op[1], op[2]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_bcsel:
      enum brw_predicate predicate;
      if (!optimize_predicate(instr, &predicate)) {
         emit(CMP(dst_null_d(), op[0], brw_imm_d(0), BRW_CONDITIONAL_NZ));
         switch (dst.writemask) {
         case WRITEMASK_X:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_X;
            break;
         case WRITEMASK_Y:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Y;
            break;
         case WRITEMASK_Z:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_Z;
            break;
         case WRITEMASK_W:
            predicate = BRW_PREDICATE_ALIGN16_REPLICATE_W;
            break;
         default:
            predicate = BRW_PREDICATE_NORMAL;
            break;
         }
      }
      inst = emit(BRW_OPCODE_SEL, dst, op[1], op[2]);
      inst->predicate = predicate;
      break;

   case nir_op_fdot_replicated2:
      inst = emit(BRW_OPCODE_DP2, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated3:
      inst = emit(BRW_OPCODE_DP3, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdot_replicated4:
      inst = emit(BRW_OPCODE_DP4, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fdph_replicated:
      inst = emit(BRW_OPCODE_DPH, dst, op[0], op[1]);
      inst->saturate = instr->dest.saturate;
      break;

   case nir_op_fabs:
   case nir_op_iabs:
   case nir_op_fneg:
   case nir_op_ineg:
      unreachable("not reached: should be lowered by lower_source mods");

   case nir_op_fdiv:
      unreachable("not reached: should be lowered by DIV_TO_MUL_RCP in the compiler");

   case nir_op_fmod:
      unreachable("not reached: should be lowered by MOD_TO_FLOOR in the compiler");

   case nir_op_fsub:
   case nir_op_isub:
      unreachable("not reached: should be handled by ir_sub_to_add_neg");

   default:
      unreachable("Unimplemented ALU operation");
   }

   /* If we need to do a boolean resolve, replace the result with -(x & 1)
    * to sign extend the low bit to 0/~0
    */
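   /* E.g. (illustrative): if a comparison left 0x1 in the destination,
    * AND with 1 keeps the low bit and the negated MOV produces
    * -(1) = 0xffffffff, the canonical "true" value.
    */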
   if (devinfo->gen <= 5 &&
       (instr->instr.pass_flags & BRW_NIR_BOOLEAN_MASK) ==
       BRW_NIR_BOOLEAN_NEEDS_RESOLVE) {
      dst_reg masked = dst_reg(this, glsl_type::int_type);
      masked.writemask = dst.writemask;
      emit(AND(masked, src_reg(dst), brw_imm_d(1)));
      src_reg masked_neg = src_reg(masked);
      masked_neg.negate = true;
      emit(MOV(retype(dst, BRW_REGISTER_TYPE_D), masked_neg));
   }
}

void
vec4_visitor::nir_emit_jump(nir_jump_instr *instr)
{
   switch (instr->type) {
   case nir_jump_break:
      emit(BRW_OPCODE_BREAK);
      break;

   case nir_jump_continue:
      emit(BRW_OPCODE_CONTINUE);
      break;

   case nir_jump_return:
   default:
      unreachable("unknown jump");
   }
}

enum ir_texture_opcode
ir_texture_opcode_for_nir_texop(nir_texop texop)
{
   enum ir_texture_opcode op;

   switch (texop) {
   case nir_texop_lod: op = ir_lod; break;
   case nir_texop_query_levels: op = ir_query_levels; break;
   case nir_texop_texture_samples: op = ir_texture_samples; break;
   case nir_texop_tex: op = ir_tex; break;
   case nir_texop_tg4: op = ir_tg4; break;
   case nir_texop_txb: op = ir_txb; break;
   case nir_texop_txd: op = ir_txd; break;
   case nir_texop_txf: op = ir_txf; break;
   case nir_texop_txf_ms: op = ir_txf_ms; break;
   case nir_texop_txl: op = ir_txl; break;
   case nir_texop_txs: op = ir_txs; break;
   case nir_texop_samples_identical: op = ir_samples_identical; break;
   default:
      unreachable("unknown texture opcode");
   }

   return op;
}

const glsl_type *
glsl_type_for_nir_alu_type(nir_alu_type alu_type,
                           unsigned components)
{
   switch (alu_type) {
   case nir_type_float:
      return glsl_type::vec(components);
   case nir_type_int:
      return glsl_type::ivec(components);
   case nir_type_uint:
      return glsl_type::uvec(components);
   case nir_type_bool:
      return glsl_type::bvec(components);
   default:
      return glsl_type::error_type;
   }

   return glsl_type::error_type;
}

void
vec4_visitor::nir_emit_texture(nir_tex_instr *instr)
{
   unsigned texture = instr->texture_index;
   unsigned sampler = instr->sampler_index;
   src_reg texture_reg = brw_imm_ud(texture);
   src_reg sampler_reg = brw_imm_ud(sampler);
   src_reg coordinate;
   const glsl_type *coord_type = NULL;
   src_reg shadow_comparitor;
   src_reg offset_value;
   src_reg lod, lod2;
   src_reg sample_index;
   src_reg mcs;

   const glsl_type *dest_type =
      glsl_type_for_nir_alu_type(instr->dest_type,
                                 nir_tex_instr_dest_size(instr));
   dst_reg dest = get_nir_dest(instr->dest, instr->dest_type);

   /* Our hardware requires a LOD for buffer textures */
   if (instr->sampler_dim == GLSL_SAMPLER_DIM_BUF)
      lod = brw_imm_d(0);

   /* Load the texture operation sources */
   uint32_t constant_offset = 0;
   for (unsigned i = 0; i < instr->num_srcs; i++) {
      switch (instr->src[i].src_type) {
      case nir_tex_src_comparitor:
         shadow_comparitor = get_nir_src(instr->src[i].src,
                                         BRW_REGISTER_TYPE_F, 1);
         break;

      case nir_tex_src_coord: {
         unsigned src_size = nir_tex_instr_src_size(instr, i);

         switch (instr->op) {
         case nir_texop_txf:
         case nir_texop_txf_ms:
         case nir_texop_samples_identical:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D,
                                     src_size);
            coord_type = glsl_type::ivec(src_size);
            break;

         default:
            coordinate = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                                     src_size);
            coord_type = glsl_type::vec(src_size);
            break;
         }
         break;
      }

      case nir_tex_src_ddx:
         lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                           nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_ddy:
         lod2 = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F,
                            nir_tex_instr_src_size(instr, i));
         break;

      case nir_tex_src_lod:
         switch (instr->op) {
         case nir_texop_txs:
         case nir_texop_txf:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
            break;

         default:
            lod = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_F, 1);
            break;
         }
         break;

      case nir_tex_src_ms_index: {
         sample_index = get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 1);
         break;
      }

      case nir_tex_src_offset: {
         nir_const_value *const_offset =
            nir_src_as_const_value(instr->src[i].src);
         if (const_offset) {
            constant_offset = brw_texture_offset(const_offset->i, 3);
         } else {
            offset_value =
               get_nir_src(instr->src[i].src, BRW_REGISTER_TYPE_D, 2);
         }
         break;
      }

      case nir_tex_src_texture_offset: {
         /* The highest texture which may be used by this operation is
          * the last element of the array. Mark it here, because the generator
          * doesn't have enough information to determine the bound.
          */
         uint32_t array_size = instr->texture_array_size;
         uint32_t max_used = texture + array_size - 1;
         if (instr->op == nir_texop_tg4) {
            max_used += prog_data->base.binding_table.gather_texture_start;
         } else {
            max_used += prog_data->base.binding_table.texture_start;
         }

         brw_mark_surface_used(&prog_data->base, max_used);

         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(texture)));
         texture_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_sampler_offset: {
         /* Emit code to evaluate the actual indexing expression */
         src_reg src = get_nir_src(instr->src[i].src, 1);
         src_reg temp(this, glsl_type::uint_type);
         emit(ADD(dst_reg(temp), src, brw_imm_ud(sampler)));
         sampler_reg = emit_uniformize(temp);
         break;
      }

      case nir_tex_src_projector:
         unreachable("Should be lowered by do_lower_texture_projection");

      case nir_tex_src_bias:
         unreachable("LOD bias is not valid for vertex shaders.\n");

      default:
         unreachable("unknown texture source");
      }
   }

   if (instr->op == nir_texop_txf_ms ||
       instr->op == nir_texop_samples_identical) {
      assert(coord_type != NULL);
      if (devinfo->gen >= 7 &&
          key_tex->compressed_multisample_layout_mask & (1 << texture)) {
         mcs = emit_mcs_fetch(coord_type, coordinate, texture_reg);
      } else {
         mcs = brw_imm_ud(0u);
      }
   }

   /* Stuff the channel select bits in the top of the texture offset */
   if (instr->op == nir_texop_tg4) {
      if (instr->component == 1 &&
          (key_tex->gather_channel_quirk_mask & (1 << texture))) {
         /* gather4 sampler is broken for green channel on RG32F --
          * we must ask for blue instead.
          */
         constant_offset |= 2 << 16;
      } else {
         constant_offset |= instr->component << 16;
      }
   }
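
   /* E.g. (illustrative): gathering component 2 (blue) sets bits 17:16 of
    * constant_offset to 2, which the sampler message interprets as the
    * gather channel select.
    */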
   ir_texture_opcode op = ir_texture_opcode_for_nir_texop(instr->op);

   bool is_cube_array =
      instr->op == nir_texop_txs &&
      instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE &&
      instr->is_array;

   emit_texture(op, dest, dest_type, coordinate, instr->coord_components,
                shadow_comparitor,
                lod, lod2, sample_index,
                constant_offset, offset_value,
                mcs, is_cube_array,
                texture, texture_reg, sampler, sampler_reg);
}

void
vec4_visitor::nir_emit_undef(nir_ssa_undef_instr *instr)
{
   nir_ssa_values[instr->def.index] = dst_reg(VGRF, alloc.allocate(1));
}

} /* namespace brw */