/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

/**
 * This file drives the GLSL IR -> LIR translation, contains the
 * optimizations on the LIR, and drives the generation of native code
 * from the LIR.
 */
#include <sys/types.h>

#include "util/hash_table.h"
#include "main/macros.h"
#include "main/shaderobj.h"
#include "main/fbobject.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "util/register_allocate.h"
#include "program/hash_table.h"
#include "brw_context.h"
#include "brw_dead_control_flow.h"
#include "main/uniforms.h"
#include "brw_fs_live_variables.h"
#include "glsl/glsl_types.h"
#include "program/sampler.h"
fs_inst::init(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
              const fs_reg *src, unsigned sources)
   memset(this, 0, sizeof(*this));

   this->src = new fs_reg[MAX2(sources, 3)];
   for (unsigned i = 0; i < sources; i++)
      this->src[i] = src[i];

   this->opcode = opcode;
   this->sources = sources;
   this->exec_size = exec_size;

   assert(dst.file != IMM && dst.file != UNIFORM);

   assert(this->exec_size != 0);

   this->conditional_mod = BRW_CONDITIONAL_NONE;

   /* This will be the case for almost all instructions. */
      this->regs_written = DIV_ROUND_UP(dst.component_size(exec_size),
                                        REG_SIZE);
      this->regs_written = 0;
      unreachable("Invalid destination register file");
      unreachable("Invalid register file");

   this->writes_accumulator = false;

   init(BRW_OPCODE_NOP, 8, dst, NULL, 0);

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size)
   init(opcode, exec_size, reg_undef, NULL, 0);

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst)
   init(opcode, exec_size, dst, NULL, 0);

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
   const fs_reg src[1] = { src0 };
   init(opcode, exec_size, dst, src, 1);

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0, const fs_reg &src1)
   const fs_reg src[2] = { src0, src1 };
   init(opcode, exec_size, dst, src, 2);

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_size, const fs_reg &dst,
                 const fs_reg &src0, const fs_reg &src1, const fs_reg &src2)
   const fs_reg src[3] = { src0, src1, src2 };
   init(opcode, exec_size, dst, src, 3);

fs_inst::fs_inst(enum opcode opcode, uint8_t exec_width, const fs_reg &dst,
                 const fs_reg src[], unsigned sources)
   init(opcode, exec_width, dst, src, sources);

fs_inst::fs_inst(const fs_inst &that)
   memcpy(this, &that, sizeof(that));

   this->src = new fs_reg[MAX2(that.sources, 3)];

   for (unsigned i = 0; i < that.sources; i++)
      this->src[i] = that.src[i];

fs_inst::resize_sources(uint8_t num_sources)
   if (this->sources != num_sources) {
      fs_reg *src = new fs_reg[MAX2(num_sources, 3)];

      for (unsigned i = 0; i < MIN2(this->sources, num_sources); ++i)
         src[i] = this->src[i];

      this->sources = num_sources;

fs_visitor::VARYING_PULL_CONSTANT_LOAD(const fs_builder &bld,
                                       const fs_reg &surf_index,
                                       const fs_reg &varying_offset,
                                       uint32_t const_offset)
   /* We have our constant surface use a pitch of 4 bytes, so our index can
    * be any component of a vector, and then we load 4 contiguous
    * components starting from that.
    *
    * We break down the const_offset to a portion added to the variable
    * offset and a portion done using reg_offset, which means that if you
    * have GLSL using something like "uniform vec4 a[20]; gl_FragColor =
    * a[i]", we'll temporarily generate 4 vec4 loads from offset i * 4, and
    * CSE can later notice that those loads are all the same and eliminate
    * the redundant ones.
    */
   fs_reg vec4_offset = vgrf(glsl_type::int_type);
   bld.ADD(vec4_offset, varying_offset, fs_reg(const_offset & ~3));

   if (devinfo->gen == 4 && bld.dispatch_width() == 8) {
      /* Pre-gen5, we can either use a SIMD8 message that requires (header,
       * u, v, r) as parameters, or we can just use the SIMD16 message
       * consisting of (header, u). We choose the second, at the cost of a
       * longer return length.
       */

   if (devinfo->gen >= 7)
      op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7;
   else
      op = FS_OPCODE_VARYING_PULL_CONSTANT_LOAD;

   int regs_written = 4 * (bld.dispatch_width() / 8) * scale;
   fs_reg vec4_result = fs_reg(GRF, alloc.allocate(regs_written), dst.type);
   fs_inst *inst = bld.emit(op, vec4_result, surf_index, vec4_offset);
   inst->regs_written = regs_written;

   if (devinfo->gen < 7) {
      inst->header_size = 1;
      if (devinfo->gen == 4)
         inst->mlen = 1 + bld.dispatch_width() / 8;

   bld.MOV(dst, offset(vec4_result, bld, (const_offset & 3) * scale));
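
/* Editor's note (not part of the original source): a worked example of the
 * const_offset split above. For const_offset == 22, the low bits are masked
 * off so vec4_offset = varying_offset + 20, selecting the aligned vec4 that
 * contains the value, and the final MOV picks component (22 & 3) == 2 out of
 * the four contiguous components that were loaded. Because the variable part
 * of the address is identical for all four components of "a[i]", CSE can
 * collapse the four loads into one, as the comment above describes.
 */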
/**
 * A helper for MOV generation for fixing up broken hardware SEND dependency
 * handling.
 */
fs_visitor::DEP_RESOLVE_MOV(const fs_builder &bld, int grf)
   /* The caller always wants uncompressed to emit the minimal extra
    * dependencies, and to avoid having to deal with aligning its regs to 2.
    */
   const fs_builder ubld = bld.annotate("send dependency resolve")
                              .half(0);

   ubld.MOV(ubld.null_reg_f(), fs_reg(GRF, grf, BRW_REGISTER_TYPE_F));

fs_inst::equals(fs_inst *inst) const
   return (opcode == inst->opcode &&
           dst.equals(inst->dst) &&
           src[0].equals(inst->src[0]) &&
           src[1].equals(inst->src[1]) &&
           src[2].equals(inst->src[2]) &&
           saturate == inst->saturate &&
           predicate == inst->predicate &&
           conditional_mod == inst->conditional_mod &&
           mlen == inst->mlen &&
           base_mrf == inst->base_mrf &&
           target == inst->target &&
           header_size == inst->header_size &&
           shadow_compare == inst->shadow_compare &&
           exec_size == inst->exec_size &&
           offset == inst->offset);

fs_inst::overwrites_reg(const fs_reg &reg) const
   return reg.in_range(dst, regs_written);

fs_inst::is_send_from_grf() const
   switch (opcode) {
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD_GEN7:
   case SHADER_OPCODE_SHADER_TIME_ADD:
   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
      return true;
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
      return src[1].file == GRF;
   case FS_OPCODE_FB_WRITE:
      return src[0].file == GRF;
         return src[0].file == GRF;

fs_inst::is_copy_payload(const brw::simple_allocator &grf_alloc) const
   if (this->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
      return false;

   fs_reg reg = this->src[0];
   if (reg.file != GRF || reg.reg_offset != 0 || reg.stride == 0)
      return false;

   if (grf_alloc.sizes[reg.reg] != this->regs_written)
      return false;

   for (int i = 0; i < this->sources; i++) {
      reg.type = this->src[i].type;
      if (!this->src[i].equals(reg))
         return false;

      if (i < this->header_size) {
         reg.reg_offset += 1;
      } else {
         reg.reg_offset += this->exec_size / 8;

fs_inst::can_do_source_mods(const struct brw_device_info *devinfo)
   if (devinfo->gen == 6 && is_math())
      return false;

   if (is_send_from_grf())
      return false;

   if (!backend_instruction::can_do_source_mods())
      return false;

fs_inst::has_side_effects() const
   return this->eot || backend_instruction::has_side_effects();

   memset(this, 0, sizeof(*this));

/** Generic unset register constructor. */
   this->file = BAD_FILE;

/** Immediate value constructor. */
fs_reg::fs_reg(float f)
   this->type = BRW_REGISTER_TYPE_F;
   this->fixed_hw_reg.dw1.f = f;

/** Immediate value constructor. */
fs_reg::fs_reg(int32_t i)
   this->type = BRW_REGISTER_TYPE_D;
   this->fixed_hw_reg.dw1.d = i;

/** Immediate value constructor. */
fs_reg::fs_reg(uint32_t u)
   this->type = BRW_REGISTER_TYPE_UD;
   this->fixed_hw_reg.dw1.ud = u;

/** Vector float immediate value constructor. */
fs_reg::fs_reg(uint8_t vf[4])
   this->type = BRW_REGISTER_TYPE_VF;
   memcpy(&this->fixed_hw_reg.dw1.ud, vf, sizeof(unsigned));

/** Vector float immediate value constructor. */
fs_reg::fs_reg(uint8_t vf0, uint8_t vf1, uint8_t vf2, uint8_t vf3)
   this->type = BRW_REGISTER_TYPE_VF;
   this->fixed_hw_reg.dw1.ud = (vf0 <<  0) |
                               (vf1 <<  8) |
                               (vf2 << 16) |
                               (vf3 << 24);

/** Fixed brw_reg. */
fs_reg::fs_reg(struct brw_reg fixed_hw_reg)
   this->fixed_hw_reg = fixed_hw_reg;
   this->type = fixed_hw_reg.type;

fs_reg::equals(const fs_reg &r) const
   return (file == r.file &&
           reg_offset == r.reg_offset &&
           subreg_offset == r.subreg_offset &&
           negate == r.negate &&
           !reladdr && !r.reladdr &&
           ((file != HW_REG && file != IMM) ||
            memcmp(&fixed_hw_reg, &r.fixed_hw_reg,
                   sizeof(fixed_hw_reg)) == 0) &&
           stride == r.stride);

fs_reg::set_smear(unsigned subreg)
   assert(file != HW_REG && file != IMM);
   subreg_offset = subreg * type_sz(type);

fs_reg::is_contiguous() const
   return stride == 1;

fs_reg::component_size(unsigned width) const
   const unsigned stride = (file != HW_REG ? this->stride :
                            fixed_hw_reg.hstride == 0 ? 0 :
                            1 << (fixed_hw_reg.hstride - 1));
   return MAX2(width * stride, 1) * type_sz(type);
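
/* Editor's note (not part of the original source): component_size() is the
 * byte footprint of one logical component across the whole execution width.
 * A hypothetical example, assuming a GRF float register: with width == 8,
 * stride == 2 and type == BRW_REGISTER_TYPE_F, this is
 * MAX2(8 * 2, 1) * 4 == 64 bytes, i.e. two 32-byte registers even though
 * only eight floats are live.
 */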
type_size_scalar(const struct glsl_type *type)
   unsigned int size, i;

   switch (type->base_type) {
   case GLSL_TYPE_FLOAT:
      return type->components();
   case GLSL_TYPE_ARRAY:
      return type_size_scalar(type->fields.array) * type->length;
   case GLSL_TYPE_STRUCT:
      for (i = 0; i < type->length; i++) {
         size += type_size_scalar(type->fields.structure[i].type);
   case GLSL_TYPE_SAMPLER:
      /* Samplers take up no register space, since they're baked in at
       * link time.
       */
      return 0;
   case GLSL_TYPE_ATOMIC_UINT:
      return 0;
   case GLSL_TYPE_SUBROUTINE:
      return 1;
   case GLSL_TYPE_IMAGE:
      return BRW_IMAGE_PARAM_SIZE;
   case GLSL_TYPE_ERROR:
   case GLSL_TYPE_INTERFACE:
   case GLSL_TYPE_DOUBLE:
   case GLSL_TYPE_FUNCTION:
      unreachable("not reached");
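
/* Editor's sketch (not part of the original source) of what
 * type_size_scalar() returns under the rules above: a float or bool type
 * counts its components, so vec4 -> 4 and mat3 -> 9; arrays multiply, so
 * "uniform vec4 a[20]" -> 80 scalar slots; a sampler contributes 0 because
 * it is baked into the message descriptor rather than occupying register
 * space.
 */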
/**
 * Create a MOV to read the timestamp register.
 *
 * The caller is responsible for emitting the MOV. The return value is
 * the destination of the MOV, with extra parameters set.
 */
fs_visitor::get_timestamp(const fs_builder &bld)
   assert(devinfo->gen >= 7);

   fs_reg ts = fs_reg(retype(brw_vec4_reg(BRW_ARCHITECTURE_REGISTER_FILE,
                             BRW_REGISTER_TYPE_UD));

   fs_reg dst = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   /* We want to read the 3 fields we care about even if it's not enabled in
    * the dispatch.
    */
   bld.group(4, 0).exec_all().MOV(dst, ts);

   /* The caller wants the low 32 bits of the timestamp. Since it's running
    * at the GPU clock rate of ~1.2 GHz, it will roll over every ~3 seconds,
    * which is plenty of time for our purposes. It is identical across the
    * EUs, but since it's tracking GPU core speed it will increment at a
    * varying rate as render P-states change.
    *
    * The caller could also check if render P-states have changed (or anything
    * else that might disrupt timing) by setting smear to 2 and checking if
    * that field is != 0.
    */

fs_visitor::emit_shader_time_begin()
   shader_start_time = get_timestamp(bld.annotate("shader time start"));

fs_visitor::emit_shader_time_end()
   /* Insert our code just before the final SEND with EOT. */
   exec_node *end = this->instructions.get_tail();
   assert(end && ((fs_inst *) end)->eot);
   const fs_builder ibld = bld.annotate("shader time end")
                              .exec_all().at(NULL, end);

   fs_reg shader_end_time = get_timestamp(ibld);

   /* Check that there weren't any timestamp reset events (assuming these
    * were the only two timestamp reads that happened).
    */
   fs_reg reset = shader_end_time;

   set_condmod(BRW_CONDITIONAL_Z,
               ibld.AND(ibld.null_reg_ud(), reset, fs_reg(1u)));
   ibld.IF(BRW_PREDICATE_NORMAL);

   fs_reg start = shader_start_time;

   fs_reg diff = fs_reg(GRF, alloc.allocate(1), BRW_REGISTER_TYPE_UD);

   const fs_builder cbld = ibld.group(1, 0);
   cbld.group(1, 0).ADD(diff, start, shader_end_time);

   /* If there were no instructions between the two timestamp gets, the diff
    * is 2 cycles. Remove that overhead, so I can forget about that when
    * trying to determine the time taken for single instructions.
    */
   cbld.ADD(diff, diff, fs_reg(-2u));
   SHADER_TIME_ADD(cbld, 0, diff);
   SHADER_TIME_ADD(cbld, 1, fs_reg(1u));
   ibld.emit(BRW_OPCODE_ELSE);
   SHADER_TIME_ADD(cbld, 2, fs_reg(1u));
   ibld.emit(BRW_OPCODE_ENDIF);
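
/* Editor's note (not part of the original source) on the bookkeeping above:
 * each shader-time record is three buffer slots wide (see the "* 3" in
 * SHADER_TIME_ADD below) -- subindex 0 accumulates cycles, subindex 1 counts
 * invocations whose measurement landed, and subindex 2 counts invocations
 * that were discarded because a timestamp reset was detected between the
 * begin and end reads.
 */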
fs_visitor::SHADER_TIME_ADD(const fs_builder &bld,
                            int shader_time_subindex,
   int index = shader_time_index * 3 + shader_time_subindex;
   fs_reg offset = fs_reg(index * SHADER_TIME_STRIDE);

   if (dispatch_width == 8)
      payload = vgrf(glsl_type::uvec2_type);
   else
      payload = vgrf(glsl_type::uint_type);

   bld.emit(SHADER_OPCODE_SHADER_TIME_ADD, fs_reg(), payload, offset, value);

fs_visitor::vfail(const char *format, va_list va)
   msg = ralloc_vasprintf(mem_ctx, format, va);
   msg = ralloc_asprintf(mem_ctx, "%s compile failed: %s\n", stage_abbrev, msg);

   this->fail_msg = msg;

      fprintf(stderr, "%s", msg);

fs_visitor::fail(const char *format, ...)
   va_list va;

   va_start(va, format);

/**
 * Mark this program as impossible to compile in SIMD16 mode.
 *
 * During the SIMD8 compile (which happens first), we can detect and flag
 * things that are unsupported in SIMD16 mode, so the compiler can skip
 * the SIMD16 compile altogether.
 *
 * During a SIMD16 compile (if one happens anyway), this just calls fail().
 */
fs_visitor::no16(const char *msg)
   if (dispatch_width == 16) {
      simd16_unsupported = true;

      compiler->shader_perf_log(log_data,
                                "SIMD16 shader failed to compile: %s", msg);

/**
 * Returns true if the instruction has a flag that means it won't
 * update an entire destination register.
 *
 * For example, dead code elimination and live variable analysis want to know
 * when a write to a variable screens off any preceding values that were in
 * the register.
 */
fs_inst::is_partial_write() const
   return ((this->predicate && this->opcode != BRW_OPCODE_SEL) ||
           (this->exec_size * type_sz(this->dst.type)) < 32 ||
           !this->dst.is_contiguous());
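
/* Editor's example (not part of the original source) for the test above:
 * a SIMD8 MOV to a 32-bit destination writes 8 * 4 == 32 bytes and covers a
 * whole register, so it is a full write; the same MOV with a predicate (and
 * no SEL), with a 16-bit destination type (8 * 2 == 16 bytes), or with a
 * non-contiguous destination only updates part of the register, so earlier
 * values written to that register may still be live.
 */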
fs_inst::components_read(unsigned i) const
   switch (opcode) {
   case FS_OPCODE_LINTERP:

   case FS_OPCODE_PIXEL_X:
   case FS_OPCODE_PIXEL_Y:

   case FS_OPCODE_FB_WRITE_LOGICAL:
      assert(src[6].file == IMM);
      /* First/second FB write color. */
      if (i < 2)
         return src[6].fixed_hw_reg.dw1.ud;

   case SHADER_OPCODE_TEX_LOGICAL:
   case SHADER_OPCODE_TXD_LOGICAL:
   case SHADER_OPCODE_TXF_LOGICAL:
   case SHADER_OPCODE_TXL_LOGICAL:
   case SHADER_OPCODE_TXS_LOGICAL:
   case FS_OPCODE_TXB_LOGICAL:
   case SHADER_OPCODE_TXF_CMS_LOGICAL:
   case SHADER_OPCODE_TXF_UMS_LOGICAL:
   case SHADER_OPCODE_TXF_MCS_LOGICAL:
   case SHADER_OPCODE_LOD_LOGICAL:
   case SHADER_OPCODE_TG4_LOGICAL:
   case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
      assert(src[8].file == IMM && src[9].file == IMM);
      /* Texture coordinates. */
      if (i == 0)
         return src[8].fixed_hw_reg.dw1.ud;
      /* Texture derivatives. */
      else if ((i == 2 || i == 3) && opcode == SHADER_OPCODE_TXD_LOGICAL)
         return src[9].fixed_hw_reg.dw1.ud;
      /* Texture offset. */

   case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
      assert(src[3].file == IMM);
      /* Surface coordinates. */
      if (i == 0)
         return src[3].fixed_hw_reg.dw1.ud;
      /* Surface operation source (ignored for reads). */

   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
      assert(src[3].file == IMM &&
             src[4].file == IMM);
      /* Surface coordinates. */
      if (i == 0)
         return src[3].fixed_hw_reg.dw1.ud;
      /* Surface operation source. */
      else if (i == 1)
         return src[4].fixed_hw_reg.dw1.ud;

   case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
   case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL: {
      assert(src[3].file == IMM &&
             src[4].file == IMM);
      const unsigned op = src[4].fixed_hw_reg.dw1.ud;
      /* Surface coordinates. */
      if (i == 0)
         return src[3].fixed_hw_reg.dw1.ud;
      /* Surface operation source. */
      else if (i == 1 && op == BRW_AOP_CMPWR)
         return 2;
      else if (i == 1 && (op == BRW_AOP_INC || op == BRW_AOP_DEC ||
                          op == BRW_AOP_PREDEC))
         return 0;

fs_inst::regs_read(int arg) const
   switch (opcode) {
   case FS_OPCODE_FB_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:

   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7:
      /* The payload is actually stored in src1 */

   case FS_OPCODE_LINTERP:

   case SHADER_OPCODE_LOAD_PAYLOAD:
      if (arg < this->header_size)
         return 1;

   case CS_OPCODE_CS_TERMINATE:
   case SHADER_OPCODE_BARRIER:
      return 1;

   if (is_tex() && arg == 0 && src[0].file == GRF)
      return mlen;

   switch (src[arg].file) {
      return DIV_ROUND_UP(components_read(arg) *
                          src[arg].component_size(exec_size),
                          REG_SIZE);
   case MRF:
      unreachable("MRF registers are not allowed as sources");
   default:
      unreachable("Invalid register file");

fs_inst::reads_flag() const
   return predicate;

fs_inst::writes_flag() const
   return (conditional_mod && (opcode != BRW_OPCODE_SEL &&
                               opcode != BRW_OPCODE_IF &&
                               opcode != BRW_OPCODE_WHILE)) ||
          opcode == FS_OPCODE_MOV_DISPATCH_TO_FLAGS;

/**
 * Returns how many MRFs an FS opcode will write over.
 *
 * Note that this is not the 0 or 1 implied writes in an actual gen
 * instruction -- the FS opcodes often generate MOVs in addition.
 */
fs_visitor::implied_mrf_writes(fs_inst *inst)
   if (inst->base_mrf == -1)
      return 0;

   switch (inst->opcode) {
   case SHADER_OPCODE_RCP:
   case SHADER_OPCODE_RSQ:
   case SHADER_OPCODE_SQRT:
   case SHADER_OPCODE_EXP2:
   case SHADER_OPCODE_LOG2:
   case SHADER_OPCODE_SIN:
   case SHADER_OPCODE_COS:
      return 1 * dispatch_width / 8;
   case SHADER_OPCODE_POW:
   case SHADER_OPCODE_INT_QUOTIENT:
   case SHADER_OPCODE_INT_REMAINDER:
      return 2 * dispatch_width / 8;
   case SHADER_OPCODE_TEX:
   case SHADER_OPCODE_TXD:
   case SHADER_OPCODE_TXF:
   case SHADER_OPCODE_TXF_CMS:
   case SHADER_OPCODE_TXF_MCS:
   case SHADER_OPCODE_TG4:
   case SHADER_OPCODE_TG4_OFFSET:
   case SHADER_OPCODE_TXL:
   case SHADER_OPCODE_TXS:
   case SHADER_OPCODE_LOD:
   case SHADER_OPCODE_SAMPLEINFO:
      return 1;
   case FS_OPCODE_FB_WRITE:
      return 2;
   case FS_OPCODE_GET_BUFFER_SIZE:
   case FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD:
   case SHADER_OPCODE_GEN4_SCRATCH_READ:
      return 1;
   case FS_OPCODE_VARYING_PULL_CONSTANT_LOAD:
      return inst->mlen;
   case SHADER_OPCODE_GEN4_SCRATCH_WRITE:
      return inst->mlen;
   case SHADER_OPCODE_UNTYPED_ATOMIC:
   case SHADER_OPCODE_UNTYPED_SURFACE_READ:
   case SHADER_OPCODE_UNTYPED_SURFACE_WRITE:
   case SHADER_OPCODE_TYPED_ATOMIC:
   case SHADER_OPCODE_TYPED_SURFACE_READ:
   case SHADER_OPCODE_TYPED_SURFACE_WRITE:
   case SHADER_OPCODE_URB_WRITE_SIMD8:
   case FS_OPCODE_INTERPOLATE_AT_CENTROID:
   case FS_OPCODE_INTERPOLATE_AT_SAMPLE:
   case FS_OPCODE_INTERPOLATE_AT_SHARED_OFFSET:
   case FS_OPCODE_INTERPOLATE_AT_PER_SLOT_OFFSET:
      return 0;
   default:
      unreachable("not reached");

fs_visitor::vgrf(const glsl_type *const type)
   int reg_width = dispatch_width / 8;
   return fs_reg(GRF, alloc.allocate(type_size_scalar(type) * reg_width),
                 brw_type_for_base_type(type));
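
/* Editor's sketch (not part of the original source): vgrf() sizes the
 * allocation by scalar slots times the SIMD width, so allocating a vec2 at
 * dispatch_width == 16 reserves 2 * (16 / 8) == 4 virtual GRFs -- one
 * register per 8 channels of each component.
 */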
/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int reg)
   this->type = BRW_REGISTER_TYPE_F;
   this->stride = (file == UNIFORM ? 0 : 1);

/** Fixed HW reg constructor. */
fs_reg::fs_reg(enum register_file file, int reg, enum brw_reg_type type)
   this->stride = (file == UNIFORM ? 0 : 1);

/* For SIMD16, we need to follow from the uniform setup of SIMD8 dispatch.
 * This brings in those uniform definitions.
 */
fs_visitor::import_uniforms(fs_visitor *v)
   this->push_constant_loc = v->push_constant_loc;
   this->pull_constant_loc = v->pull_constant_loc;
   this->uniforms = v->uniforms;
   this->param_size = v->param_size;

fs_visitor::setup_vec4_uniform_value(unsigned param_offset,
                                     const gl_constant_value *values,
   static const gl_constant_value zero = { 0 };

   for (unsigned i = 0; i < n; ++i)
      stage_prog_data->param[param_offset + i] = &values[i];

   for (unsigned i = n; i < 4; ++i)
      stage_prog_data->param[param_offset + i] = &zero;

fs_visitor::emit_fragcoord_interpolation(bool pixel_center_integer,
                                         bool origin_upper_left)
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec4_type));
   fs_reg wpos = *reg;
   bool flip = !origin_upper_left ^ key->render_to_fbo;

   /* gl_FragCoord.x */
   if (pixel_center_integer) {
      bld.MOV(wpos, this->pixel_x);
      bld.ADD(wpos, this->pixel_x, fs_reg(0.5f));
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.y */
   if (!flip && pixel_center_integer) {
      bld.MOV(wpos, this->pixel_y);
      fs_reg pixel_y = this->pixel_y;
      float offset = (pixel_center_integer ? 0.0f : 0.5f);

         pixel_y.negate = true;
         offset += key->drawable_height - 1.0f;

      bld.ADD(wpos, pixel_y, fs_reg(offset));
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.z */
   if (devinfo->gen >= 6) {
      bld.MOV(wpos, fs_reg(brw_vec8_grf(payload.source_depth_reg, 0)));
      bld.emit(FS_OPCODE_LINTERP, wpos,
               this->delta_xy[BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC],
               interp_reg(VARYING_SLOT_POS, 2));
   wpos = offset(wpos, bld, 1);

   /* gl_FragCoord.w: Already set up in emit_interpolation */
   bld.MOV(wpos, this->wpos_w);

fs_visitor::emit_linterp(const fs_reg &attr, const fs_reg &interp,
                         glsl_interp_qualifier interpolation_mode,
                         bool is_centroid, bool is_sample)
   brw_wm_barycentric_interp_mode barycoord_mode;
   if (devinfo->gen >= 6) {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_CENTROID_BARYCENTRIC;
            barycoord_mode = BRW_WM_NONPERSPECTIVE_CENTROID_BARYCENTRIC;
      } else if (is_sample) {
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_SAMPLE_BARYCENTRIC;
            barycoord_mode = BRW_WM_NONPERSPECTIVE_SAMPLE_BARYCENTRIC;
         if (interpolation_mode == INTERP_QUALIFIER_SMOOTH)
            barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
            barycoord_mode = BRW_WM_NONPERSPECTIVE_PIXEL_BARYCENTRIC;
      /* On Ironlake and below, there is only one interpolation mode.
       * Centroid interpolation doesn't mean anything on this hardware --
       * there is no multisampling.
       */
      barycoord_mode = BRW_WM_PERSPECTIVE_PIXEL_BARYCENTRIC;
   return bld.emit(FS_OPCODE_LINTERP, attr,
                   this->delta_xy[barycoord_mode], interp);

fs_visitor::emit_general_interpolation(fs_reg attr, const char *name,
                                       const glsl_type *type,
                                       glsl_interp_qualifier interpolation_mode,
                                       int location, bool mod_centroid,
   attr.type = brw_type_for_base_type(type->get_scalar_type());

   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   unsigned int array_elements;

   if (type->is_array()) {
      array_elements = type->length;
      if (array_elements == 0) {
         fail("dereferenced array '%s' has length 0\n", name);
      type = type->fields.array;

   if (interpolation_mode == INTERP_QUALIFIER_NONE) {
      bool is_gl_Color =
         location == VARYING_SLOT_COL0 || location == VARYING_SLOT_COL1;
      if (key->flat_shade && is_gl_Color) {
         interpolation_mode = INTERP_QUALIFIER_FLAT;
         interpolation_mode = INTERP_QUALIFIER_SMOOTH;

   for (unsigned int i = 0; i < array_elements; i++) {
      for (unsigned int j = 0; j < type->matrix_columns; j++) {
         if (prog_data->urb_setup[location] == -1) {
            /* If there's no incoming setup data for this slot, don't
             * emit interpolation for it.
             */
            attr = offset(attr, bld, type->vector_elements);

         if (interpolation_mode == INTERP_QUALIFIER_FLAT) {
            /* Constant interpolation (flat shading) case. The SF has
             * handed us defined values in only the constant offset
             * field of the setup reg.
             */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               struct brw_reg interp = interp_reg(location, k);
               interp = suboffset(interp, 3);
               interp.type = attr.type;
               bld.emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
               attr = offset(attr, bld, 1);
            /* Smooth/noperspective interpolation case. */
            for (unsigned int k = 0; k < type->vector_elements; k++) {
               struct brw_reg interp = interp_reg(location, k);
               if (devinfo->needs_unlit_centroid_workaround && mod_centroid) {
                  /* Get the pixel/sample mask into f0 so that we know
                   * which pixels are lit. Then, for each channel that is
                   * unlit, replace the centroid data with non-centroid
                   * data.
                   */
                  bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);

                  inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
                  inst->predicate = BRW_PREDICATE_NORMAL;
                  inst->predicate_inverse = true;
                  if (devinfo->has_pln)
                     inst->no_dd_clear = true;

                  inst = emit_linterp(attr, fs_reg(interp), interpolation_mode,
                                      mod_centroid && !key->persample_shading,
                                      mod_sample || key->persample_shading);
                  inst->predicate = BRW_PREDICATE_NORMAL;
                  inst->predicate_inverse = false;
                  if (devinfo->has_pln)
                     inst->no_dd_check = true;

                  emit_linterp(attr, fs_reg(interp), interpolation_mode,
                               mod_centroid && !key->persample_shading,
                               mod_sample || key->persample_shading);
               if (devinfo->gen < 6 && interpolation_mode == INTERP_QUALIFIER_SMOOTH) {
                  bld.MUL(attr, attr, this->pixel_w);
               attr = offset(attr, bld, 1);

fs_visitor::emit_frontfacing_interpolation()
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::bool_type));

   if (devinfo->gen >= 6) {
      /* Bit 15 of g0.0 is 0 if the polygon is front facing. We want to create
       * a boolean result from this (~0/true or 0/false).
       *
       * We can use the fact that bit 15 is the MSB of g0.0:W to accomplish
       * this task in only one instruction:
       *    - a negation source modifier will flip the bit; and
       *    - a W -> D type conversion will sign extend the bit into the high
       *      word of the destination.
       *
       * An ASR 15 fills the low word of the destination.
       */
      fs_reg g0 = fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_W));

      bld.ASR(*reg, g0, fs_reg(15));
      /* Bit 31 of g1.6 is 0 if the polygon is front facing. We want to create
       * a boolean result from this (1/true or 0/false).
       *
       * Like in the above case, since the bit is the MSB of g1.6:UD we can use
       * the negation source modifier to flip it. Unfortunately the SHR
       * instruction only operates on UD (or D with an abs source modifier)
       * sources without negation.
       *
       * Instead, use ASR (which will give ~0/true or 0/false).
       */
      fs_reg g1_6 = fs_reg(retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_D));

      bld.ASR(*reg, g1_6, fs_reg(31));
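
/* Editor's restatement (not part of the original source) of the two paths
 * above: both variants turn the "is back facing" bit into a whole-register
 * boolean with a single ASR, the only difference being where the bit lives
 * -- bit 15 of g0.0:W on gen6+ (shift amount 15) versus bit 31 of g1.6:D on
 * gen4-5 (shift amount 31). In both cases the negate source modifier
 * inverts the sense of the bit so that a front-facing polygon yields true.
 */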
fs_visitor::compute_sample_position(fs_reg dst, fs_reg int_sample_pos)
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   assert(dst.type == BRW_REGISTER_TYPE_F);

   if (key->compute_pos_offset) {
      /* Convert int_sample_pos to floating point */
      bld.MOV(dst, int_sample_pos);
      /* Scale to the range [0, 1] */
      bld.MUL(dst, dst, fs_reg(1 / 16.0f));
      /* From ARB_sample_shading specification:
       * "When rendering to a non-multisample buffer, or if multisample
       *  rasterization is disabled, gl_SamplePosition will always be
       *  (0.5, 0.5)."
       */
      bld.MOV(dst, fs_reg(0.5f));

fs_visitor::emit_samplepos_setup()
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample position");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::vec2_type));
   fs_reg pos = *reg;
   fs_reg int_sample_x = vgrf(glsl_type::int_type);
   fs_reg int_sample_y = vgrf(glsl_type::int_type);

   /* WM will be run in MSDISPMODE_PERSAMPLE. So, only one of SIMD8 or SIMD16
    * mode will be enabled.
    *
    * From the Ivy Bridge PRM, volume 2 part 1, page 344:
    *    R31.1:0   Position Offset X/Y for Slot[3:0]
    *    R31.3:2   Position Offset X/Y for Slot[7:4]
    *    ...
    *
    * The X, Y sample positions come in as bytes in thread payload. So, read
    * the positions using vstride=16, width=8, hstride=2.
    */
   struct brw_reg sample_pos_reg =
      stride(retype(brw_vec1_grf(payload.sample_pos_reg, 0),
                    BRW_REGISTER_TYPE_B), 16, 8, 2);

   if (dispatch_width == 8) {
      abld.MOV(int_sample_x, fs_reg(sample_pos_reg));
      abld.half(0).MOV(half(int_sample_x, 0), fs_reg(sample_pos_reg));
      abld.half(1).MOV(half(int_sample_x, 1),
                       fs_reg(suboffset(sample_pos_reg, 16)));

   /* Compute gl_SamplePosition.x */
   compute_sample_position(pos, int_sample_x);
   pos = offset(pos, abld, 1);
   if (dispatch_width == 8) {
      abld.MOV(int_sample_y, fs_reg(suboffset(sample_pos_reg, 1)));
      abld.half(0).MOV(half(int_sample_y, 0),
                       fs_reg(suboffset(sample_pos_reg, 1)));
      abld.half(1).MOV(half(int_sample_y, 1),
                       fs_reg(suboffset(sample_pos_reg, 17)));

   /* Compute gl_SamplePosition.y */
   compute_sample_position(pos, int_sample_y);

fs_visitor::emit_sampleid_setup()
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
   assert(devinfo->gen >= 6);

   const fs_builder abld = bld.annotate("compute sample id");
   fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::int_type));

   if (key->compute_sample_id) {
      fs_reg t1 = vgrf(glsl_type::int_type);
      fs_reg t2 = vgrf(glsl_type::int_type);
      t2.type = BRW_REGISTER_TYPE_UW;

      /* The PS will be run in MSDISPMODE_PERSAMPLE. For example with
       * 8x multisampling, subspan 0 will represent sample N (where N
       * is 0, 2, 4 or 6), subspan 1 will represent sample 1, 3, 5 or
       * 7. We can find the value of N by looking at R0.0 bits 7:6
       * ("Starting Sample Pair Index (SSPI)") and multiplying by two
       * (since samples are always delivered in pairs). That is, we
       * compute 2*((R0.0 & 0xc0) >> 6) == (R0.0 & 0xc0) >> 5. Then
       * we need to add N to the sequence (0, 0, 0, 0, 1, 1, 1, 1) in
       * case of SIMD8 and sequence (0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2,
       * 2, 3, 3, 3, 3) in case of SIMD16. We compute this sequence by
       * populating a temporary variable with the sequence (0, 1, 2, 3),
       * and then reading from it using vstride=1, width=4, hstride=0.
       * These computations hold good for 4x multisampling as well.
       *
       * For 2x MSAA and SIMD16, we want to use the sequence (0, 1, 0, 1):
       * the first four slots are sample 0 of subspan 0; the next four
       * are sample 1 of subspan 0; the third group is sample 0 of
       * subspan 1, and finally sample 1 of subspan 1.
       */
         .AND(t1, fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_UD)),
      abld.exec_all().SHR(t1, t1, fs_reg(5));

      /* This works for both SIMD8 and SIMD16 */
         .MOV(t2, brw_imm_v(key->persample_2x ? 0x1010 : 0x3210));

      /* This special instruction takes care of setting vstride=1,
       * width=4, hstride=0 of t2 during an ADD instruction.
       */
      abld.emit(FS_OPCODE_SET_SAMPLE_ID, *reg, t1, t2);
      /* As per GL_ARB_sample_shading specification:
       * "When rendering to a non-multisample buffer, or if multisample
       *  rasterization is disabled, gl_SampleID will always be zero."
       */
      abld.MOV(*reg, fs_reg(0));
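
/* Editor's worked example (not part of the original source, hypothetical
 * values) of the computation above: with 8x MSAA, SIMD8, and SSPI bits 7:6
 * of R0.0 equal to 2, t1 = (R0.0 & 0xc0) >> 5 == 4, and adding the
 * (0, 0, 0, 0, 1, 1, 1, 1) sequence read from t2 with the special region
 * gives per-channel sample IDs (4, 4, 4, 4, 5, 5, 5, 5) -- subspan 0 is
 * sample 4 and subspan 1 is sample 5, matching the pairwise delivery
 * described in the comment above.
 */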
fs_visitor::resolve_source_modifiers(const fs_reg &src)
   if (!src.abs && !src.negate)
      return src;

   fs_reg temp = bld.vgrf(src.type);

fs_visitor::emit_discard_jump()
   assert(((brw_wm_prog_data*) this->prog_data)->uses_kill);

   /* For performance, after a discard, jump to the end of the
    * shader if all relevant channels have been discarded.
    */
   fs_inst *discard_jump = bld.emit(FS_OPCODE_DISCARD_JUMP);
   discard_jump->flag_subreg = 1;

   discard_jump->predicate = (dispatch_width == 8)
                             ? BRW_PREDICATE_ALIGN1_ANY8H
                             : BRW_PREDICATE_ALIGN1_ANY16H;
   discard_jump->predicate_inverse = true;

fs_visitor::assign_curb_setup()
   if (dispatch_width == 8) {
      prog_data->dispatch_grf_start_reg = payload.num_regs;
      if (stage == MESA_SHADER_FRAGMENT) {
         brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
         prog_data->dispatch_grf_start_reg_16 = payload.num_regs;
      } else if (stage == MESA_SHADER_COMPUTE) {
         brw_cs_prog_data *prog_data = (brw_cs_prog_data*) this->prog_data;
         prog_data->dispatch_grf_start_reg_16 = payload.num_regs;
         unreachable("Unsupported shader type!");

   prog_data->curb_read_length = ALIGN(stage_prog_data->nr_params, 8) / 8;

   /* Map the offsets in the UNIFORM file to fixed HW regs. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (unsigned int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == UNIFORM) {
            int uniform_nr = inst->src[i].reg + inst->src[i].reg_offset;
            int constant_nr;
            if (uniform_nr >= 0 && uniform_nr < (int) uniforms) {
               constant_nr = push_constant_loc[uniform_nr];
               /* Section 5.11 of the OpenGL 4.1 spec says:
                * "Out-of-bounds reads return undefined values, which include
                *  values from other variables of the active program or zero."
                * Just return the first push constant.
                */

            struct brw_reg brw_reg = brw_vec1_grf(payload.num_regs +

            assert(inst->src[i].stride == 0);
            inst->src[i].file = HW_REG;
            inst->src[i].fixed_hw_reg = byte_offset(
               retype(brw_reg, inst->src[i].type),
               inst->src[i].subreg_offset);

   /* This may be updated in assign_urb_setup or assign_vs_urb_setup. */
   this->first_non_payload_grf = payload.num_regs + prog_data->curb_read_length;

fs_visitor::calculate_urb_setup()
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
   brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;

   memset(prog_data->urb_setup, -1,
          sizeof(prog_data->urb_setup[0]) * VARYING_SLOT_MAX);

   int urb_next = 0;
   /* Figure out where each of the incoming setup attributes lands. */
   if (devinfo->gen >= 6) {
      if (_mesa_bitcount_64(prog->InputsRead &
                            BRW_FS_VARYING_INPUT_MASK) <= 16) {
         /* The SF/SBE pipeline stage can do arbitrary rearrangement of the
          * first 16 varying inputs, so we can put them wherever we want.
          * Just put them in order.
          *
          * This is useful because it means that (a) inputs not used by the
          * fragment shader won't take up valuable register space, and (b) we
          * won't have to recompile the fragment shader if it gets paired with
          * a different vertex (or geometry) shader.
          */
         for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
            if (prog->InputsRead & BRW_FS_VARYING_INPUT_MASK &
                BITFIELD64_BIT(i)) {
               prog_data->urb_setup[i] = urb_next++;
         /* We have enough input varyings that the SF/SBE pipeline stage can't
          * arbitrarily rearrange them to suit our whim; we have to put them
          * in an order that matches the output of the previous pipeline stage
          * (geometry or vertex shader).
          */
         struct brw_vue_map prev_stage_vue_map;
         brw_compute_vue_map(devinfo, &prev_stage_vue_map,
                             key->input_slots_valid,
                             shader_prog->SeparateShader);
         int first_slot = 2 * BRW_SF_URB_ENTRY_READ_OFFSET;
         assert(prev_stage_vue_map.num_slots <= first_slot + 32);
         for (int slot = first_slot; slot < prev_stage_vue_map.num_slots;
            int varying = prev_stage_vue_map.slot_to_varying[slot];
            /* Note that varying == BRW_VARYING_SLOT_COUNT when a slot is
             * unused.
             */
            if (varying != BRW_VARYING_SLOT_COUNT &&
                (prog->InputsRead & BRW_FS_VARYING_INPUT_MASK &
                 BITFIELD64_BIT(varying))) {
               prog_data->urb_setup[varying] = slot - first_slot;
         urb_next = prev_stage_vue_map.num_slots - first_slot;
      /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
      for (unsigned int i = 0; i < VARYING_SLOT_MAX; i++) {
         /* Point size is packed into the header, not as a general attribute */
         if (i == VARYING_SLOT_PSIZ)
            continue;

         if (key->input_slots_valid & BITFIELD64_BIT(i)) {
            /* The back color slot is skipped when the front color is
             * also written to. In addition, some slots can be
             * written in the vertex shader and not read in the
             * fragment shader. So the register number must always be
             * incremented, mapped or not.
             */
            if (_mesa_varying_slot_in_fs((gl_varying_slot) i))
               prog_data->urb_setup[i] = urb_next;

   /*
    * It's a FS only attribute, and we did interpolation for this attribute
    * in SF thread. So, count it here, too.
    *
    * See compile_sf_prog() for more info.
    */
   if (prog->InputsRead & BITFIELD64_BIT(VARYING_SLOT_PNTC))
      prog_data->urb_setup[VARYING_SLOT_PNTC] = urb_next++;

   prog_data->num_varying_inputs = urb_next;

fs_visitor::assign_urb_setup()
   assert(stage == MESA_SHADER_FRAGMENT);
   brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;

   int urb_start = payload.num_regs + prog_data->base.curb_read_length;

   /* Offset all the urb_setup[] indices by the actual position of the
    * setup regs, now that the location of the constants has been chosen.
    */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->opcode == FS_OPCODE_LINTERP) {
         assert(inst->src[1].file == HW_REG);
         inst->src[1].fixed_hw_reg.nr += urb_start;

      if (inst->opcode == FS_OPCODE_CINTERP) {
         assert(inst->src[0].file == HW_REG);
         inst->src[0].fixed_hw_reg.nr += urb_start;

   /* Each attribute is 4 setup channels, each of which is half a reg. */
   this->first_non_payload_grf += prog_data->num_varying_inputs * 2;

fs_visitor::assign_vs_urb_setup()
   brw_vs_prog_data *vs_prog_data = (brw_vs_prog_data *) prog_data;
   int grf, count, slot, channel, attr;

   assert(stage == MESA_SHADER_VERTEX);
   count = _mesa_bitcount_64(vs_prog_data->inputs_read);
   if (vs_prog_data->uses_vertexid || vs_prog_data->uses_instanceid)
      count++;

   /* Each attribute is 4 regs. */
   this->first_non_payload_grf += count * 4;

   unsigned vue_entries =
      MAX2(count, vs_prog_data->base.vue_map.num_slots);

   /* URB entry size is counted in units of 64 bytes (for the 3DSTATE_URB_VS
    * command). Each attribute is 16 bytes (4 floats/dwords), so each unit
    * fits four attributes.
    */
   vs_prog_data->base.urb_entry_size = ALIGN(vue_entries, 4) / 4;
   vs_prog_data->base.urb_read_length = (count + 1) / 2;

   assert(vs_prog_data->base.urb_read_length <= 15);

   /* Rewrite all ATTR file references to the hw grf that they land in. */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == ATTR) {

            if (inst->src[i].reg == VERT_ATTRIB_MAX) {
               /* Attributes come in a contiguous block, ordered by their
                * gl_vert_attrib value. That means we can compute the slot
                * number for an attribute by masking out the enabled
                * attributes before it and counting the bits.
                */
               attr = inst->src[i].reg + inst->src[i].reg_offset / 4;
               slot = _mesa_bitcount_64(vs_prog_data->inputs_read &
                                        BITFIELD64_MASK(attr));

            channel = inst->src[i].reg_offset & 3;

            grf = payload.num_regs +
                  prog_data->curb_read_length +

            inst->src[i].file = HW_REG;
            inst->src[i].fixed_hw_reg =
               stride(byte_offset(retype(brw_vec8_grf(grf, 0), inst->src[i].type),
                                  inst->src[i].subreg_offset),
                      inst->exec_size * inst->src[i].stride,
                      inst->exec_size, inst->src[i].stride);
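
/* Editor's example (not part of the original source) of the slot computation
 * above, with a hypothetical inputs_read mask of 0x31 (attributes 0, 4 and 5
 * enabled): a source reading attribute 5 gets
 * slot = _mesa_bitcount_64(0x31 & BITFIELD64_MASK(5)) == 2, so it lands
 * right after attributes 0 and 4 in the contiguous payload block, regardless
 * of the holes in the mask.
 */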
/**
 * Split large virtual GRFs into separate components if we can.
 *
 * This is mostly duplicated with what brw_fs_vector_splitting does,
 * but that's really conservative because it's afraid of doing
 * splitting that doesn't result in real progress after the rest of
 * the optimization phases, which would cause infinite looping in
 * optimization. We can do it once here, safely. This also has the
 * opportunity to split interpolated values, or maybe even uniforms,
 * which we don't have at the IR level.
 *
 * We want to split, because virtual GRFs are what we register
 * allocate and spill (due to contiguousness requirements for some
 * instructions), and they're what we naturally generate in the
 * codegen process, but most virtual GRFs don't actually need to be
 * contiguous sets of GRFs. If we split, we'll end up with reduced
 * live intervals and better dead code elimination and coalescing.
 */
fs_visitor::split_virtual_grfs()
   int num_vars = this->alloc.count;

   /* Count the total number of registers */
   int vgrf_to_reg[num_vars];
   for (int i = 0; i < num_vars; i++) {
      vgrf_to_reg[i] = reg_count;
      reg_count += alloc.sizes[i];

   /* An array of "split points". For each register slot, this indicates
    * if this slot can be separated from the previous slot. Every time an
    * instruction uses multiple elements of a register (as a source or
    * destination), we mark the used slots as inseparable. Then we go
    * through and split the registers into the smallest pieces we can.
    */
   bool split_points[reg_count];
   memset(split_points, 0, sizeof(split_points));

   /* Mark all used registers as fully splittable */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == GRF) {
         int reg = vgrf_to_reg[inst->dst.reg];
         for (unsigned j = 1; j < this->alloc.sizes[inst->dst.reg]; j++)
            split_points[reg + j] = true;

      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            int reg = vgrf_to_reg[inst->src[i].reg];
            for (unsigned j = 1; j < this->alloc.sizes[inst->src[i].reg]; j++)
               split_points[reg + j] = true;

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == GRF) {
         int reg = vgrf_to_reg[inst->dst.reg] + inst->dst.reg_offset;
         for (int j = 1; j < inst->regs_written; j++)
            split_points[reg + j] = false;

      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            int reg = vgrf_to_reg[inst->src[i].reg] + inst->src[i].reg_offset;
            for (int j = 1; j < inst->regs_read(i); j++)
               split_points[reg + j] = false;

   int new_virtual_grf[reg_count];
   int new_reg_offset[reg_count];

   for (int i = 0; i < num_vars; i++) {
      /* The first one should always be 0 as a quick sanity check. */
      assert(split_points[reg] == false);

      new_reg_offset[reg] = 0;

      for (unsigned j = 1; j < alloc.sizes[i]; j++) {
         /* If this is a split point, reset the offset to 0 and allocate a
          * new virtual GRF for the previous offset many registers
          */
         if (split_points[reg]) {
            assert(offset <= MAX_VGRF_SIZE);
            int grf = alloc.allocate(offset);
            for (int k = reg - offset; k < reg; k++)
               new_virtual_grf[k] = grf;

         new_reg_offset[reg] = offset;

      /* The last one gets the original register number */
      assert(offset <= MAX_VGRF_SIZE);
      alloc.sizes[i] = offset;
      for (int k = reg - offset; k < reg; k++)
         new_virtual_grf[k] = i;

   assert(reg == reg_count);

   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == GRF) {
         reg = vgrf_to_reg[inst->dst.reg] + inst->dst.reg_offset;
         inst->dst.reg = new_virtual_grf[reg];
         inst->dst.reg_offset = new_reg_offset[reg];
         assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);

      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF) {
            reg = vgrf_to_reg[inst->src[i].reg] + inst->src[i].reg_offset;
            inst->src[i].reg = new_virtual_grf[reg];
            inst->src[i].reg_offset = new_reg_offset[reg];
            assert((unsigned)new_reg_offset[reg] < alloc.sizes[new_virtual_grf[reg]]);

   invalidate_live_intervals();
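
/* Editor's sketch (not part of the original source) of the pass above on a
 * single size-4 VGRF: if an instruction writes regs 0-1 as one destination
 * but regs 2 and 3 are only ever touched individually, split_points stays
 * true at slots 2 and 3, so the VGRF is rewritten as one size-2 register
 * plus two size-1 registers -- shrinking live intervals without changing
 * any def or use.
 */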
/**
 * Remove unused virtual GRFs and compact the virtual_grf_* arrays.
 *
 * During code generation, we create tons of temporary variables, many of
 * which get immediately killed and are never used again. Yet, in later
 * optimization and analysis passes, such as compute_live_intervals, we need
 * to loop over all the virtual GRFs. Compacting them can save a lot of
 * overhead.
 */
fs_visitor::compact_virtual_grfs()
   bool progress = false;
   int remap_table[this->alloc.count];
   memset(remap_table, -1, sizeof(remap_table));

   /* Mark which virtual GRFs are used. */
   foreach_block_and_inst(block, const fs_inst, inst, cfg) {
      if (inst->dst.file == GRF)
         remap_table[inst->dst.reg] = 0;

      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF)
            remap_table[inst->src[i].reg] = 0;

   /* Compact the GRF arrays. */
   for (unsigned i = 0; i < this->alloc.count; i++) {
      if (remap_table[i] == -1) {
         /* We just found an unused register. This means that we are
          * actually going to compact something.
          */
         remap_table[i] = new_index;
         alloc.sizes[new_index] = alloc.sizes[i];
         invalidate_live_intervals();

   this->alloc.count = new_index;

   /* Patch all the instructions to use the newly renumbered registers */
   foreach_block_and_inst(block, fs_inst, inst, cfg) {
      if (inst->dst.file == GRF)
         inst->dst.reg = remap_table[inst->dst.reg];

      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file == GRF)
            inst->src[i].reg = remap_table[inst->src[i].reg];

   /* Patch all the references to delta_xy, since they're used in register
    * allocation. If they're unused, switch them to BAD_FILE so we don't
    * think some random VGRF is delta_xy.
    */
   for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
      if (delta_xy[i].file == GRF) {
         if (remap_table[delta_xy[i].reg] != -1) {
            delta_xy[i].reg = remap_table[delta_xy[i].reg];
            delta_xy[i].file = BAD_FILE;

/**
 * Assign UNIFORM file registers to either push constants or pull constants.
 *
 * We allow a fragment shader to have more than the specified minimum
 * maximum number of fragment shader uniform components (64). If
 * there are too many of these, they'd fill up all of register space.
 * So, this will push some of them out to the pull constant buffer and
 * update the program to load them. We also use pull constants for all
 * indirect constant loads because we don't support indirect accesses in
 * the constant register file.
 */
fs_visitor::assign_constant_locations()
   /* Only the first compile (SIMD8 mode) gets to decide on locations. */
   if (dispatch_width != 8)
      return;

   unsigned int num_pull_constants = 0;

   pull_constant_loc = ralloc_array(mem_ctx, int, uniforms);
   memset(pull_constant_loc, -1, sizeof(pull_constant_loc[0]) * uniforms);

   bool is_live[uniforms];
   memset(is_live, 0, sizeof(is_live));

   /* First, we walk through the instructions and do two things:
    *
    * 1) Figure out which uniforms are live.
    *
    * 2) Find all indirect access of uniform arrays and flag them as needing
    *    to go into the pull constant buffer.
    *
    * Note that we don't move constant-indexed accesses to arrays. No
    * testing has been done of the performance impact of this choice.
    */
   foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
      for (int i = 0 ; i < inst->sources; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         if (inst->src[i].reladdr) {
            int uniform = inst->src[i].reg;

            /* If this array isn't already present in the pull constant buffer,
             * add it.
             */
            if (pull_constant_loc[uniform] == -1) {
               assert(param_size[uniform]);
               for (int j = 0; j < param_size[uniform]; j++)
                  pull_constant_loc[uniform + j] = num_pull_constants++;

         /* Mark the one accessed uniform as live */
         int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
         if (constant_nr >= 0 && constant_nr < (int) uniforms)
            is_live[constant_nr] = true;

   /* Only allow 16 registers (128 uniform components) as push constants.
    *
    * Just demote the end of the list. We could probably do better
    * here, demoting things that are rarely used in the program first.
    *
    * If changing this value, note the limitation about total_regs in
    * brw_curbe.c.
    */
   unsigned int max_push_components = 16 * 8;
   unsigned int num_push_constants = 0;

   push_constant_loc = ralloc_array(mem_ctx, int, uniforms);

   for (unsigned int i = 0; i < uniforms; i++) {
      if (!is_live[i] || pull_constant_loc[i] != -1) {
         /* This UNIFORM register is either dead, or has already been demoted
          * to a pull const. Mark it as no longer living in the param[] array.
          */
         push_constant_loc[i] = -1;
         continue;

      if (num_push_constants < max_push_components) {
         /* Retain as a push constant. Record the location in the params[]
          * array.
          */
         push_constant_loc[i] = num_push_constants++;
         /* Demote to a pull constant. */
         push_constant_loc[i] = -1;
         pull_constant_loc[i] = num_pull_constants++;

   stage_prog_data->nr_params = num_push_constants;
   stage_prog_data->nr_pull_params = num_pull_constants;

   /* Up until now, the param[] array has been indexed by reg + reg_offset
    * of UNIFORM registers. Move pull constants into pull_param[] and
    * condense param[] to only contain the uniforms we chose to push.
    *
    * NOTE: Because we are condensing the params[] array, we know that
    * push_constant_loc[i] <= i and we can do it in one smooth loop without
    * having to make a copy.
    */
   for (unsigned int i = 0; i < uniforms; i++) {
      const gl_constant_value *value = stage_prog_data->param[i];

      if (pull_constant_loc[i] != -1) {
         stage_prog_data->pull_param[pull_constant_loc[i]] = value;
      } else if (push_constant_loc[i] != -1) {
         stage_prog_data->param[push_constant_loc[i]] = value;
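
/* Editor's example (not part of the original source, hypothetical counts) of
 * the push/pull split above: with 140 live uniform components and no
 * indirect addressing, the first 128 stay in param[] with
 * push_constant_loc[i] == i and the last 12 are demoted, getting
 * pull_constant_loc entries and a copy into pull_param[]; a reladdr-accessed
 * array is forced wholly into the pull buffer no matter where it falls in
 * the list.
 */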
/**
 * Replace UNIFORM register file access with either UNIFORM_PULL_CONSTANT_LOAD
 * or VARYING_PULL_CONSTANT_LOAD instructions which load values into VGRFs.
 */
fs_visitor::demote_pull_constants()
   foreach_block_and_inst (block, fs_inst, inst, cfg) {
      for (int i = 0; i < inst->sources; i++) {
         if (inst->src[i].file != UNIFORM)
            continue;

         unsigned location = inst->src[i].reg + inst->src[i].reg_offset;
         if (location >= uniforms) /* Out of bounds access */
            continue;

         pull_index = pull_constant_loc[location];

         if (pull_index == -1)
            continue;

         /* Set up the annotation tracking for new generated instructions. */
         const fs_builder ibld(this, block, inst);
         fs_reg surf_index(stage_prog_data->binding_table.pull_constants_start);
         fs_reg dst = vgrf(glsl_type::float_type);

         assert(inst->src[i].stride == 0);

         /* Generate a pull load into dst. */
         if (inst->src[i].reladdr) {
            VARYING_PULL_CONSTANT_LOAD(ibld, dst,
                                       *inst->src[i].reladdr,
            inst->src[i].reladdr = NULL;
            inst->src[i].stride = 1;
            const fs_builder ubld = ibld.exec_all().group(8, 0);
            fs_reg offset = fs_reg((unsigned)(pull_index * 4) & ~15);
            ubld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD,
                      dst, surf_index, offset);
            inst->src[i].set_smear(pull_index & 3);

         /* Rewrite the instruction to use the temporary VGRF. */
         inst->src[i].file = GRF;
         inst->src[i].reg = dst.reg;
         inst->src[i].reg_offset = 0;

   invalidate_live_intervals();
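
/* Editor's note (not part of the original source) on the uniform case above:
 * the pull load always fetches an aligned block of four constants, so
 * set_smear(pull_index & 3) repoints the source at the one component within
 * that block that the instruction actually wanted, turning the source into a
 * scalar region the way UNIFORM sources behave.
 */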
1957 fs_visitor::opt_algebraic()
1959 bool progress = false;
1961 foreach_block_and_inst(block, fs_inst, inst, cfg) {
1962 switch (inst->opcode) {
1963 case BRW_OPCODE_MOV:
1964 if (inst->src[0].file != IMM)
1967 if (inst->saturate) {
1968 if (inst->dst.type != inst->src[0].type)
1969 assert(!"unimplemented: saturate mixed types");
1971 if (brw_saturate_immediate(inst->dst.type,
1972 &inst->src[0].fixed_hw_reg)) {
1973 inst->saturate = false;
1979 case BRW_OPCODE_MUL:
1980 if (inst->src[1].file != IMM)
1984 if (inst->src[1].is_one()) {
1985 inst->opcode = BRW_OPCODE_MOV;
1986 inst->src[1] = reg_undef;
1992 if (inst->src[1].is_negative_one()) {
1993 inst->opcode = BRW_OPCODE_MOV;
1994 inst->src[0].negate = !inst->src[0].negate;
1995 inst->src[1] = reg_undef;
2001 if (inst->src[1].is_zero()) {
2002 inst->opcode = BRW_OPCODE_MOV;
2003 inst->src[0] = inst->src[1];
2004 inst->src[1] = reg_undef;
2009 if (inst->src[0].file == IMM) {
2010 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2011 inst->opcode = BRW_OPCODE_MOV;
2012 inst->src[0].fixed_hw_reg.dw1.f *= inst->src[1].fixed_hw_reg.dw1.f;
2013 inst->src[1] = reg_undef;
2018 case BRW_OPCODE_ADD:
2019 if (inst->src[1].file != IMM)
2023 if (inst->src[1].is_zero()) {
2024 inst->opcode = BRW_OPCODE_MOV;
2025 inst->src[1] = reg_undef;
2030 if (inst->src[0].file == IMM) {
2031 assert(inst->src[0].type == BRW_REGISTER_TYPE_F);
2032 inst->opcode = BRW_OPCODE_MOV;
2033 inst->src[0].fixed_hw_reg.dw1.f += inst->src[1].fixed_hw_reg.dw1.f;
2034 inst->src[1] = reg_undef;
2040 if (inst->src[0].equals(inst->src[1])) {
2041 inst->opcode = BRW_OPCODE_MOV;
2042 inst->src[1] = reg_undef;
2047 case BRW_OPCODE_LRP:
2048 if (inst->src[1].equals(inst->src[2])) {
2049 inst->opcode = BRW_OPCODE_MOV;
2050 inst->src[0] = inst->src[1];
2051 inst->src[1] = reg_undef;
2052 inst->src[2] = reg_undef;
2057 case BRW_OPCODE_CMP:
2058 if (inst->conditional_mod == BRW_CONDITIONAL_GE &&
2060 inst->src[0].negate &&
2061 inst->src[1].is_zero()) {
2062 inst->src[0].abs = false;
2063 inst->src[0].negate = false;
2064 inst->conditional_mod = BRW_CONDITIONAL_Z;
2069 case BRW_OPCODE_SEL:
2070 if (inst->src[0].equals(inst->src[1])) {
2071 inst->opcode = BRW_OPCODE_MOV;
2072 inst->src[1] = reg_undef;
2073 inst->predicate = BRW_PREDICATE_NONE;
2074 inst->predicate_inverse = false;
2076 } else if (inst->saturate && inst->src[1].file == IMM) {
2077 switch (inst->conditional_mod) {
2078 case BRW_CONDITIONAL_LE:
2079 case BRW_CONDITIONAL_L:
2080 switch (inst->src[1].type) {
2081 case BRW_REGISTER_TYPE_F:
2082 if (inst->src[1].fixed_hw_reg.dw1.f >= 1.0f) {
2083 inst->opcode = BRW_OPCODE_MOV;
2084 inst->src[1] = reg_undef;
2085 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2093 case BRW_CONDITIONAL_GE:
2094 case BRW_CONDITIONAL_G:
2095 switch (inst->src[1].type) {
2096 case BRW_REGISTER_TYPE_F:
2097 if (inst->src[1].fixed_hw_reg.dw1.f <= 0.0f) {
2098 inst->opcode = BRW_OPCODE_MOV;
2099 inst->src[1] = reg_undef;
2100 inst->conditional_mod = BRW_CONDITIONAL_NONE;
2112 case BRW_OPCODE_MAD:
2113 if (inst->src[1].is_zero() || inst->src[2].is_zero()) {
2114 inst->opcode = BRW_OPCODE_MOV;
2115 inst->src[1] = reg_undef;
2116 inst->src[2] = reg_undef;
2118 } else if (inst->src[0].is_zero()) {
2119 inst->opcode = BRW_OPCODE_MUL;
2120 inst->src[0] = inst->src[2];
2121 inst->src[2] = reg_undef;
2123 } else if (inst->src[1].is_one()) {
2124 inst->opcode = BRW_OPCODE_ADD;
2125 inst->src[1] = inst->src[2];
2126 inst->src[2] = reg_undef;
2128 } else if (inst->src[2].is_one()) {
2129 inst->opcode = BRW_OPCODE_ADD;
2130 inst->src[2] = reg_undef;
2132 } else if (inst->src[1].file == IMM && inst->src[2].file == IMM) {
2133 inst->opcode = BRW_OPCODE_ADD;
2134 inst->src[1].fixed_hw_reg.dw1.f *= inst->src[2].fixed_hw_reg.dw1.f;
2135 inst->src[2] = reg_undef;
2139 case SHADER_OPCODE_RCP: {
2140 fs_inst *prev = (fs_inst *)inst->prev;
2141 if (prev->opcode == SHADER_OPCODE_SQRT) {
2142 if (inst->src[0].equals(prev->dst)) {
2143 inst->opcode = SHADER_OPCODE_RSQ;
2144 inst->src[0] = prev->src[0];
2150 case SHADER_OPCODE_BROADCAST:
2151 if (is_uniform(inst->src[0])) {
2152 inst->opcode = BRW_OPCODE_MOV;
2154 inst->force_writemask_all = true;
2156 } else if (inst->src[1].file == IMM) {
2157 inst->opcode = BRW_OPCODE_MOV;
2158 inst->src[0] = component(inst->src[0],
2159 inst->src[1].fixed_hw_reg.dw1.ud);
2161 inst->force_writemask_all = true;
2170 /* Swap if src[0] is immediate. */
2171 if (progress && inst->is_commutative()) {
2172 if (inst->src[0].file == IMM) {
2173 fs_reg tmp = inst->src[1];
2174 inst->src[1] = inst->src[0];
2183 * Optimize sample messages that have constant zero values for the trailing
2184 * texture coordinates. We can just reduce the message length for these
2185 * instructions instead of reserving a register for it. Trailing parameters
2186 * that aren't sent default to zero anyway. This will cause the dead code
2187 * eliminator to remove the MOV instruction that would otherwise be emitted to
2188 * set up the zero value.
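*
* As a hypothetical SIMD8 illustration: a sample message whose payload
* ends in (..., u, v, 0.0f) with mlen 4 can be shrunk to (..., u, v)
* with mlen 3, since the sampler supplies zero for the missing trailing
* parameter and the MOV that loaded the zero becomes dead.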
2191 fs_visitor::opt_zero_samples()
2193 /* Gen4 infers the texturing opcode based on the message length so we can't
2196 if (devinfo->gen < 5)
2199 bool progress = false;
2201 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2202 if (!inst->is_tex())
2205 fs_inst *load_payload = (fs_inst *) inst->prev;
2207 if (load_payload->is_head_sentinel() ||
2208 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2211 /* We don't want to remove the message header or the first parameter.
2212 * Removing the first parameter is not allowed; see the Haswell PRM
2213 * volume 7, page 149:
2215 * "Parameter 0 is required except for the sampleinfo message, which
2216 * has no parameter 0"
2218 while (inst->mlen > inst->header_size + inst->exec_size / 8 &&
2219 load_payload->src[(inst->mlen - inst->header_size) /
2220 (inst->exec_size / 8) +
2221 inst->header_size - 1].is_zero()) {
2222 inst->mlen -= inst->exec_size / 8;
2228 invalidate_live_intervals();
2234 * Optimize sample messages which are followed by the final RT write.
2236 * CHV and GEN9+ can mark a texturing SEND instruction with EOT to have its
2237 * results sent directly to the framebuffer, bypassing the EU. Recognize the
2238 * final texturing results copied to the framebuffer write payload and modify
2239 * them to write to the framebuffer directly.
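*
* Roughly, the tail of the shader goes from (register numbers
* hypothetical):
*
*    tex(16)       g100  ...
*    load_payload  g110  g100 ...
*    fb_write(16)  g110  (EOT)
*
* to a single texturing SEND that also terminates the thread:
*
*    tex(16)       null  ...  (EOT, render target index in offset bits 24+)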
2242 fs_visitor::opt_sampler_eot()
2244 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2246 if (stage != MESA_SHADER_FRAGMENT)
2249 if (devinfo->gen < 9 && !devinfo->is_cherryview)
2252 /* FINISHME: It should be possible to implement this optimization when there
2253 * are multiple drawbuffers.
2255 if (key->nr_color_regions != 1)
2258 /* Look for a texturing instruction immediately before the final FB_WRITE. */
2259 bblock_t *block = cfg->blocks[cfg->num_blocks - 1];
2260 fs_inst *fb_write = (fs_inst *)block->end();
2261 assert(fb_write->eot);
2262 assert(fb_write->opcode == FS_OPCODE_FB_WRITE);
2264 fs_inst *tex_inst = (fs_inst *) fb_write->prev;
2266 /* There wasn't one; nothing to do. */
2267 if (unlikely(tex_inst->is_head_sentinel()) || !tex_inst->is_tex())
2270 /* This optimization doesn't seem to work for textureGather for some
2271 * reason. I can't find any documentation or known workarounds to indicate
2272 * that this is expected, but considering that it is probably pretty
2273 * unlikely that a shader would directly write out the results from
2274 * textureGather we might as well just disable it.
2276 if (tex_inst->opcode == SHADER_OPCODE_TG4 ||
2277 tex_inst->opcode == SHADER_OPCODE_TG4_OFFSET)
2280 /* If there's no header present, we need to munge the LOAD_PAYLOAD as well.
2281 * It's very likely to be the previous instruction.
2283 fs_inst *load_payload = (fs_inst *) tex_inst->prev;
2284 if (load_payload->is_head_sentinel() ||
2285 load_payload->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
2288 assert(!tex_inst->eot); /* We can't get here twice */
2289 assert((tex_inst->offset & (0xff << 24)) == 0);
2291 const fs_builder ibld(this, block, tex_inst);
2293 tex_inst->offset |= fb_write->target << 24;
2294 tex_inst->eot = true;
2295 tex_inst->dst = ibld.null_reg_ud();
2296 fb_write->remove(cfg->blocks[cfg->num_blocks - 1]);
2298 /* If a header is present, marking the eot is sufficient. Otherwise, we need
2299 * to create a new LOAD_PAYLOAD command with the same sources and a space
2300 * saved for the header. Using a new destination register not only makes sure
2301 * we have enough space, but it will make sure the dead code eliminator kills
2302 * the instruction that this will replace.
2304 if (tex_inst->header_size != 0)
2307 fs_reg send_header = ibld.vgrf(BRW_REGISTER_TYPE_F,
2308 load_payload->sources + 1);
2309 fs_reg *new_sources =
2310 ralloc_array(mem_ctx, fs_reg, load_payload->sources + 1);
2312 new_sources[0] = fs_reg();
2313 for (int i = 0; i < load_payload->sources; i++)
2314 new_sources[i+1] = load_payload->src[i];
2316 /* The LOAD_PAYLOAD helper seems like the obvious choice here. However, it
2317 * requires a lot of information about the sources to appropriately figure
2318 * out the number of registers needed. Given this stage in our
2319 * optimization, we may not have the appropriate GRFs required by
2320 * LOAD_PAYLOAD at this point (copy propagation). Therefore, we need to
2321 * manually emit the instruction.
2323 fs_inst *new_load_payload = new(mem_ctx) fs_inst(SHADER_OPCODE_LOAD_PAYLOAD,
2324 load_payload->exec_size,
2325 send_header,
2326 new_sources,
2327 load_payload->sources + 1);
2329 new_load_payload->regs_written = load_payload->regs_written + 1;
2330 new_load_payload->header_size = 1;
2332 tex_inst->header_size = 1;
2333 tex_inst->insert_before(cfg->blocks[cfg->num_blocks - 1], new_load_payload);
2334 tex_inst->src[0] = send_header;
2340 fs_visitor::opt_register_renaming()
2342 bool progress = false;
2345 int remap[alloc.count];
2346 memset(remap, -1, sizeof(int) * alloc.count);
2348 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2349 if (inst->opcode == BRW_OPCODE_IF || inst->opcode == BRW_OPCODE_DO) {
2351 } else if (inst->opcode == BRW_OPCODE_ENDIF ||
2352 inst->opcode == BRW_OPCODE_WHILE) {
2356 /* Rewrite instruction sources. */
2357 for (int i = 0; i < inst->sources; i++) {
2358 if (inst->src[i].file == GRF &&
2359 remap[inst->src[i].reg] != -1 &&
2360 remap[inst->src[i].reg] != inst->src[i].reg) {
2361 inst->src[i].reg = remap[inst->src[i].reg];
2366 const int dst = inst->dst.reg;
2369 inst->dst.file == GRF &&
2370 alloc.sizes[inst->dst.reg] == inst->exec_size / 8 &&
2371 !inst->is_partial_write()) {
2372 if (remap[dst] == -1) {
2375 remap[dst] = alloc.allocate(inst->exec_size / 8);
2376 inst->dst.reg = remap[dst];
2379 } else if (inst->dst.file == GRF &&
2381 remap[dst] != dst) {
2382 inst->dst.reg = remap[dst];
2388 invalidate_live_intervals();
2390 for (unsigned i = 0; i < ARRAY_SIZE(delta_xy); i++) {
2391 if (delta_xy[i].file == GRF && remap[delta_xy[i].reg] != -1) {
2392 delta_xy[i].reg = remap[delta_xy[i].reg];
2401 * Remove redundant or useless discard jumps.
2403 * For example, we can eliminate jumps in the following sequence:
2405 * discard-jump (redundant with the next jump)
2406 * discard-jump (useless; jumps to the next instruction)
2410 fs_visitor::opt_redundant_discard_jumps()
2412 bool progress = false;
2414 bblock_t *last_bblock = cfg->blocks[cfg->num_blocks - 1];
2416 fs_inst *placeholder_halt = NULL;
2417 foreach_inst_in_block_reverse(fs_inst, inst, last_bblock) {
2418 if (inst->opcode == FS_OPCODE_PLACEHOLDER_HALT) {
2419 placeholder_halt = inst;
2424 if (!placeholder_halt)
2427 /* Delete any HALTs immediately before the placeholder halt. */
2428 for (fs_inst *prev = (fs_inst *) placeholder_halt->prev;
2429 !prev->is_head_sentinel() && prev->opcode == FS_OPCODE_DISCARD_JUMP;
2430 prev = (fs_inst *) placeholder_halt->prev) {
2431 prev->remove(last_bblock);
2436 invalidate_live_intervals();
2442 fs_visitor::compute_to_mrf()
2444 bool progress = false;
2447 /* No MRFs on Gen >= 7. */
2448 if (devinfo->gen >= 7)
2451 calculate_live_intervals();
2453 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2457 if (inst->opcode != BRW_OPCODE_MOV ||
2458 inst->is_partial_write() ||
2459 inst->dst.file != MRF || inst->src[0].file != GRF ||
2460 inst->dst.type != inst->src[0].type ||
2461 inst->src[0].abs || inst->src[0].negate ||
2462 !inst->src[0].is_contiguous() ||
2463 inst->src[0].subreg_offset)
2466 /* Work out which hardware MRF registers are written by this
2469 int mrf_low = inst->dst.reg & ~BRW_MRF_COMPR4;
2471 if (inst->dst.reg & BRW_MRF_COMPR4) {
2472 mrf_high = mrf_low + 4;
2473 } else if (inst->exec_size == 16) {
2474 mrf_high = mrf_low + 1;
2479 /* Can't compute-to-MRF this GRF if someone else was going to
2480 * read it later.
2481 */
2482 if (this->virtual_grf_end[inst->src[0].reg] > ip)
2485 /* Found a MOV of a GRF to an MRF. Let's see if we can rewrite the
2486 * instruction that produced the GRF value to write into the MRF directly.
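*
* A minimal sketch of the rewrite this loop is looking for (virtual
* register and MRF names made up):
*
*    add  vgrf4, vgrf2, vgrf3
*    mov  m2, vgrf4
*
* becomes
*
*    add  m2, vgrf2, vgrf3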
2488 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst, block) {
2489 if (scan_inst->dst.file == GRF &&
2490 scan_inst->dst.reg == inst->src[0].reg) {
2491 /* Found the last instruction that wrote the register we want
2492 * to turn into a compute-to-MRF.
2495 /* If this one instruction didn't populate all the
2496 * channels, bail. We might be able to rewrite everything
2497 * that writes that reg, but it would require smarter
2498 * tracking to delay the rewriting until complete success.
2500 if (scan_inst->is_partial_write())
2503 /* Things returning more than one register would need us to
2504 * understand coalescing out more than one MOV at a time.
2506 if (scan_inst->regs_written > scan_inst->exec_size / 8)
2509 /* SEND instructions can't have MRF as a destination. */
2510 if (scan_inst->mlen)
2513 if (devinfo->gen == 6) {
2514 /* gen6 math instructions must have the destination be
2515 * GRF, so no compute-to-MRF for them.
2517 if (scan_inst->is_math()) {
2522 if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
2523 /* Found the creator of our MRF's source value. */
2524 scan_inst->dst.file = MRF;
2525 scan_inst->dst.reg = inst->dst.reg;
2526 scan_inst->saturate |= inst->saturate;
2527 inst->remove(block);
2533 /* We don't handle control flow here. Most computation of
2534 * values that end up in MRFs is shortly before the MRF
2535 * write anyway.
2536 */
2537 if (block->start() == scan_inst)
2540 /* You can't read from an MRF, so if someone else reads our
2541 * MRF's source GRF that we wanted to rewrite, that stops us.
2543 bool interfered = false;
2544 for (int i = 0; i < scan_inst->sources; i++) {
2545 if (scan_inst->src[i].file == GRF &&
2546 scan_inst->src[i].reg == inst->src[0].reg &&
2547 scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
2554 if (scan_inst->dst.file == MRF) {
2555 /* If somebody else writes our MRF here, we can't
2556 * compute-to-MRF before that.
2558 int scan_mrf_low = scan_inst->dst.reg & ~BRW_MRF_COMPR4;
2561 if (scan_inst->dst.reg & BRW_MRF_COMPR4) {
2562 scan_mrf_high = scan_mrf_low + 4;
2563 } else if (scan_inst->exec_size == 16) {
2564 scan_mrf_high = scan_mrf_low + 1;
2566 scan_mrf_high = scan_mrf_low;
2569 if (mrf_low == scan_mrf_low ||
2570 mrf_low == scan_mrf_high ||
2571 mrf_high == scan_mrf_low ||
2572 mrf_high == scan_mrf_high) {
2577 if (scan_inst->mlen > 0 && scan_inst->base_mrf != -1) {
2578 /* Found a SEND instruction, which means that there are
2579 * live values in MRFs from base_mrf to base_mrf +
2580 * scan_inst->mlen - 1. Don't go pushing our MRF write up
2581 * above it.
2582 */
2583 if (mrf_low >= scan_inst->base_mrf &&
2584 mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
2587 if (mrf_high >= scan_inst->base_mrf &&
2588 mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
2596 invalidate_live_intervals();
2602 * Eliminate FIND_LIVE_CHANNEL instructions occurring outside any control
2603 * flow. We could probably do better here with some form of divergence
2604 * analysis.
2605 */
2607 fs_visitor::eliminate_find_live_channel()
2609 bool progress = false;
2612 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
2613 switch (inst->opcode) {
2619 case BRW_OPCODE_ENDIF:
2620 case BRW_OPCODE_WHILE:
2624 case FS_OPCODE_DISCARD_JUMP:
2625 /* This can potentially make control flow non-uniform until the end
2626 * of the program.
2627 */
2628 return progress;
2630 case SHADER_OPCODE_FIND_LIVE_CHANNEL:
2632 inst->opcode = BRW_OPCODE_MOV;
2633 inst->src[0] = fs_reg(0);
2634 inst->sources = 1;
2635 inst->force_writemask_all = true;
2649 * Once we've generated code, try to convert normal FS_OPCODE_FB_WRITE
2650 * instructions to FS_OPCODE_REP_FB_WRITE.
2653 fs_visitor::emit_repclear_shader()
2655 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
2656 int base_mrf = 1;
2657 int color_mrf = base_mrf + 2;
2658 fs_inst *mov;
2660 if (uniforms == 1) {
2661 mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
2662 fs_reg(UNIFORM, 0, BRW_REGISTER_TYPE_F));
2664 struct brw_reg reg =
2665 brw_reg(BRW_GENERAL_REGISTER_FILE,
2666 2, 3, 0, 0, BRW_REGISTER_TYPE_F,
2667 BRW_VERTICAL_STRIDE_8,
2668 BRW_WIDTH_2,
2669 BRW_HORIZONTAL_STRIDE_4, BRW_SWIZZLE_XYZW, WRITEMASK_XYZW);
2671 mov = bld.exec_all().MOV(vec4(brw_message_reg(color_mrf)),
2672 fs_reg(reg));
2673 }
2675 fs_inst *write;
2676 if (key->nr_color_regions == 1) {
2677 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
2678 write->saturate = key->clamp_fragment_color;
2679 write->base_mrf = color_mrf;
2681 write->header_size = 0;
2684 assume(key->nr_color_regions > 0);
2685 for (int i = 0; i < key->nr_color_regions; ++i) {
2686 write = bld.emit(FS_OPCODE_REP_FB_WRITE);
2687 write->saturate = key->clamp_fragment_color;
2688 write->base_mrf = base_mrf;
2690 write->header_size = 2;
2698 assign_constant_locations();
2699 assign_curb_setup();
2701 /* Now that we have the uniform assigned, go ahead and force it to a vec4. */
2702 if (uniforms == 1) {
2703 assert(mov->src[0].file == HW_REG);
2704 mov->src[0] = brw_vec4_grf(mov->src[0].fixed_hw_reg.nr, 0);
2709 * Walks through basic blocks, looking for repeated MRF writes and
2710 * removing the later ones.
2713 fs_visitor::remove_duplicate_mrf_writes()
2715 fs_inst *last_mrf_move[16];
2716 bool progress = false;
2718 /* Need to update the MRF tracking for compressed instructions. */
2719 if (dispatch_width == 16)
2720 return false;
2722 memset(last_mrf_move, 0, sizeof(last_mrf_move));
2724 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
2725 if (inst->is_control_flow()) {
2726 memset(last_mrf_move, 0, sizeof(last_mrf_move));
2729 if (inst->opcode == BRW_OPCODE_MOV &&
2730 inst->dst.file == MRF) {
2731 fs_inst *prev_inst = last_mrf_move[inst->dst.reg];
2732 if (prev_inst && inst->equals(prev_inst)) {
2733 inst->remove(block);
2739 /* Clear out the last-write records for MRFs that were overwritten. */
2740 if (inst->dst.file == MRF) {
2741 last_mrf_move[inst->dst.reg] = NULL;
2744 if (inst->mlen > 0 && inst->base_mrf != -1) {
2745 /* Found a SEND instruction, which will include two or fewer
2746 * implied MRF writes. We could do better here.
2748 for (int i = 0; i < implied_mrf_writes(inst); i++) {
2749 last_mrf_move[inst->base_mrf + i] = NULL;
2753 /* Clear out any MRF move records whose sources got overwritten. */
2754 if (inst->dst.file == GRF) {
2755 for (unsigned int i = 0; i < ARRAY_SIZE(last_mrf_move); i++) {
2756 if (last_mrf_move[i] &&
2757 last_mrf_move[i]->src[0].reg == inst->dst.reg) {
2758 last_mrf_move[i] = NULL;
2763 if (inst->opcode == BRW_OPCODE_MOV &&
2764 inst->dst.file == MRF &&
2765 inst->src[0].file == GRF &&
2766 !inst->is_partial_write()) {
2767 last_mrf_move[inst->dst.reg] = inst;
2772 invalidate_live_intervals();
2778 clear_deps_for_inst_src(fs_inst *inst, bool *deps, int first_grf, int grf_len)
2780 /* Clear the flag for registers that actually got read (as expected). */
2781 for (int i = 0; i < inst->sources; i++) {
2782 int grf;
2783 if (inst->src[i].file == GRF) {
2784 grf = inst->src[i].reg;
2785 } else if (inst->src[i].file == HW_REG &&
2786 inst->src[i].fixed_hw_reg.file == BRW_GENERAL_REGISTER_FILE) {
2787 grf = inst->src[i].fixed_hw_reg.nr;
2788 } else {
2789 continue;
2790 }
2792 if (grf >= first_grf &&
2793 grf < first_grf + grf_len) {
2794 deps[grf - first_grf] = false;
2795 if (inst->exec_size == 16)
2796 deps[grf - first_grf + 1] = false;
2802 * Implements this workaround for the original 965:
2804 * "[DevBW, DevCL] Implementation Restrictions: As the hardware does not
2805 * check for post destination dependencies on this instruction, software
2806 * must ensure that there is no destination hazard for the case of ‘write
2807 * followed by a posted write’ shown in the following example.
2809 * 1. mov r3 0
2810 * 2. send r3.xy <rest of send instruction>
2811 * 3. mov r2 r3
2813 * Due to no post-destination dependency check on the ‘send’, the above
2814 * code sequence could have two instructions (1 and 2) in flight at the
2815 * same time that both consider ‘r3’ as the target of their final writes.
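*
* The workaround implemented below resolves the hazard by emitting, ahead
* of the send, a MOV that merely sources each destination register still
* flagged as pending; a read along the lines of "mov(8) null g3<8,8,1>F"
* is enough to make the hardware wait for the outstanding write to land.
* (The exact shape of the resolving instruction is whatever
* DEP_RESOLVE_MOV expands to; the syntax here is only illustrative.)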
2818 fs_visitor::insert_gen4_pre_send_dependency_workarounds(bblock_t *block,
2821 int write_len = inst->regs_written;
2822 int first_write_grf = inst->dst.reg;
2823 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
2824 assert(write_len < (int)sizeof(needs_dep) - 1);
2826 memset(needs_dep, false, sizeof(needs_dep));
2827 memset(needs_dep, true, write_len);
2829 clear_deps_for_inst_src(inst, needs_dep, first_write_grf, write_len);
2831 /* Walk backwards looking for writes to registers we're writing which
2832 * aren't read since being written. If we hit the start of the program,
2833 * we assume that there are no outstanding dependencies on entry to the
2834 * program.
2835 */
2836 foreach_inst_in_block_reverse_starting_from(fs_inst, scan_inst, inst, block) {
2837 /* If we hit control flow, assume that there *are* outstanding
2838 * dependencies, and force their cleanup before our instruction.
2840 if (block->start() == scan_inst) {
2841 for (int i = 0; i < write_len; i++) {
2843 DEP_RESOLVE_MOV(fs_builder(this, block, inst),
2844 first_write_grf + i);
2849 /* We insert our reads as late as possible on the assumption that any
2850 * instruction but a MOV that might have left us an outstanding
2851 * dependency has more latency than a MOV.
2853 if (scan_inst->dst.file == GRF) {
2854 for (int i = 0; i < scan_inst->regs_written; i++) {
2855 int reg = scan_inst->dst.reg + i;
2857 if (reg >= first_write_grf &&
2858 reg < first_write_grf + write_len &&
2859 needs_dep[reg - first_write_grf]) {
2860 DEP_RESOLVE_MOV(fs_builder(this, block, inst), reg);
2861 needs_dep[reg - first_write_grf] = false;
2862 if (scan_inst->exec_size == 16)
2863 needs_dep[reg - first_write_grf + 1] = false;
2868 /* Clear the flag for registers that actually got read (as expected). */
2869 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
2871 /* Continue the loop only if we haven't resolved all the dependencies */
2872 int i;
2873 for (i = 0; i < write_len; i++) {
2883 * Implements this workaround for the original 965:
2885 * "[DevBW, DevCL] Errata: A destination register from a send can not be
2886 * used as a destination register until after it has been sourced by an
2887 * instruction with a different destination register.
2890 fs_visitor::insert_gen4_post_send_dependency_workarounds(bblock_t *block, fs_inst *inst)
2892 int write_len = inst->regs_written;
2893 int first_write_grf = inst->dst.reg;
2894 bool needs_dep[BRW_MAX_MRF(devinfo->gen)];
2895 assert(write_len < (int)sizeof(needs_dep) - 1);
2897 memset(needs_dep, false, sizeof(needs_dep));
2898 memset(needs_dep, true, write_len);
2899 /* Walk forwards looking for writes to registers we're writing which aren't
2900 * read before being written.
2902 foreach_inst_in_block_starting_from(fs_inst, scan_inst, inst, block) {
2903 /* If we hit control flow, force resolve all remaining dependencies. */
2904 if (block->end() == scan_inst) {
2905 for (int i = 0; i < write_len; i++) {
2907 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
2908 first_write_grf + i);
2913 /* Clear the flag for registers that actually got read (as expected). */
2914 clear_deps_for_inst_src(scan_inst, needs_dep, first_write_grf, write_len);
2916 /* We insert our reads as late as possible since they're reading the
2917 * result of a SEND, which has massive latency.
2919 if (scan_inst->dst.file == GRF &&
2920 scan_inst->dst.reg >= first_write_grf &&
2921 scan_inst->dst.reg < first_write_grf + write_len &&
2922 needs_dep[scan_inst->dst.reg - first_write_grf]) {
2923 DEP_RESOLVE_MOV(fs_builder(this, block, scan_inst),
2924 scan_inst->dst.reg);
2925 needs_dep[scan_inst->dst.reg - first_write_grf] = false;
2928 /* Continue the loop only if we haven't resolved all the dependencies */
2929 int i;
2930 for (i = 0; i < write_len; i++) {
2940 fs_visitor::insert_gen4_send_dependency_workarounds()
2942 if (devinfo->gen != 4 || devinfo->is_g4x)
2945 bool progress = false;
2947 /* Note that we're done with register allocation, so GRF fs_regs always
2948 * have a .reg_offset of 0.
2951 foreach_block_and_inst(block, fs_inst, inst, cfg) {
2952 if (inst->mlen != 0 && inst->dst.file == GRF) {
2953 insert_gen4_pre_send_dependency_workarounds(block, inst);
2954 insert_gen4_post_send_dependency_workarounds(block, inst);
2960 invalidate_live_intervals();
2964 * Turns the generic expression-style uniform pull constant load instruction
2965 * into a hardware-specific series of instructions for loading a pull
2968 * The expression style allows the CSE pass before this to optimize out
2969 * repeated loads from the same offset, and gives the pre-register-allocation
2970 * scheduling full flexibility, while the conversion to native instructions
2971 * allows the post-register-allocation scheduler the best information
2972 * possible.
2973 *
2974 * Note that execution masking for setting up pull constant loads is special:
2975 * the channels that need to be written are unrelated to the current execution
2976 * mask, since a later instruction will use one of the result channels as a
2977 * source operand for all 8 or 16 of its channels.
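*
* As a rough sketch of what the Gen7+ path below produces (names
* informal, not exact IR syntax):
*
*    set_simd4x2_offset(8)  payload  <byte_offset / 4>
*    pull_const_load_gen7   dst      surf_index, payload
*
* On Gen < 7 the load instead stays a single instruction that sends from
* a fixed MRF (m14) reserved for this purpose.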
2980 fs_visitor::lower_uniform_pull_constant_loads()
2982 foreach_block_and_inst (block, fs_inst, inst, cfg) {
2983 if (inst->opcode != FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD)
2986 if (devinfo->gen >= 7) {
2987 /* The offset arg before was a vec4-aligned byte offset. We need to
2988 * turn it into a dword offset.
2990 fs_reg const_offset_reg = inst->src[1];
2991 assert(const_offset_reg.file == IMM &&
2992 const_offset_reg.type == BRW_REGISTER_TYPE_UD);
2993 const_offset_reg.fixed_hw_reg.dw1.ud /= 4;
2995 fs_reg payload, offset;
2996 if (devinfo->gen >= 9) {
2997 /* We have to use a message header on Skylake to get SIMD4x2
2998 * mode. Reserve space for the register.
3000 offset = payload = fs_reg(GRF, alloc.allocate(2));
3001 offset.reg_offset++;
3004 offset = payload = fs_reg(GRF, alloc.allocate(1));
3008 /* This is actually going to be a MOV, but since only the first dword
3009 * is accessed, we have a special opcode to do just that one. Note
3010 * that this needs to be an operation that will be considered a def
3011 * by live variable analysis, or register allocation will explode.
3013 fs_inst *setup = new(mem_ctx) fs_inst(FS_OPCODE_SET_SIMD4X2_OFFSET,
3014 8, offset, const_offset_reg);
3015 setup->force_writemask_all = true;
3017 setup->ir = inst->ir;
3018 setup->annotation = inst->annotation;
3019 inst->insert_before(block, setup);
3021 /* Similarly, this will only populate the first 4 channels of the
3022 * result register (since we only use smear values from 0-3), but we
3023 * don't tell the optimizer.
3025 inst->opcode = FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD_GEN7;
3026 inst->src[1] = payload;
3027 inst->base_mrf = -1;
3029 invalidate_live_intervals();
3031 /* Before register allocation, we didn't tell the scheduler about the
3032 * MRF we use. We know it's safe to use this MRF because nothing
3033 * else does except for register spill/unspill, which generates and
3034 * uses its MRF within a single IR instruction.
3036 inst->base_mrf = 14;
3043 fs_visitor::lower_load_payload()
3045 bool progress = false;
3047 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
3048 if (inst->opcode != SHADER_OPCODE_LOAD_PAYLOAD)
3051 assert(inst->dst.file == MRF || inst->dst.file == GRF);
3052 assert(inst->saturate == false);
3053 fs_reg dst = inst->dst;
3055 /* Get rid of COMPR4. We'll add it back in if we need it */
3056 if (dst.file == MRF)
3057 dst.reg = dst.reg & ~BRW_MRF_COMPR4;
3059 const fs_builder ibld(this, block, inst);
3060 const fs_builder hbld = ibld.exec_all().group(8, 0);
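/* Header sources are copied with an exec_all, 8-wide group because a
* message header is a single physical GRF that must be written in its
* entirety, regardless of the instruction's execution size and channel
* enables.
*/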
3062 for (uint8_t i = 0; i < inst->header_size; i++) {
3063 if (inst->src[i].file != BAD_FILE) {
3064 fs_reg mov_dst = retype(dst, BRW_REGISTER_TYPE_UD);
3065 fs_reg mov_src = retype(inst->src[i], BRW_REGISTER_TYPE_UD);
3066 hbld.MOV(mov_dst, mov_src);
3068 dst = offset(dst, hbld, 1);
3071 if (inst->dst.file == MRF && (inst->dst.reg & BRW_MRF_COMPR4) &&
3072 inst->exec_size > 8) {
3073 /* In this case, the payload portion of the LOAD_PAYLOAD isn't
3074 * a straightforward copy. Instead, the result of the
3075 * LOAD_PAYLOAD is treated as interleaved and the first four
3076 * non-header sources are unpacked as:
3077 *
3078 * m + 0: r0
3079 * m + 1: g0
3080 * m + 2: b0
3081 * m + 3: a0
3082 * m + 4: r1
3083 * m + 5: g1
3084 * m + 6: b1
3085 * m + 7: a1
3086 *
3087 * This is used for gen <= 5 fb writes.
3089 assert(inst->exec_size == 16);
3090 assert(inst->header_size + 4 <= inst->sources);
3091 for (uint8_t i = inst->header_size; i < inst->header_size + 4; i++) {
3092 if (inst->src[i].file != BAD_FILE) {
3093 if (devinfo->has_compr4) {
3094 fs_reg compr4_dst = retype(dst, inst->src[i].type);
3095 compr4_dst.reg |= BRW_MRF_COMPR4;
3096 ibld.MOV(compr4_dst, inst->src[i]);
3098 /* Platform doesn't have COMPR4. We have to fake it */
3099 fs_reg mov_dst = retype(dst, inst->src[i].type);
3100 ibld.half(0).MOV(mov_dst, half(inst->src[i], 0));
3101 mov_dst.reg += 4;
3102 ibld.half(1).MOV(mov_dst, half(inst->src[i], 1));
3109 /* The loop above only ever incremented us through the first set
3110 * of 4 registers. However, thanks to the magic of COMPR4, we
3111 * actually wrote to the first 8 registers, so we need to take
3112 * that into account now.
3113 */
3114 dst.reg += 4;
3116 /* The COMPR4 code took care of the first 4 sources. We'll let
3117 * the regular path handle any remaining sources. Yes, we are
3118 * modifying the instruction but we're about to delete it so
3119 * this really doesn't hurt anything.
3121 inst->header_size += 4;
3124 for (uint8_t i = inst->header_size; i < inst->sources; i++) {
3125 if (inst->src[i].file != BAD_FILE)
3126 ibld.MOV(retype(dst, inst->src[i].type), inst->src[i]);
3127 dst = offset(dst, ibld, 1);
3130 inst->remove(block);
3135 invalidate_live_intervals();
3141 fs_visitor::lower_integer_multiplication()
3143 bool progress = false;
3145 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
3146 const fs_builder ibld(this, block, inst);
3148 if (inst->opcode == BRW_OPCODE_MUL) {
3149 if (inst->dst.is_accumulator() ||
3150 (inst->dst.type != BRW_REGISTER_TYPE_D &&
3151 inst->dst.type != BRW_REGISTER_TYPE_UD))
3154 /* Gen8's MUL instruction can do a 32-bit x 32-bit -> 32-bit
3155 * operation directly, but CHV/BXT cannot.
3157 if (devinfo->gen >= 8 &&
3158 !devinfo->is_cherryview && !devinfo->is_broxton)
3161 if (inst->src[1].file == IMM &&
3162 inst->src[1].fixed_hw_reg.dw1.ud < (1 << 16)) {
3163 /* The MUL instruction isn't commutative. On Gen <= 6, only the low
3164 * 16-bits of src0 are read, and on Gen >= 7 only the low 16-bits of
3165 * src1 are read.
3166 *
3167 * If multiplying by an immediate value that fits in 16-bits, do a
3168 * single MUL instruction with that value in the proper location.
3170 if (devinfo->gen < 7) {
3171 fs_reg imm(GRF, alloc.allocate(dispatch_width / 8),
3172 inst->dst.type);
3173 ibld.MOV(imm, inst->src[1]);
3174 ibld.MUL(inst->dst, imm, inst->src[0]);
3176 ibld.MUL(inst->dst, inst->src[0], inst->src[1]);
3179 /* Gen < 8 (and some Gen8+ low-power parts like Cherryview) cannot
3180 * do 32-bit integer multiplication in one instruction, but instead
3181 * must do a sequence (which actually calculates a 64-bit result):
3183 * mul(8) acc0<1>D g3<8,8,1>D g4<8,8,1>D
3184 * mach(8) null g3<8,8,1>D g4<8,8,1>D
3185 * mov(8) g2<1>D acc0<8,8,1>D
3187 * But on Gen > 6, the ability to use the second accumulator register
3188 * (acc1) for non-float data types was removed, preventing a simple
3189 * implementation in SIMD16. A 16-channel result can be calculated by
3190 * executing the three instructions twice in SIMD8, once with quarter
3191 * control of 1Q for the first eight channels and again with 2Q for
3192 * the second eight channels.
3194 * Which accumulator register is implicitly accessed (by AccWrEnable
3195 * for instance) is determined by the quarter control. Unfortunately
3196 * Ivybridge (and presumably Baytrail) has a hardware bug in which an
3197 * implicit accumulator access by an instruction with 2Q will access
3198 * acc1 regardless of whether the data type is usable in acc1.
3200 * Specifically, the 2Q mach(8) writes acc1 which does not exist for
3201 * integer data types.
3203 * Since we only want the low 32-bits of the result, we can do two
3204 * 32-bit x 16-bit multiplies (like the mul and mach are doing), and
3205 * adjust the high result and add them (like the mach is doing):
3207 * mul(8) g7<1>D g3<8,8,1>D g4.0<8,8,1>UW
3208 * mul(8) g8<1>D g3<8,8,1>D g4.1<8,8,1>UW
3209 * shl(8) g9<1>D g8<8,8,1>D 16D
3210 * add(8) g2<1>D g7<8,8,1>D g8<8,8,1>D
3212 * We avoid the shl instruction by realizing that we only want to add
3213 * the low 16-bits of the "high" result to the high 16-bits of the
3214 * "low" result and using proper regioning on the add:
3216 * mul(8) g7<1>D g3<8,8,1>D g4.0<16,8,2>UW
3217 * mul(8) g8<1>D g3<8,8,1>D g4.1<16,8,2>UW
3218 * add(8) g7.1<2>UW g7.1<16,8,2>UW g8<16,8,2>UW
3220 * Since it does not use the (single) accumulator register, we can
3221 * schedule multi-component multiplications much better.
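*
* Arithmetically, writing src1 as b_hi * 2^16 + b_lo, the low 32 bits of
* the product are
*
*    (a * b) mod 2^32 = (a * b_lo + ((a * b_hi) << 16)) mod 2^32
*
* and the shifted term only contributes the low 16 bits of a * b_hi,
* which is why adding those into the high halves of the "low" result
* with the strided regioning shown above is sufficient.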
3224 fs_reg orig_dst = inst->dst;
3225 if (orig_dst.is_null() || orig_dst.file == MRF) {
3226 inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8),
3227 inst->dst.type);
3228 }
3229 fs_reg low = inst->dst;
3230 fs_reg high(GRF, alloc.allocate(dispatch_width / 8),
3231 inst->dst.type);
3233 if (devinfo->gen >= 7) {
3234 fs_reg src1_0_w = inst->src[1];
3235 fs_reg src1_1_w = inst->src[1];
3237 if (inst->src[1].file == IMM) {
3238 src1_0_w.fixed_hw_reg.dw1.ud &= 0xffff;
3239 src1_1_w.fixed_hw_reg.dw1.ud >>= 16;
3241 src1_0_w.type = BRW_REGISTER_TYPE_UW;
3242 if (src1_0_w.stride != 0) {
3243 assert(src1_0_w.stride == 1);
3244 src1_0_w.stride = 2;
3247 src1_1_w.type = BRW_REGISTER_TYPE_UW;
3248 if (src1_1_w.stride != 0) {
3249 assert(src1_1_w.stride == 1);
3250 src1_1_w.stride = 2;
3252 src1_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3254 ibld.MUL(low, inst->src[0], src1_0_w);
3255 ibld.MUL(high, inst->src[0], src1_1_w);
3257 fs_reg src0_0_w = inst->src[0];
3258 fs_reg src0_1_w = inst->src[0];
3260 src0_0_w.type = BRW_REGISTER_TYPE_UW;
3261 if (src0_0_w.stride != 0) {
3262 assert(src0_0_w.stride == 1);
3263 src0_0_w.stride = 2;
3266 src0_1_w.type = BRW_REGISTER_TYPE_UW;
3267 if (src0_1_w.stride != 0) {
3268 assert(src0_1_w.stride == 1);
3269 src0_1_w.stride = 2;
3271 src0_1_w.subreg_offset += type_sz(BRW_REGISTER_TYPE_UW);
3273 ibld.MUL(low, src0_0_w, inst->src[1]);
3274 ibld.MUL(high, src0_1_w, inst->src[1]);
3277 fs_reg dst = inst->dst;
3278 dst.type = BRW_REGISTER_TYPE_UW;
3279 dst.subreg_offset = 2;
3280 dst.stride = 2;
3282 high.type = BRW_REGISTER_TYPE_UW;
3283 high.stride = 2;
3285 low.type = BRW_REGISTER_TYPE_UW;
3286 low.subreg_offset = 2;
3287 low.stride = 2;
3289 ibld.ADD(dst, low, high);
3291 if (inst->conditional_mod || orig_dst.file == MRF) {
3292 set_condmod(inst->conditional_mod,
3293 ibld.MOV(orig_dst, inst->dst));
3297 } else if (inst->opcode == SHADER_OPCODE_MULH) {
3298 /* Should have been lowered to 8-wide. */
3299 assert(inst->exec_size <= 8);
3300 const fs_reg acc = retype(brw_acc_reg(inst->exec_size),
3301 inst->dst.type);
3302 fs_inst *mul = ibld.MUL(acc, inst->src[0], inst->src[1]);
3303 fs_inst *mach = ibld.MACH(inst->dst, inst->src[0], inst->src[1]);
3305 if (devinfo->gen >= 8) {
3306 /* Until Gen8, integer multiplies read 32-bits from one source,
3307 * and 16-bits from the other, relying on the MACH instruction
3308 * to generate the high bits of the result.
3310 * On Gen8, the multiply instruction does a full 32x32-bit
3311 * multiply, but in order to do a 64-bit multiply we can simulate
3312 * the previous behavior and then use a MACH instruction.
3314 * FINISHME: Don't use source modifiers on src1.
3316 assert(mul->src[1].type == BRW_REGISTER_TYPE_D ||
3317 mul->src[1].type == BRW_REGISTER_TYPE_UD);
3318 mul->src[1].type = (type_is_signed(mul->src[1].type) ?
3319 BRW_REGISTER_TYPE_W : BRW_REGISTER_TYPE_UW);
3320 mul->src[1].stride *= 2;
3322 } else if (devinfo->gen == 7 && !devinfo->is_haswell &&
3323 inst->force_sechalf) {
3324 /* Among other things the quarter control bits influence which
3325 * accumulator register is used by the hardware for instructions
3326 * that access the accumulator implicitly (e.g. MACH). A
3327 * second-half instruction would normally map to acc1, which
3328 * doesn't exist on Gen7 and up (the hardware does emulate it for
3329 * floating-point instructions *only* by taking advantage of the
3330 * extra precision of acc0 not normally used for floating point
3331 * arithmetic).
3332 *
3333 * HSW and up are careful enough not to try to access an
3334 * accumulator register that doesn't exist, but on earlier Gen7
3335 * hardware we need to make sure that the quarter control bits are
3336 * zero to avoid non-deterministic behavior and emit an extra MOV
3337 * to get the result masked correctly according to the current
3338 * channel enables.
3339 */
3340 mach->force_sechalf = false;
3341 mach->force_writemask_all = true;
3342 mach->dst = ibld.vgrf(inst->dst.type);
3343 ibld.MOV(inst->dst, mach->dst);
3349 inst->remove(block);
3354 invalidate_live_intervals();
3360 setup_color_payload(const fs_builder &bld, const brw_wm_prog_key *key,
3361 fs_reg *dst, fs_reg color, unsigned components)
3363 if (key->clamp_fragment_color) {
3364 fs_reg tmp = bld.vgrf(BRW_REGISTER_TYPE_F, 4);
3365 assert(color.type == BRW_REGISTER_TYPE_F);
3367 for (unsigned i = 0; i < components; i++)
3368 set_saturate(true,
3369 bld.MOV(offset(tmp, bld, i), offset(color, bld, i)));
3371 for (unsigned i = 0; i < components; i++)
3372 dst[i] = offset(tmp, bld, i);
3373 } else {
3374 for (unsigned i = 0; i < components; i++)
3375 dst[i] = offset(color, bld, i);
3379 lower_fb_write_logical_send(const fs_builder &bld, fs_inst *inst,
3380 const brw_wm_prog_data *prog_data,
3381 const brw_wm_prog_key *key,
3382 const fs_visitor::thread_payload &payload)
3384 assert(inst->src[6].file == IMM);
3385 const brw_device_info *devinfo = bld.shader->devinfo;
3386 const fs_reg &color0 = inst->src[0];
3387 const fs_reg &color1 = inst->src[1];
3388 const fs_reg &src0_alpha = inst->src[2];
3389 const fs_reg &src_depth = inst->src[3];
3390 const fs_reg &dst_depth = inst->src[4];
3391 fs_reg sample_mask = inst->src[5];
3392 const unsigned components = inst->src[6].fixed_hw_reg.dw1.ud;
3394 /* We can potentially have a message length of up to 15, so we have to set
3395 * base_mrf to either 0 or 1 in order to fit in m0..m15.
3396 */
3397 fs_reg sources[15];
3398 int header_size = 2, payload_header_size;
3399 unsigned length = 0;
3401 /* From the Sandy Bridge PRM, volume 4, page 198:
3403 * "Dispatched Pixel Enables. One bit per pixel indicating
3404 * which pixels were originally enabled when the thread was
3405 * dispatched. This field is only required for the end-of-
3406 * thread message and on all dual-source messages."
3408 if (devinfo->gen >= 6 &&
3409 (devinfo->is_haswell || devinfo->gen >= 8 || !prog_data->uses_kill) &&
3410 color1.file == BAD_FILE &&
3411 key->nr_color_regions == 1) {
3412 header_size = 0;
3415 if (header_size != 0) {
3416 assert(header_size == 2);
3417 /* Allocate 2 registers for a header */
3418 length += 2;
3421 if (payload.aa_dest_stencil_reg) {
3422 sources[length] = fs_reg(GRF, bld.shader->alloc.allocate(1));
3423 bld.group(8, 0).exec_all().annotate("FB write stencil/AA alpha")
3424 .MOV(sources[length],
3425 fs_reg(brw_vec8_grf(payload.aa_dest_stencil_reg, 0)));
3429 if (prog_data->uses_omask) {
3430 sources[length] = fs_reg(GRF, bld.shader->alloc.allocate(1),
3431 BRW_REGISTER_TYPE_UD);
3433 /* Hand over gl_SampleMask. Only the lower 16 bits of each channel are
3434 * relevant. Since it's unsigned single words one vgrf is always
3435 * 16-wide, but only the lower or higher 8 channels will be used by the
3436 * hardware when doing a SIMD8 write depending on whether we have
3437 * selected the subspans for the first or second half respectively.
3439 assert(sample_mask.file != BAD_FILE && type_sz(sample_mask.type) == 4);
3440 sample_mask.type = BRW_REGISTER_TYPE_UW;
3441 sample_mask.stride *= 2;
3443 bld.exec_all().annotate("FB write oMask")
3444 .MOV(half(retype(sources[length], BRW_REGISTER_TYPE_UW),
3445 inst->force_sechalf),
3446 sample_mask);
3447 length++;
3450 payload_header_size = length;
3452 if (src0_alpha.file != BAD_FILE) {
3453 /* FIXME: This is being passed at the wrong location in the payload and
3454 * doesn't work when gl_SampleMask and MRTs are used simultaneously.
3455 * It's supposed to be immediately before oMask but there seems to be no
3456 * reasonable way to pass them in the correct order because LOAD_PAYLOAD
3457 * requires header sources to form a contiguous segment at the beginning
3458 * of the message and src0_alpha has per-channel semantics.
3460 setup_color_payload(bld, key, &sources[length], src0_alpha, 1);
3464 setup_color_payload(bld, key, &sources[length], color0, components);
3467 if (color1.file != BAD_FILE) {
3468 setup_color_payload(bld, key, &sources[length], color1, components);
3472 if (src_depth.file != BAD_FILE) {
3473 sources[length] = src_depth;
3477 if (dst_depth.file != BAD_FILE) {
3478 sources[length] = dst_depth;
3482 fs_inst *load;
3483 if (devinfo->gen >= 7) {
3484 /* Send from the GRF */
3485 fs_reg payload = fs_reg(GRF, -1, BRW_REGISTER_TYPE_F);
3486 load = bld.LOAD_PAYLOAD(payload, sources, length, payload_header_size);
3487 payload.reg = bld.shader->alloc.allocate(load->regs_written);
3488 load->dst = payload;
3490 inst->src[0] = payload;
3491 inst->resize_sources(1);
3492 inst->base_mrf = -1;
3494 /* Send from the MRF */
3495 load = bld.LOAD_PAYLOAD(fs_reg(MRF, 1, BRW_REGISTER_TYPE_F),
3496 sources, length, payload_header_size);
3498 /* On pre-SNB, we have to interlace the color values. LOAD_PAYLOAD
3499 * will do this for us if we just give it a COMPR4 destination.
3501 if (devinfo->gen < 6 && bld.dispatch_width() == 16)
3502 load->dst.reg |= BRW_MRF_COMPR4;
3504 inst->resize_sources(0);
3508 inst->opcode = FS_OPCODE_FB_WRITE;
3509 inst->mlen = load->regs_written;
3510 inst->header_size = header_size;
3514 lower_sampler_logical_send_gen4(const fs_builder &bld, fs_inst *inst, opcode op,
3515 const fs_reg &coordinate,
3516 const fs_reg &shadow_c,
3517 const fs_reg &lod, const fs_reg &lod2,
3518 const fs_reg &sampler,
3519 unsigned coord_components,
3520 unsigned grad_components)
3522 const bool has_lod = (op == SHADER_OPCODE_TXL || op == FS_OPCODE_TXB ||
3523 op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS);
3524 fs_reg msg_begin(MRF, 1, BRW_REGISTER_TYPE_F);
3525 fs_reg msg_end = msg_begin;
3528 msg_end = offset(msg_end, bld.group(8, 0), 1);
3530 for (unsigned i = 0; i < coord_components; i++)
3531 bld.MOV(retype(offset(msg_end, bld, i), coordinate.type),
3532 offset(coordinate, bld, i));
3534 msg_end = offset(msg_end, bld, coord_components);
3536 /* Messages other than SAMPLE and RESINFO in SIMD16 and TXD in SIMD8
3537 * require all three components to be present and zero if they are unused.
3539 if (coord_components > 0 &&
3540 (has_lod || shadow_c.file != BAD_FILE ||
3541 (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8))) {
3542 for (unsigned i = coord_components; i < 3; i++)
3543 bld.MOV(offset(msg_end, bld, i), fs_reg(0.0f));
3545 msg_end = offset(msg_end, bld, 3 - coord_components);
3548 if (op == SHADER_OPCODE_TXD) {
3549 /* TXD unsupported in SIMD16 mode. */
3550 assert(bld.dispatch_width() == 8);
3552 /* the slots for u and v are always present, but r is optional */
3553 if (coord_components < 2)
3554 msg_end = offset(msg_end, bld, 2 - coord_components);
3557 * dPdx = dudx, dvdx, drdx
3558 * dPdy = dudy, dvdy, drdy
3560 * 1-arg: Does not exist.
3562 * 2-arg: dudx dvdx dudy dvdy
3563 * dPdx.x dPdx.y dPdy.x dPdy.y
3564 * m4 m5 m6 m7
3566 * 3-arg: dudx dvdx drdx dudy dvdy drdy
3567 * dPdx.x dPdx.y dPdx.z dPdy.x dPdy.y dPdy.z
3568 * m5 m6 m7 m8 m9 m10
3570 for (unsigned i = 0; i < grad_components; i++)
3571 bld.MOV(offset(msg_end, bld, i), offset(lod, bld, i));
3573 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3575 for (unsigned i = 0; i < grad_components; i++)
3576 bld.MOV(offset(msg_end, bld, i), offset(lod2, bld, i));
3578 msg_end = offset(msg_end, bld, MAX2(grad_components, 2));
3582 /* Bias/LOD with shadow comparator is unsupported in SIMD16 -- *without*
3583 * shadow comparator (including RESINFO) it's unsupported in SIMD8 mode.
3584 */
3585 assert(shadow_c.file != BAD_FILE ? bld.dispatch_width() == 8 :
3586 bld.dispatch_width() == 16);
3588 const brw_reg_type type =
3589 (op == SHADER_OPCODE_TXF || op == SHADER_OPCODE_TXS ?
3590 BRW_REGISTER_TYPE_UD : BRW_REGISTER_TYPE_F);
3591 bld.MOV(retype(msg_end, type), lod);
3592 msg_end = offset(msg_end, bld, 1);
3595 if (shadow_c.file != BAD_FILE) {
3596 if (op == SHADER_OPCODE_TEX && bld.dispatch_width() == 8) {
3597 /* There's no plain shadow compare message, so we use shadow
3598 * compare with a bias of 0.0.
3600 bld.MOV(msg_end, fs_reg(0.0f));
3601 msg_end = offset(msg_end, bld, 1);
3604 bld.MOV(msg_end, shadow_c);
3605 msg_end = offset(msg_end, bld, 1);
3609 inst->src[0] = reg_undef;
3610 inst->src[1] = sampler;
3611 inst->resize_sources(2);
3612 inst->base_mrf = msg_begin.reg;
3613 inst->mlen = msg_end.reg - msg_begin.reg;
3614 inst->header_size = 1;
3618 lower_sampler_logical_send_gen5(const fs_builder &bld, fs_inst *inst, opcode op,
3620 const fs_reg &shadow_c,
3621 fs_reg lod, fs_reg lod2,
3622 const fs_reg &sample_index,
3623 const fs_reg &sampler,
3624 const fs_reg &offset_value,
3625 unsigned coord_components,
3626 unsigned grad_components)
3628 fs_reg message(MRF, 2, BRW_REGISTER_TYPE_F);
3629 fs_reg msg_coords = message;
3630 unsigned header_size = 0;
3632 if (offset_value.file != BAD_FILE) {
3633 /* The offsets set up by the visitor are in the m1 header, so we can't
3634 * go headerless.
3635 */
3636 header_size = 1;
3637 message.reg--;
3638 }
3640 for (unsigned i = 0; i < coord_components; i++) {
3641 bld.MOV(retype(offset(msg_coords, bld, i), coordinate.type), coordinate);
3642 coordinate = offset(coordinate, bld, 1);
3644 fs_reg msg_end = offset(msg_coords, bld, coord_components);
3645 fs_reg msg_lod = offset(msg_coords, bld, 4);
3647 if (shadow_c.file != BAD_FILE) {
3648 fs_reg msg_shadow = msg_lod;
3649 bld.MOV(msg_shadow, shadow_c);
3650 msg_lod = offset(msg_shadow, bld, 1);
3655 case SHADER_OPCODE_TXL:
3656 case FS_OPCODE_TXB:
3657 bld.MOV(msg_lod, lod);
3658 msg_end = offset(msg_lod, bld, 1);
3660 case SHADER_OPCODE_TXD:
3663 * dPdx = dudx, dvdx, drdx
3664 * dPdy = dudy, dvdy, drdy
3666 * Load up these values:
3667 * - dudx dudy dvdx dvdy drdx drdy
3668 * - dPdx.x dPdy.x dPdx.y dPdy.y dPdx.z dPdy.z
3671 for (unsigned i = 0; i < grad_components; i++) {
3672 bld.MOV(msg_end, lod);
3673 lod = offset(lod, bld, 1);
3674 msg_end = offset(msg_end, bld, 1);
3676 bld.MOV(msg_end, lod2);
3677 lod2 = offset(lod2, bld, 1);
3678 msg_end = offset(msg_end, bld, 1);
3681 case SHADER_OPCODE_TXS:
3682 msg_lod = retype(msg_end, BRW_REGISTER_TYPE_UD);
3683 bld.MOV(msg_lod, lod);
3684 msg_end = offset(msg_lod, bld, 1);
3686 case SHADER_OPCODE_TXF:
3687 msg_lod = offset(msg_coords, bld, 3);
3688 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), lod);
3689 msg_end = offset(msg_lod, bld, 1);
3691 case SHADER_OPCODE_TXF_CMS:
3692 msg_lod = offset(msg_coords, bld, 3);
3693 /* lod */
3694 bld.MOV(retype(msg_lod, BRW_REGISTER_TYPE_UD), fs_reg(0u));
3695 /* sample index */
3696 bld.MOV(retype(offset(msg_lod, bld, 1), BRW_REGISTER_TYPE_UD), sample_index);
3697 msg_end = offset(msg_lod, bld, 2);
3704 inst->src[0] = reg_undef;
3705 inst->src[1] = sampler;
3706 inst->resize_sources(2);
3707 inst->base_mrf = message.reg;
3708 inst->mlen = msg_end.reg - message.reg;
3709 inst->header_size = header_size;
3711 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
3712 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
3716 is_high_sampler(const struct brw_device_info *devinfo, const fs_reg &sampler)
3718 if (devinfo->gen < 8 && !devinfo->is_haswell)
3719 return false;
3721 return sampler.file != IMM || sampler.fixed_hw_reg.dw1.ud >= 16;
3725 lower_sampler_logical_send_gen7(const fs_builder &bld, fs_inst *inst, opcode op,
3727 const fs_reg &shadow_c,
3728 fs_reg lod, fs_reg lod2,
3729 const fs_reg &sample_index,
3730 const fs_reg &mcs, const fs_reg &sampler,
3731 fs_reg offset_value,
3732 unsigned coord_components,
3733 unsigned grad_components)
3735 const brw_device_info *devinfo = bld.shader->devinfo;
3736 int reg_width = bld.dispatch_width() / 8;
3737 unsigned header_size = 0, length = 0;
3738 fs_reg sources[MAX_SAMPLER_MESSAGE_SIZE];
3739 for (unsigned i = 0; i < ARRAY_SIZE(sources); i++)
3740 sources[i] = bld.vgrf(BRW_REGISTER_TYPE_F);
3742 if (op == SHADER_OPCODE_TG4 || op == SHADER_OPCODE_TG4_OFFSET ||
3743 offset_value.file != BAD_FILE ||
3744 is_high_sampler(devinfo, sampler)) {
3745 /* For general texture offsets (no txf workaround), we need a header to
3746 * put them in. Note that we're only reserving space for it in the
3747 * message payload as it will be initialized implicitly by the
3748 * generator.
3749 *
3750 * TG4 needs to place its channel select in the header, for interaction
3751 * with ARB_texture_swizzle. The sampler index is only 4-bits, so for
3752 * larger sampler numbers we need to offset the Sampler State Pointer in
3753 * the header.
3754 */
3755 header_size = 1;
3756 sources[0] = fs_reg();
3760 if (shadow_c.file != BAD_FILE) {
3761 bld.MOV(sources[length], shadow_c);
3765 bool coordinate_done = false;
3767 /* The sampler can only meaningfully compute LOD for fragment shader
3768 * messages. For all other stages, we change the opcode to TXL and
3769 * hardcode the LOD to 0.
3771 if (bld.shader->stage != MESA_SHADER_FRAGMENT &&
3772 op == SHADER_OPCODE_TEX) {
3773 op = SHADER_OPCODE_TXL;
3777 /* Set up the LOD info */
3778 switch (op) {
3779 case FS_OPCODE_TXB:
3780 case SHADER_OPCODE_TXL:
3781 bld.MOV(sources[length], lod);
3784 case SHADER_OPCODE_TXD:
3785 /* TXD should have been lowered in SIMD16 mode. */
3786 assert(bld.dispatch_width() == 8);
3788 /* Load dPdx and the coordinate together:
3789 * [hdr], [ref], x, dPdx.x, dPdy.x, y, dPdx.y, dPdy.y, z, dPdx.z, dPdy.z
3791 for (unsigned i = 0; i < coord_components; i++) {
3792 bld.MOV(sources[length], coordinate);
3793 coordinate = offset(coordinate, bld, 1);
3796 /* For cube map array, the coordinate is (u,v,r,ai) but there are
3797 * only derivatives for (u, v, r).
3799 if (i < grad_components) {
3800 bld.MOV(sources[length], lod);
3801 lod = offset(lod, bld, 1);
3804 bld.MOV(sources[length], lod2);
3805 lod2 = offset(lod2, bld, 1);
3810 coordinate_done = true;
3812 case SHADER_OPCODE_TXS:
3813 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), lod);
3816 case SHADER_OPCODE_TXF:
3817 /* Unfortunately, the parameters for LD are intermixed: u, lod, v, r.
3818 * On Gen9 they are u, v, lod, r.
3820 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate);
3821 coordinate = offset(coordinate, bld, 1);
3824 if (devinfo->gen >= 9) {
3825 if (coord_components >= 2) {
3826 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate);
3827 coordinate = offset(coordinate, bld, 1);
3832 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), lod);
3835 for (unsigned i = devinfo->gen >= 9 ? 2 : 1; i < coord_components; i++) {
3836 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate);
3837 coordinate = offset(coordinate, bld, 1);
3841 coordinate_done = true;
3843 case SHADER_OPCODE_TXF_CMS:
3844 case SHADER_OPCODE_TXF_UMS:
3845 case SHADER_OPCODE_TXF_MCS:
3846 if (op == SHADER_OPCODE_TXF_UMS || op == SHADER_OPCODE_TXF_CMS) {
3847 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), sample_index);
3851 if (op == SHADER_OPCODE_TXF_CMS) {
3852 /* Data from the multisample control surface. */
3853 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_UD), mcs);
3857 /* There is no offsetting for this message; just copy in the integer
3858 * texture coordinates.
3860 for (unsigned i = 0; i < coord_components; i++) {
3861 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), coordinate);
3862 coordinate = offset(coordinate, bld, 1);
3866 coordinate_done = true;
3868 case SHADER_OPCODE_TG4_OFFSET:
3869 /* gather4_po_c should have been lowered in SIMD16 mode. */
3870 assert(bld.dispatch_width() == 8 || shadow_c.file == BAD_FILE);
3872 /* More crazy intermixing */
3873 for (unsigned i = 0; i < 2; i++) { /* u, v */
3874 bld.MOV(sources[length], coordinate);
3875 coordinate = offset(coordinate, bld, 1);
3879 for (unsigned i = 0; i < 2; i++) { /* offu, offv */
3880 bld.MOV(retype(sources[length], BRW_REGISTER_TYPE_D), offset_value);
3881 offset_value = offset(offset_value, bld, 1);
3885 if (coord_components == 3) { /* r if present */
3886 bld.MOV(sources[length], coordinate);
3887 coordinate = offset(coordinate, bld, 1);
3891 coordinate_done = true;
3897 /* Set up the coordinate (except for cases where it was done above) */
3898 if (!coordinate_done) {
3899 for (unsigned i = 0; i < coord_components; i++) {
3900 bld.MOV(sources[length], coordinate);
3901 coordinate = offset(coordinate, bld, 1);
3906 int mlen;
3907 if (reg_width == 2)
3908 mlen = length * reg_width - header_size;
3909 else
3910 mlen = length * reg_width;
3912 const fs_reg src_payload = fs_reg(GRF, bld.shader->alloc.allocate(mlen),
3913 BRW_REGISTER_TYPE_F);
3914 bld.LOAD_PAYLOAD(src_payload, sources, length, header_size);
3916 /* Generate the SEND. */
3918 inst->src[0] = src_payload;
3919 inst->src[1] = sampler;
3920 inst->resize_sources(2);
3921 inst->base_mrf = -1;
3923 inst->header_size = header_size;
3925 /* Message length > MAX_SAMPLER_MESSAGE_SIZE disallowed by hardware. */
3926 assert(inst->mlen <= MAX_SAMPLER_MESSAGE_SIZE);
3930 lower_sampler_logical_send(const fs_builder &bld, fs_inst *inst, opcode op)
3932 const brw_device_info *devinfo = bld.shader->devinfo;
3933 const fs_reg &coordinate = inst->src[0];
3934 const fs_reg &shadow_c = inst->src[1];
3935 const fs_reg &lod = inst->src[2];
3936 const fs_reg &lod2 = inst->src[3];
3937 const fs_reg &sample_index = inst->src[4];
3938 const fs_reg &mcs = inst->src[5];
3939 const fs_reg &sampler = inst->src[6];
3940 const fs_reg &offset_value = inst->src[7];
3941 assert(inst->src[8].file == IMM && inst->src[9].file == IMM);
3942 const unsigned coord_components = inst->src[8].fixed_hw_reg.dw1.ud;
3943 const unsigned grad_components = inst->src[9].fixed_hw_reg.dw1.ud;
3945 if (devinfo->gen >= 7) {
3946 lower_sampler_logical_send_gen7(bld, inst, op, coordinate,
3947 shadow_c, lod, lod2, sample_index,
3948 mcs, sampler, offset_value,
3949 coord_components, grad_components);
3950 } else if (devinfo->gen >= 5) {
3951 lower_sampler_logical_send_gen5(bld, inst, op, coordinate,
3952 shadow_c, lod, lod2, sample_index,
3953 sampler, offset_value,
3954 coord_components, grad_components);
3956 lower_sampler_logical_send_gen4(bld, inst, op, coordinate,
3957 shadow_c, lod, lod2, sampler,
3958 coord_components, grad_components);
3963 * Initialize the header present in some typed and untyped surface
3967 emit_surface_header(const fs_builder &bld, const fs_reg &sample_mask)
3969 fs_builder ubld = bld.exec_all().group(8, 0);
3970 const fs_reg dst = ubld.vgrf(BRW_REGISTER_TYPE_UD);
3971 ubld.MOV(dst, fs_reg(0));
3972 ubld.MOV(component(dst, 7), sample_mask);
3973 return dst;
3977 lower_surface_logical_send(const fs_builder &bld, fs_inst *inst, opcode op,
3978 const fs_reg &sample_mask)
3980 /* Get the logical send arguments. */
3981 const fs_reg &addr = inst->src[0];
3982 const fs_reg &src = inst->src[1];
3983 const fs_reg &surface = inst->src[2];
3984 const UNUSED fs_reg &dims = inst->src[3];
3985 const fs_reg &arg = inst->src[4];
3987 /* Calculate the total number of components of the payload. */
3988 const unsigned addr_sz = inst->components_read(0);
3989 const unsigned src_sz = inst->components_read(1);
3990 const unsigned header_sz = (sample_mask.file == BAD_FILE ? 0 : 1);
3991 const unsigned sz = header_sz + addr_sz + src_sz;
3993 /* Allocate space for the payload. */
3994 fs_reg *const components = new fs_reg[sz];
3995 const fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, sz);
3998 /* Construct the payload. */
3999 unsigned n = 0;
4000 components[n++] = emit_surface_header(bld, sample_mask);
4002 for (unsigned i = 0; i < addr_sz; i++)
4003 components[n++] = offset(addr, bld, i);
4005 for (unsigned i = 0; i < src_sz; i++)
4006 components[n++] = offset(src, bld, i);
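/* At this point the payload layout is [header | address | data], e.g. a
* hypothetical buffer atomic with a single address coordinate and two
* data operands would have components { header, u, src0, src1 }; the
* actual counts come from components_read() above.
*/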
4008 bld.LOAD_PAYLOAD(payload, components, sz, header_sz);
4010 /* Update the original instruction. */
4011 inst->opcode = op;
4012 inst->mlen = header_sz + (addr_sz + src_sz) * inst->exec_size / 8;
4013 inst->header_size = header_sz;
4015 inst->src[0] = payload;
4016 inst->src[1] = surface;
4017 inst->src[2] = arg;
4018 inst->resize_sources(3);
4020 delete[] components;
4024 fs_visitor::lower_logical_sends()
4026 bool progress = false;
4028 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4029 const fs_builder ibld(this, block, inst);
4031 switch (inst->opcode) {
4032 case FS_OPCODE_FB_WRITE_LOGICAL:
4033 assert(stage == MESA_SHADER_FRAGMENT);
4034 lower_fb_write_logical_send(ibld, inst,
4035 (const brw_wm_prog_data *)prog_data,
4036 (const brw_wm_prog_key *)key,
4037 payload);
4038 break;
4040 case SHADER_OPCODE_TEX_LOGICAL:
4041 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TEX);
4044 case SHADER_OPCODE_TXD_LOGICAL:
4045 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXD);
4048 case SHADER_OPCODE_TXF_LOGICAL:
4049 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF);
4052 case SHADER_OPCODE_TXL_LOGICAL:
4053 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXL);
4056 case SHADER_OPCODE_TXS_LOGICAL:
4057 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXS);
4060 case FS_OPCODE_TXB_LOGICAL:
4061 lower_sampler_logical_send(ibld, inst, FS_OPCODE_TXB);
4064 case SHADER_OPCODE_TXF_CMS_LOGICAL:
4065 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_CMS);
4068 case SHADER_OPCODE_TXF_UMS_LOGICAL:
4069 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_UMS);
4072 case SHADER_OPCODE_TXF_MCS_LOGICAL:
4073 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TXF_MCS);
4076 case SHADER_OPCODE_LOD_LOGICAL:
4077 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_LOD);
4080 case SHADER_OPCODE_TG4_LOGICAL:
4081 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4);
4084 case SHADER_OPCODE_TG4_OFFSET_LOGICAL:
4085 lower_sampler_logical_send(ibld, inst, SHADER_OPCODE_TG4_OFFSET);
4088 case SHADER_OPCODE_UNTYPED_SURFACE_READ_LOGICAL:
4089 lower_surface_logical_send(ibld, inst,
4090 SHADER_OPCODE_UNTYPED_SURFACE_READ,
4091 fs_reg());
4092 break;
4094 case SHADER_OPCODE_UNTYPED_SURFACE_WRITE_LOGICAL:
4095 lower_surface_logical_send(ibld, inst,
4096 SHADER_OPCODE_UNTYPED_SURFACE_WRITE,
4097 ibld.sample_mask_reg());
4100 case SHADER_OPCODE_UNTYPED_ATOMIC_LOGICAL:
4101 lower_surface_logical_send(ibld, inst,
4102 SHADER_OPCODE_UNTYPED_ATOMIC,
4103 ibld.sample_mask_reg());
4106 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4107 lower_surface_logical_send(ibld, inst,
4108 SHADER_OPCODE_TYPED_SURFACE_READ,
4112 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4113 lower_surface_logical_send(ibld, inst,
4114 SHADER_OPCODE_TYPED_SURFACE_WRITE,
4115 ibld.sample_mask_reg());
4118 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4119 lower_surface_logical_send(ibld, inst,
4120 SHADER_OPCODE_TYPED_ATOMIC,
4121 ibld.sample_mask_reg());
4132 invalidate_live_intervals();
4138 * Get the closest native SIMD width supported by the hardware for instruction
4139 * \p inst. The instruction will be left untouched by
4140 * fs_visitor::lower_simd_width() if the returned value is equal to the
4141 * original execution size.
4144 get_lowered_simd_width(const struct brw_device_info *devinfo,
4145 const fs_inst *inst)
4147 switch (inst->opcode) {
4148 case BRW_OPCODE_MOV:
4149 case BRW_OPCODE_SEL:
4150 case BRW_OPCODE_NOT:
4151 case BRW_OPCODE_AND:
4153 case BRW_OPCODE_XOR:
4154 case BRW_OPCODE_SHR:
4155 case BRW_OPCODE_SHL:
4156 case BRW_OPCODE_ASR:
4157 case BRW_OPCODE_CMP:
4158 case BRW_OPCODE_CMPN:
4159 case BRW_OPCODE_CSEL:
4160 case BRW_OPCODE_F32TO16:
4161 case BRW_OPCODE_F16TO32:
4162 case BRW_OPCODE_BFREV:
4163 case BRW_OPCODE_BFE:
4164 case BRW_OPCODE_BFI1:
4165 case BRW_OPCODE_BFI2:
4166 case BRW_OPCODE_ADD:
4167 case BRW_OPCODE_MUL:
4168 case BRW_OPCODE_AVG:
4169 case BRW_OPCODE_FRC:
4170 case BRW_OPCODE_RNDU:
4171 case BRW_OPCODE_RNDD:
4172 case BRW_OPCODE_RNDE:
4173 case BRW_OPCODE_RNDZ:
4174 case BRW_OPCODE_LZD:
4175 case BRW_OPCODE_FBH:
4176 case BRW_OPCODE_FBL:
4177 case BRW_OPCODE_CBIT:
4178 case BRW_OPCODE_SAD2:
4179 case BRW_OPCODE_MAD:
4180 case BRW_OPCODE_LRP:
4181 case SHADER_OPCODE_RCP:
4182 case SHADER_OPCODE_RSQ:
4183 case SHADER_OPCODE_SQRT:
4184 case SHADER_OPCODE_EXP2:
4185 case SHADER_OPCODE_LOG2:
4186 case SHADER_OPCODE_POW:
4187 case SHADER_OPCODE_INT_QUOTIENT:
4188 case SHADER_OPCODE_INT_REMAINDER:
4189 case SHADER_OPCODE_SIN:
4190 case SHADER_OPCODE_COS: {
4191 /* According to the PRMs:
4192 * "A. In Direct Addressing mode, a source cannot span more than 2
4193 * adjacent GRF registers.
4194 * B. A destination cannot span more than 2 adjacent GRF registers."
4196 * Look for the source or destination with the largest register region
4197 * which is the one that is going to limit the overall execution size of
4198 * the instruction due to this rule.
4200 unsigned reg_count = inst->regs_written;
4202 for (unsigned i = 0; i < inst->sources; i++)
4203 reg_count = MAX2(reg_count, (unsigned)inst->regs_read(i));
4205 /* Calculate the maximum execution size of the instruction based on the
4206 * factor by which it goes over the hardware limit of 2 GRFs.
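*
* For example, a SIMD16 instruction with an operand spanning 4 GRFs
* (hypothetical) would be limited to 16 / DIV_ROUND_UP(4, 2) = 8
* channels by the computation below.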
4208 return inst->exec_size / DIV_ROUND_UP(reg_count, 2);
4210 case SHADER_OPCODE_MULH:
4211 /* MULH is lowered to the MUL/MACH sequence using the accumulator, which
4212 * is 8-wide on Gen7+.
4214 return (devinfo->gen >= 7 ? 8 : inst->exec_size);
4216 case FS_OPCODE_FB_WRITE_LOGICAL:
4217 /* Gen6 doesn't support SIMD16 depth writes but we cannot handle them
4218 * here.
4219 */
4220 assert(devinfo->gen != 6 || inst->src[3].file == BAD_FILE ||
4221 inst->exec_size == 8);
4222 /* Dual-source FB writes are unsupported in SIMD16 mode. */
4223 return (inst->src[1].file != BAD_FILE ? 8 : inst->exec_size);
4225 case SHADER_OPCODE_TXD_LOGICAL:
4226 /* TXD is unsupported in SIMD16 mode. */
4229 case SHADER_OPCODE_TG4_OFFSET_LOGICAL: {
4230 /* gather4_po_c is unsupported in SIMD16 mode. */
4231 const fs_reg &shadow_c = inst->src[1];
4232 return (shadow_c.file != BAD_FILE ? 8 : inst->exec_size);
4234 case SHADER_OPCODE_TXL_LOGICAL:
4235 case FS_OPCODE_TXB_LOGICAL: {
4236 /* Gen4 doesn't have SIMD8 non-shadow-compare bias/LOD instructions, and
4237 * Gen4-6 can't support TXL and TXB with shadow comparison in SIMD16
4238 * mode because the message exceeds the maximum length of 11.
4240 const fs_reg &shadow_c = inst->src[1];
4241 if (devinfo->gen == 4 && shadow_c.file == BAD_FILE)
4243 else if (devinfo->gen < 7 && shadow_c.file != BAD_FILE)
4246 return inst->exec_size;
4248 case SHADER_OPCODE_TXF_LOGICAL:
4249 case SHADER_OPCODE_TXS_LOGICAL:
4250 /* Gen4 doesn't have SIMD8 variants for the RESINFO and LD-with-LOD
4251 * messages. Use SIMD16 instead.
4253 if (devinfo->gen == 4)
4256 return inst->exec_size;
4258 case SHADER_OPCODE_TYPED_ATOMIC_LOGICAL:
4259 case SHADER_OPCODE_TYPED_SURFACE_READ_LOGICAL:
4260 case SHADER_OPCODE_TYPED_SURFACE_WRITE_LOGICAL:
4264 return inst->exec_size;
4269 * The \p rows array of registers represents a \p num_rows by \p num_columns
4270  * matrix in row-major order; write it in column-major order into the register
4271 * passed as destination. \p stride gives the separation between matrix
4272 * elements in the input in fs_builder::dispatch_width() units.
4275 emit_transpose(const fs_builder &bld,
4276 const fs_reg &dst, const fs_reg *rows,
4277 unsigned num_rows, unsigned num_columns, unsigned stride)
4279 fs_reg *const components = new fs_reg[num_rows * num_columns];
4281 for (unsigned i = 0; i < num_columns; ++i) {
4282 for (unsigned j = 0; j < num_rows; ++j)
4283 components[num_rows * i + j] = offset(rows[j], bld, stride * i);
4286 bld.LOAD_PAYLOAD(dst, components, num_rows * num_columns, 0);
4288 delete[] components;
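/* Illustrative example of the transposition above (assumed shapes): with
 * num_rows = 2, num_columns = 3 and stride = 1, rows[0] = (a0 a1 a2) and
 * rows[1] = (b0 b1 b2) are gathered column by column into
 *
 *    components = { a0, b0, a1, b1, a2, b2 }
 *
 * so the LOAD_PAYLOAD writes dst in column-major order as documented.
 */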
4292 fs_visitor::lower_simd_width()
4294 bool progress = false;
4296 foreach_block_and_inst_safe(block, fs_inst, inst, cfg) {
4297 const unsigned lower_width = get_lowered_simd_width(devinfo, inst);
4299 if (lower_width != inst->exec_size) {
4300       /* Builder matching the original instruction. We may also need to
4301        * emit an instruction wider than the original, so set the execution
4302        * size of the builder to the larger of the two for now, making sure
4303        * that both cases can be handled.
4305 const fs_builder ibld = bld.at(block, inst)
4306 .exec_all(inst->force_writemask_all)
4307 .group(MAX2(inst->exec_size, lower_width),
4308 inst->force_sechalf);
4310       /* Split the copies into chunks of the execution width of either the
4311 * original or the lowered instruction, whichever is lower.
4313 const unsigned copy_width = MIN2(lower_width, inst->exec_size);
4314 const unsigned n = inst->exec_size / copy_width;
4315 const unsigned dst_size = inst->regs_written * REG_SIZE /
4316 inst->dst.component_size(inst->exec_size);
4319 assert(n > 0 && n <= ARRAY_SIZE(dsts) &&
4320 !inst->writes_accumulator && !inst->mlen);
4322 for (unsigned i = 0; i < n; i++) {
4323 /* Emit a copy of the original instruction with the lowered width.
4324           * If the EOT flag was set, throw it away except on the last
4325           * instruction, to avoid killing the thread prematurely.
4327 fs_inst split_inst = *inst;
4328 split_inst.exec_size = lower_width;
4329 split_inst.eot = inst->eot && i == n - 1;
4331 /* Select the correct channel enables for the i-th group, then
4332           * transform the sources and destination and emit the lowered instruction.
4335 const fs_builder lbld = ibld.group(lower_width, i);
4337 for (unsigned j = 0; j < inst->sources; j++) {
4338 if (inst->src[j].file != BAD_FILE &&
4339 !is_uniform(inst->src[j])) {
4340 /* Get the i-th copy_width-wide chunk of the source. */
4341 const fs_reg src = horiz_offset(inst->src[j], copy_width * i);
4342 const unsigned src_size = inst->components_read(j);
4344              /* Use a trivial transposition to copy one out of every n
4345 * copy_width-wide components of the register into a
4346 * temporary passed as source to the lowered instruction.
4348 split_inst.src[j] = lbld.vgrf(inst->src[j].type, src_size);
4349 emit_transpose(lbld.group(copy_width, 0),
4350 split_inst.src[j], &src, 1, src_size, n);
4354 if (inst->regs_written) {
4355 /* Allocate enough space to hold the result of the lowered
4356 * instruction and fix up the number of registers written.
4358 split_inst.dst = dsts[i] =
4359 lbld.vgrf(inst->dst.type, dst_size);
4360 split_inst.regs_written =
4361               DIV_ROUND_UP(inst->regs_written * lower_width, inst->exec_size);
4365 lbld.emit(split_inst);
4368 if (inst->regs_written) {
4369 /* Distance between useful channels in the temporaries, skipping
4370 * garbage if the lowered instruction is wider than the original.
4372 const unsigned m = lower_width / copy_width;
4374 /* Interleave the components of the result from the lowered
4375 * instructions. We need to set exec_all() when copying more than
4376 * one half per component, because LOAD_PAYLOAD (in terms of which
4377 * emit_transpose is implemented) can only use the same channel
4378 * enable signals for all of its non-header sources.
4380 emit_transpose(ibld.exec_all(inst->exec_size > copy_width)
4381 .group(copy_width, 0),
4382 inst->dst, dsts, n, dst_size, m);
4385 inst->remove(block);
4391 invalidate_live_intervals();
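/* Sketch of the overall transformation (illustrative IR, register names
 * assumed, single-component destination): a SIMD16 instruction capped at
 * SIMD8 by get_lowered_simd_width() ends up roughly as
 *
 *    op(16) dst, src      ->    op(8) tmp0, src.chunk(0)
 *                               op(8) tmp1, src.chunk(1)
 *                               load_payload(16) dst, tmp0, tmp1
 *
 * where the chunk selections correspond to the emit_transpose() calls on
 * the non-uniform sources, and the final LOAD_PAYLOAD to the transpose that
 * interleaves the per-half results back into the original destination.
 */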
4397 fs_visitor::dump_instructions()
4399 dump_instructions(NULL);
4403 fs_visitor::dump_instructions(const char *name)
4405 FILE *file = stderr;
4406 if (name && geteuid() != 0) {
4407 file = fopen(name, "w");
4413 calculate_register_pressure();
4414 int ip = 0, max_pressure = 0;
4415 foreach_block_and_inst(block, backend_instruction, inst, cfg) {
4416 max_pressure = MAX2(max_pressure, regs_live_at_ip[ip]);
4417 fprintf(file, "{%3d} %4d: ", regs_live_at_ip[ip], ip);
4418 dump_instruction(inst, file);
4421 fprintf(file, "Maximum %3d registers live at once.\n", max_pressure);
4424 foreach_in_list(backend_instruction, inst, &instructions) {
4425 fprintf(file, "%4d: ", ip++);
4426 dump_instruction(inst, file);
4430 if (file != stderr) {
4436 fs_visitor::dump_instruction(backend_instruction *be_inst)
4438 dump_instruction(be_inst, stderr);
4442 fs_visitor::dump_instruction(backend_instruction *be_inst, FILE *file)
4444 fs_inst *inst = (fs_inst *)be_inst;
4446 if (inst->predicate) {
4447 fprintf(file, "(%cf0.%d) ",
4448 inst->predicate_inverse ? '-' : '+',
4452 fprintf(file, "%s", brw_instruction_name(inst->opcode));
4454 fprintf(file, ".sat");
4455 if (inst->conditional_mod) {
4456 fprintf(file, "%s", conditional_modifier[inst->conditional_mod]);
4457 if (!inst->predicate &&
4458 (devinfo->gen < 5 || (inst->opcode != BRW_OPCODE_SEL &&
4459 inst->opcode != BRW_OPCODE_IF &&
4460 inst->opcode != BRW_OPCODE_WHILE))) {
4461 fprintf(file, ".f0.%d", inst->flag_subreg);
4464 fprintf(file, "(%d) ", inst->exec_size);
4467 fprintf(file, "(mlen: %d) ", inst->mlen);
4470 switch (inst->dst.file) {
4472 fprintf(file, "vgrf%d", inst->dst.reg);
4473 if (alloc.sizes[inst->dst.reg] != inst->regs_written ||
4474 inst->dst.subreg_offset)
4475 fprintf(file, "+%d.%d",
4476 inst->dst.reg_offset, inst->dst.subreg_offset);
4479 fprintf(file, "m%d", inst->dst.reg);
4482 fprintf(file, "(null)");
4485 fprintf(file, "***u%d***", inst->dst.reg + inst->dst.reg_offset);
4488 fprintf(file, "***attr%d***", inst->dst.reg + inst->dst.reg_offset);
4491 if (inst->dst.fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
4492 switch (inst->dst.fixed_hw_reg.nr) {
4494 fprintf(file, "null");
4496 case BRW_ARF_ADDRESS:
4497 fprintf(file, "a0.%d", inst->dst.fixed_hw_reg.subnr);
4499 case BRW_ARF_ACCUMULATOR:
4500 fprintf(file, "acc%d", inst->dst.fixed_hw_reg.subnr);
4503 fprintf(file, "f%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
4504 inst->dst.fixed_hw_reg.subnr);
4507 fprintf(file, "arf%d.%d", inst->dst.fixed_hw_reg.nr & 0xf,
4508 inst->dst.fixed_hw_reg.subnr);
4512 fprintf(file, "hw_reg%d", inst->dst.fixed_hw_reg.nr);
4514 if (inst->dst.fixed_hw_reg.subnr)
4515 fprintf(file, "+%d", inst->dst.fixed_hw_reg.subnr);
4518 fprintf(file, "???");
4521 fprintf(file, ":%s, ", brw_reg_type_letters(inst->dst.type));
4523 for (int i = 0; i < inst->sources; i++) {
4524 if (inst->src[i].negate)
4526 if (inst->src[i].abs)
4528 switch (inst->src[i].file) {
4530 fprintf(file, "vgrf%d", inst->src[i].reg);
4531 if (alloc.sizes[inst->src[i].reg] != (unsigned)inst->regs_read(i) ||
4532 inst->src[i].subreg_offset)
4533 fprintf(file, "+%d.%d", inst->src[i].reg_offset,
4534 inst->src[i].subreg_offset);
4537 fprintf(file, "***m%d***", inst->src[i].reg);
4540 fprintf(file, "attr%d", inst->src[i].reg + inst->src[i].reg_offset);
4543 fprintf(file, "u%d", inst->src[i].reg + inst->src[i].reg_offset);
4544 if (inst->src[i].reladdr) {
4545 fprintf(file, "+reladdr");
4546 } else if (inst->src[i].subreg_offset) {
4547 fprintf(file, "+%d.%d", inst->src[i].reg_offset,
4548 inst->src[i].subreg_offset);
4552 fprintf(file, "(null)");
4555 switch (inst->src[i].type) {
4556 case BRW_REGISTER_TYPE_F:
4557 fprintf(file, "%ff", inst->src[i].fixed_hw_reg.dw1.f);
4559 case BRW_REGISTER_TYPE_W:
4560 case BRW_REGISTER_TYPE_D:
4561 fprintf(file, "%dd", inst->src[i].fixed_hw_reg.dw1.d);
4563 case BRW_REGISTER_TYPE_UW:
4564 case BRW_REGISTER_TYPE_UD:
4565 fprintf(file, "%uu", inst->src[i].fixed_hw_reg.dw1.ud);
4567 case BRW_REGISTER_TYPE_VF:
4568 fprintf(file, "[%-gF, %-gF, %-gF, %-gF]",
4569 brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 0) & 0xff),
4570 brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 8) & 0xff),
4571 brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 16) & 0xff),
4572 brw_vf_to_float((inst->src[i].fixed_hw_reg.dw1.ud >> 24) & 0xff));
4575 fprintf(file, "???");
4580 if (inst->src[i].fixed_hw_reg.negate)
4582 if (inst->src[i].fixed_hw_reg.abs)
4584 if (inst->src[i].fixed_hw_reg.file == BRW_ARCHITECTURE_REGISTER_FILE) {
4585 switch (inst->src[i].fixed_hw_reg.nr) {
4587 fprintf(file, "null");
4589 case BRW_ARF_ADDRESS:
4590 fprintf(file, "a0.%d", inst->src[i].fixed_hw_reg.subnr);
4592 case BRW_ARF_ACCUMULATOR:
4593 fprintf(file, "acc%d", inst->src[i].fixed_hw_reg.subnr);
4596 fprintf(file, "f%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
4597 inst->src[i].fixed_hw_reg.subnr);
4600 fprintf(file, "arf%d.%d", inst->src[i].fixed_hw_reg.nr & 0xf,
4601 inst->src[i].fixed_hw_reg.subnr);
4605 fprintf(file, "hw_reg%d", inst->src[i].fixed_hw_reg.nr);
4607 if (inst->src[i].fixed_hw_reg.subnr)
4608 fprintf(file, "+%d", inst->src[i].fixed_hw_reg.subnr);
4609 if (inst->src[i].fixed_hw_reg.abs)
4613 fprintf(file, "???");
4616 if (inst->src[i].abs)
4619 if (inst->src[i].file != IMM) {
4620 fprintf(file, ":%s", brw_reg_type_letters(inst->src[i].type));
4623 if (i < inst->sources - 1 && inst->src[i + 1].file != BAD_FILE)
4624 fprintf(file, ", ");
4629 if (dispatch_width == 16 && inst->exec_size == 8) {
4630 if (inst->force_sechalf)
4631 fprintf(file, "2ndhalf ");
4633 fprintf(file, "1sthalf ");
4636 fprintf(file, "\n");
4640 * Possibly returns an instruction that set up @param reg.
4642 * Sometimes we want to take the result of some expression/variable
4643 * dereference tree and rewrite the instruction generating the result
4644 * of the tree. When processing the tree, we know that the
4645 * instructions generated are all writing temporaries that are dead
4646 * outside of this tree. So, if we have some instructions that write
4647 * a temporary, we're free to point that temp write somewhere else.
4649  * Note that this doesn't guarantee that the returned instruction wrote
4650  * only reg -- it might be the size=4 destination of a texture instruction.
4653 fs_visitor::get_instruction_generating_reg(fs_inst *start,
4658 end->is_partial_write() ||
4660 !reg.equals(end->dst)) {
4668 fs_visitor::setup_payload_gen6()
4671 (prog->InputsRead & (1 << VARYING_SLOT_POS)) != 0;
4672 unsigned barycentric_interp_modes =
4673 (stage == MESA_SHADER_FRAGMENT) ?
4674 ((brw_wm_prog_data*) this->prog_data)->barycentric_interp_modes : 0;
4676 assert(devinfo->gen >= 6);
4678 /* R0-1: masks, pixel X/Y coordinates. */
4679 payload.num_regs = 2;
4680    /* R2: only for 32-pixel dispatch. */
4682 /* R3-26: barycentric interpolation coordinates. These appear in the
4683 * same order that they appear in the brw_wm_barycentric_interp_mode
4684 * enum. Each set of coordinates occupies 2 registers if dispatch width
4685 * == 8 and 4 registers if dispatch width == 16. Coordinates only
4686 * appear if they were enabled using the "Barycentric Interpolation
4687 * Mode" bits in WM_STATE.
4689 for (int i = 0; i < BRW_WM_BARYCENTRIC_INTERP_MODE_COUNT; ++i) {
4690 if (barycentric_interp_modes & (1 << i)) {
4691 payload.barycentric_coord_reg[i] = payload.num_regs;
4692 payload.num_regs += 2;
4693 if (dispatch_width == 16) {
4694 payload.num_regs += 2;
4699    /* R27: interpolated depth, if the shader uses source depth. */
4701 payload.source_depth_reg = payload.num_regs;
4703 if (dispatch_width == 16) {
4704 /* R28: interpolated depth if not SIMD8. */
4708 /* R29: interpolated W set if GEN6_WM_USES_SOURCE_W. */
4710 payload.source_w_reg = payload.num_regs;
4712 if (dispatch_width == 16) {
4713 /* R30: interpolated W if not SIMD8. */
4718 if (stage == MESA_SHADER_FRAGMENT) {
4719 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
4720 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
4721 prog_data->uses_pos_offset = key->compute_pos_offset;
4722 /* R31: MSAA position offsets. */
4723 if (prog_data->uses_pos_offset) {
4724 payload.sample_pos_reg = payload.num_regs;
4729 /* R32: MSAA input coverage mask */
4730 if (prog->SystemValuesRead & SYSTEM_BIT_SAMPLE_MASK_IN) {
4731 assert(devinfo->gen >= 7);
4732 payload.sample_mask_in_reg = payload.num_regs;
4734 if (dispatch_width == 16) {
4735 /* R33: input coverage mask if not SIMD8. */
4740 /* R34-: bary for 32-pixel. */
4741 /* R58-59: interp W for 32-pixel. */
4743 if (prog->OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH)) {
4744 source_depth_to_render_target = true;
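/* Worked payload layout (assumed configuration; the elided depth/W register
 * increments behave like the barycentric ones above): a SIMD16 shader with a
 * single barycentric mode enabled and source depth in use ends up with
 * barycentric_coord_reg[mode] = 2 (4 registers at dispatch width 16) and
 * source_depth_reg = 6 (2 registers), i.e. payload.num_regs = 8 before any
 * MSAA-related registers are accounted for.
 */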
4749 fs_visitor::setup_vs_payload()
4751 /* R0: thread header, R1: urb handles */
4752 payload.num_regs = 2;
4756 fs_visitor::setup_cs_payload()
4758 assert(devinfo->gen >= 7);
4760 payload.num_regs = 1;
4762 if (prog->SystemValuesRead & SYSTEM_BIT_LOCAL_INVOCATION_ID) {
4763 const unsigned local_id_dwords =
4764 brw_cs_prog_local_id_payload_dwords(prog, dispatch_width);
4765 assert((local_id_dwords & 0x7) == 0);
4766 const unsigned local_id_regs = local_id_dwords / 8;
4767 payload.local_invocation_id_reg = payload.num_regs;
4768 payload.num_regs += local_id_regs;
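/* Worked example (per-channel dword count assumed): if
 * brw_cs_prog_local_id_payload_dwords() reports 3 dwords per channel, one
 * per gl_LocalInvocationID component, a SIMD8 shader gets
 * local_id_dwords = 24 and local_id_regs = 24 / 8 = 3, growing the payload
 * from 1 register (R0) to 4.
 */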
4773 fs_visitor::assign_fs_binding_table_offsets()
4775 assert(stage == MESA_SHADER_FRAGMENT);
4776 brw_wm_prog_data *prog_data = (brw_wm_prog_data*) this->prog_data;
4777 brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
4778 uint32_t next_binding_table_offset = 0;
4780 /* If there are no color regions, we still perform an FB write to a null
4781 * renderbuffer, which we place at surface index 0.
4783 prog_data->binding_table.render_target_start = next_binding_table_offset;
4784 next_binding_table_offset += MAX2(key->nr_color_regions, 1);
4786 assign_common_binding_table_offsets(next_binding_table_offset);
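/* For instance (illustrative count): with key->nr_color_regions == 4 the
 * render targets occupy binding table entries 0-3, and
 * assign_common_binding_table_offsets() lays out the remaining surfaces
 * (textures, UBOs, etc.) starting at entry 4.
 */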
4790 fs_visitor::assign_cs_binding_table_offsets()
4792 assert(stage == MESA_SHADER_COMPUTE);
4793 brw_cs_prog_data *prog_data = (brw_cs_prog_data*) this->prog_data;
4794 uint32_t next_binding_table_offset = 0;
4796 /* May not be used if the gl_NumWorkGroups variable is not accessed. */
4797 prog_data->binding_table.work_groups_start = next_binding_table_offset;
4798 next_binding_table_offset++;
4800 assign_common_binding_table_offsets(next_binding_table_offset);
4804 fs_visitor::calculate_register_pressure()
4806 invalidate_live_intervals();
4807 calculate_live_intervals();
4809 unsigned num_instructions = 0;
4810 foreach_block(block, cfg)
4811 num_instructions += block->instructions.length();
4813 regs_live_at_ip = rzalloc_array(mem_ctx, int, num_instructions);
4815 for (unsigned reg = 0; reg < alloc.count; reg++) {
4816 for (int ip = virtual_grf_start[reg]; ip <= virtual_grf_end[reg]; ip++)
4817 regs_live_at_ip[ip] += alloc.sizes[reg];
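/* Small worked example (hypothetical live ranges): if vgrf0 occupies 2
 * registers and is live over instructions 0-3, while vgrf1 occupies 1
 * register and is live over instructions 2-5, the loop above yields
 *
 *    regs_live_at_ip = { 2, 2, 3, 3, 1, 1 }
 *
 * which dump_instructions() prints in the leading "{...}" column.
 */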
4822 fs_visitor::optimize()
4824 /* Start by validating the shader we currently have. */
4827 /* bld is the common builder object pointing at the end of the program we
4828 * used to translate it into i965 IR. For the optimization and lowering
4829 * passes coming next, any code added after the end of the program without
4830 * having explicitly called fs_builder::at() clearly points at a mistake.
4831 * Ideally optimization passes wouldn't be part of the visitor so they
4832 * wouldn't have access to bld at all, but they do, so just in case some
4833 * pass forgets to ask for a location explicitly set it to NULL here to
4834 * make it trip. The dispatch width is initialized to a bogus value to
4835 * make sure that optimizations set the execution controls explicitly to
4836 * match the code they are manipulating instead of relying on the defaults.
4838 bld = fs_builder(this, 64);
4840 assign_constant_locations();
4841 demote_pull_constants();
4845 split_virtual_grfs();
4848 #define OPT(pass, args...) ({ \
4850 bool this_progress = pass(args); \
4852 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER) && this_progress) { \
4853 char filename[64]; \
4854 snprintf(filename, 64, "%s%d-%04d-%02d-%02d-" #pass, \
4855 stage_abbrev, dispatch_width, shader_prog ? shader_prog->Name : 0, iteration, pass_num); \
4857 backend_shader::dump_instructions(filename); \
4862 progress = progress || this_progress; \
4866 if (unlikely(INTEL_DEBUG & DEBUG_OPTIMIZER)) {
4868 snprintf(filename, 64, "%s%d-%04d-00-start",
4869 stage_abbrev, dispatch_width,
4870 shader_prog ? shader_prog->Name : 0);
4872 backend_shader::dump_instructions(filename);
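   /* As an example of the OPT() bookkeeping (filename inferred from the
    * snprintf calls above, numbers illustrative): running a SIMD8 fragment
    * shader with the "optimizer" flag set in INTEL_DEBUG, a pass invocation
    * such as OPT(dead_code_eliminate) that makes progress dumps the IR to a
    * file named along the lines of "fs8-0003-01-04-dead_code_eliminate",
    * encoding the stage, dispatch width, program name, iteration and pass
    * number.
    */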
4875 bool progress = false;
4879 OPT(lower_simd_width);
4880 OPT(lower_logical_sends);
4887 OPT(remove_duplicate_mrf_writes);
4891 OPT(opt_copy_propagate);
4892 OPT(opt_peephole_predicated_break);
4893 OPT(opt_cmod_propagation);
4894 OPT(dead_code_eliminate);
4895 OPT(opt_peephole_sel);
4896 OPT(dead_control_flow_eliminate, this);
4897 OPT(opt_register_renaming);
4898 OPT(opt_redundant_discard_jumps);
4899 OPT(opt_saturate_propagation);
4900 OPT(opt_zero_samples);
4901 OPT(register_coalesce);
4902 OPT(compute_to_mrf);
4903 OPT(eliminate_find_live_channel);
4905 OPT(compact_virtual_grfs);
4910 OPT(opt_sampler_eot);
4912 if (OPT(lower_load_payload)) {
4913 split_virtual_grfs();
4914 OPT(register_coalesce);
4915 OPT(compute_to_mrf);
4916 OPT(dead_code_eliminate);
4919 OPT(opt_combine_constants);
4920 OPT(lower_integer_multiplication);
4922 lower_uniform_pull_constant_loads();
4928  * Three-source instructions must have a GRF/MRF destination register.
4929 * ARF NULL is not allowed. Fix that up by allocating a temporary GRF.
4932 fs_visitor::fixup_3src_null_dest()
4934 foreach_block_and_inst_safe (block, fs_inst, inst, cfg) {
4935 if (inst->is_3src() && inst->dst.is_null()) {
4936          inst->dst = fs_reg(GRF, alloc.allocate(dispatch_width / 8), inst->dst.type);
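      /* E.g. a mad(8) whose result is consumed only through its conditional
       * mod would otherwise have an ARF null destination; it is given a
       * freshly allocated one-register GRF (dispatch_width / 8 == 1 at
       * SIMD8) instead.
       */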
4943 fs_visitor::allocate_registers()
4945 bool allocated_without_spills;
4947 static const enum instruction_scheduler_mode pre_modes[] = {
4949 SCHEDULE_PRE_NON_LIFO,
4953 /* Try each scheduling heuristic to see if it can successfully register
4954 * allocate without spilling. They should be ordered by decreasing
4955 * performance but increasing likelihood of allocating.
4957 for (unsigned i = 0; i < ARRAY_SIZE(pre_modes); i++) {
4958 schedule_instructions(pre_modes[i]);
4961 assign_regs_trivial();
4962 allocated_without_spills = true;
4964 allocated_without_spills = assign_regs(false);
4966 if (allocated_without_spills)
4970 if (!allocated_without_spills) {
4971 /* We assume that any spilling is worse than just dropping back to
4972 * SIMD8. There's probably actually some intermediate point where
4973 * SIMD16 with a couple of spills is still better.
4975 if (dispatch_width == 16) {
4976 fail("Failure to register allocate. Reduce number of "
4977 "live scalar values to avoid this.");
4979 compiler->shader_perf_log(log_data,
4980 "%s shader triggered register spilling. "
4981 "Try reducing the number of live scalar "
4982 "values to improve performance.\n",
4986 /* Since we're out of heuristics, just go spill registers until we
4987 * get an allocation.
4989 while (!assign_regs(true)) {
4995 /* This must come after all optimization and register allocation, since
4996 * it inserts dead code that happens to have side effects, and it does
4997 * so based on the actual physical registers in use.
4999 insert_gen4_send_dependency_workarounds();
5004 if (!allocated_without_spills)
5005 schedule_instructions(SCHEDULE_POST);
5007 if (last_scratch > 0)
5008 prog_data->total_scratch = brw_get_scratch_size(last_scratch);
5012 fs_visitor::run_vs(gl_clip_plane *clip_planes)
5014 assert(stage == MESA_SHADER_VERTEX);
5016 if (prog_data->map_entries == NULL)
5017 assign_common_binding_table_offsets(0);
5020 if (shader_time_index >= 0)
5021 emit_shader_time_begin();
5028 compute_clip_distance(clip_planes);
5032 if (shader_time_index >= 0)
5033 emit_shader_time_end();
5039 assign_curb_setup();
5040 assign_vs_urb_setup();
5042 fixup_3src_null_dest();
5043 allocate_registers();
5049 fs_visitor::run_fs(bool do_rep_send)
5051 brw_wm_prog_data *wm_prog_data = (brw_wm_prog_data *) this->prog_data;
5052 brw_wm_prog_key *wm_key = (brw_wm_prog_key *) this->key;
5054 assert(stage == MESA_SHADER_FRAGMENT);
5056 sanity_param_count = prog->Parameters->NumParameters;
5058 if (prog_data->map_entries == NULL)
5059 assign_fs_binding_table_offsets();
5061 if (devinfo->gen >= 6)
5062 setup_payload_gen6();
5064 setup_payload_gen4();
5068 } else if (do_rep_send) {
5069 assert(dispatch_width == 16);
5070 emit_repclear_shader();
5072 if (shader_time_index >= 0)
5073 emit_shader_time_begin();
5075 calculate_urb_setup();
5076 if (prog->InputsRead > 0) {
5077 if (devinfo->gen < 6)
5078 emit_interpolation_setup_gen4();
5080 emit_interpolation_setup_gen6();
5083 /* We handle discards by keeping track of the still-live pixels in f0.1.
5084 * Initialize it with the dispatched pixels.
5086 if (wm_prog_data->uses_kill) {
5087 fs_inst *discard_init = bld.emit(FS_OPCODE_MOV_DISPATCH_TO_FLAGS);
5088 discard_init->flag_subreg = 1;
5091       /* Generate FS IR for main(). (The visitor only descends into
5092        * functions called "main".)
5099 if (wm_prog_data->uses_kill)
5100 bld.emit(FS_OPCODE_PLACEHOLDER_HALT);
5102 if (wm_key->alpha_test_func)
5107 if (shader_time_index >= 0)
5108 emit_shader_time_end();
5114 assign_curb_setup();
5117 fixup_3src_null_dest();
5118 allocate_registers();
5124 if (dispatch_width == 8)
5125 wm_prog_data->reg_blocks = brw_register_blocks(grf_used);
5127 wm_prog_data->reg_blocks_16 = brw_register_blocks(grf_used);
5133 fs_visitor::run_cs()
5135 assert(stage == MESA_SHADER_COMPUTE);
5138 sanity_param_count = prog->Parameters->NumParameters;
5140 assign_cs_binding_table_offsets();
5144 if (shader_time_index >= 0)
5145 emit_shader_time_begin();
5152 emit_cs_terminate();
5154 if (shader_time_index >= 0)
5155 emit_shader_time_end();
5161 assign_curb_setup();
5163 fixup_3src_null_dest();
5164 allocate_registers();
5169 /* If any state parameters were appended, then ParameterValues could have
5170 * been realloced, in which case the driver uniform storage set up by
5171 * _mesa_associate_uniform_storage() would point to freed memory. Make
5172 * sure that didn't happen.
5174 assert(sanity_param_count == prog->Parameters->NumParameters);
5180 brw_wm_fs_emit(struct brw_context *brw,
5182 const struct brw_wm_prog_key *key,
5183 struct brw_wm_prog_data *prog_data,
5184 struct gl_fragment_program *fp,
5185 struct gl_shader_program *prog,
5186 unsigned *final_assembly_size)
5188 struct brw_shader *shader = NULL;
5190 shader = (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
5192 if (unlikely(INTEL_DEBUG & DEBUG_WM) && shader->base.ir)
5193 brw_dump_ir("fragment", prog, &shader->base, &fp->Base);
5195 int st_index8 = -1, st_index16 = -1;
5196 if (INTEL_DEBUG & DEBUG_SHADER_TIME) {
5197 st_index8 = brw_get_shader_time_index(brw, prog, &fp->Base, ST_FS8);
5198 st_index16 = brw_get_shader_time_index(brw, prog, &fp->Base, ST_FS16);
5201 /* Now the main event: Visit the shader IR and generate our FS IR for it.
5203 fs_visitor v(brw->intelScreen->compiler, brw,
5204 mem_ctx, MESA_SHADER_FRAGMENT, key, &prog_data->base,
5205 prog, &fp->Base, 8, st_index8);
5206 if (!v.run_fs(false /* do_rep_send */)) {
5208 prog->LinkStatus = false;
5209 ralloc_strcat(&prog->InfoLog, v.fail_msg);
5212 _mesa_problem(NULL, "Failed to compile fragment shader: %s\n",
5218 cfg_t *simd16_cfg = NULL;
5219 fs_visitor v2(brw->intelScreen->compiler, brw,
5220 mem_ctx, MESA_SHADER_FRAGMENT, key, &prog_data->base,
5221 prog, &fp->Base, 16, st_index16);
5222 if (likely(!(INTEL_DEBUG & DEBUG_NO16) || brw->use_rep_send)) {
5223 if (!v.simd16_unsupported) {
5224 /* Try a SIMD16 compile */
5225 v2.import_uniforms(&v);
5226 if (!v2.run_fs(brw->use_rep_send)) {
5227 perf_debug("SIMD16 shader failed to compile: %s", v2.fail_msg);
5229 simd16_cfg = v2.cfg;
5235 int no_simd8 = (INTEL_DEBUG & DEBUG_NO8) || brw->no_simd8;
5236 if ((no_simd8 || brw->gen < 5) && simd16_cfg) {
5238 prog_data->no_8 = true;
5241 prog_data->no_8 = false;
5244 fs_generator g(brw->intelScreen->compiler, brw,
5245 mem_ctx, (void *) key, &prog_data->base,
5246 &fp->Base, v.promoted_constants, v.runtime_check_aads_emit, "FS");
5248 if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
5251 name = ralloc_asprintf(mem_ctx, "%s fragment shader %d",
5252 prog->Label ? prog->Label : "unnamed",
5255 name = ralloc_asprintf(mem_ctx, "fragment program %d", fp->Base.Id);
5257 g.enable_debug(name);
5261 g.generate_code(simd8_cfg, 8);
5263 prog_data->prog_offset_16 = g.generate_code(simd16_cfg, 16);
5265 return g.get_assembly(final_assembly_size);
5269 fs_visitor::emit_cs_local_invocation_id_setup()
5271 assert(stage == MESA_SHADER_COMPUTE);
5273 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
5275 struct brw_reg src =
5276 brw_vec8_grf(payload.local_invocation_id_reg, 0);
5277 src = retype(src, BRW_REGISTER_TYPE_UD);
5279 src.nr += dispatch_width / 8;
5280 bld.MOV(offset(*reg, bld, 1), src);
5281 src.nr += dispatch_width / 8;
5282 bld.MOV(offset(*reg, bld, 2), src);
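/* Continuing the setup_cs_payload() arithmetic (illustrative register
 * numbers): at SIMD8 with local_invocation_id_reg = 1, each component
 * occupies dispatch_width / 8 = 1 register, so gl_LocalInvocationID.x/y/z
 * come from payload registers g1, g2 and g3 respectively.
 */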
5288 fs_visitor::emit_cs_work_group_id_setup()
5290 assert(stage == MESA_SHADER_COMPUTE);
5292 fs_reg *reg = new(this->mem_ctx) fs_reg(vgrf(glsl_type::uvec3_type));
5294 struct brw_reg r0_1(retype(brw_vec1_grf(0, 1), BRW_REGISTER_TYPE_UD));
5295 struct brw_reg r0_6(retype(brw_vec1_grf(0, 6), BRW_REGISTER_TYPE_UD));
5296 struct brw_reg r0_7(retype(brw_vec1_grf(0, 7), BRW_REGISTER_TYPE_UD));
5298 bld.MOV(*reg, r0_1);
5299 bld.MOV(offset(*reg, bld, 1), r0_6);
5300 bld.MOV(offset(*reg, bld, 2), r0_7);
5306 brw_cs_emit(struct brw_context *brw,
5308 const struct brw_cs_prog_key *key,
5309 struct brw_cs_prog_data *prog_data,
5310 struct gl_compute_program *cp,
5311 struct gl_shader_program *prog,
5312 unsigned *final_assembly_size)
5314 struct brw_shader *shader =
5315 (struct brw_shader *) prog->_LinkedShaders[MESA_SHADER_COMPUTE];
5317 if (unlikely(INTEL_DEBUG & DEBUG_CS))
5318 brw_dump_ir("compute", prog, &shader->base, &cp->Base);
5320 prog_data->local_size[0] = cp->LocalSize[0];
5321 prog_data->local_size[1] = cp->LocalSize[1];
5322 prog_data->local_size[2] = cp->LocalSize[2];
5323 unsigned local_workgroup_size =
5324 cp->LocalSize[0] * cp->LocalSize[1] * cp->LocalSize[2];
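/* Example of the SIMD width selection below (illustrative limits): a local
 * size of 16x16x1 gives local_workgroup_size = 256; assuming
 * brw->max_cs_threads = 64, that fits both SIMD8 (256 <= 8 * 64) and
 * SIMD16 (256 <= 16 * 64), so the SIMD16 variant is used whenever it
 * compiles successfully.
 */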
5327 const char *fail_msg = NULL;
5330 if (INTEL_DEBUG & DEBUG_SHADER_TIME)
5331 st_index = brw_get_shader_time_index(brw, prog, &cp->Base, ST_CS);
5333 /* Now the main event: Visit the shader IR and generate our CS IR for it.
5335 fs_visitor v8(brw->intelScreen->compiler, brw,
5336 mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
5337 &cp->Base, 8, st_index);
5339 fail_msg = v8.fail_msg;
5340 } else if (local_workgroup_size <= 8 * brw->max_cs_threads) {
5342 prog_data->simd_size = 8;
5345 fs_visitor v16(brw->intelScreen->compiler, brw,
5346 mem_ctx, MESA_SHADER_COMPUTE, key, &prog_data->base, prog,
5347 &cp->Base, 16, st_index);
5348 if (likely(!(INTEL_DEBUG & DEBUG_NO16)) &&
5349 !fail_msg && !v8.simd16_unsupported &&
5350 local_workgroup_size <= 16 * brw->max_cs_threads) {
5351 /* Try a SIMD16 compile */
5352 v16.import_uniforms(&v8);
5353 if (!v16.run_cs()) {
5354 perf_debug("SIMD16 shader failed to compile: %s", v16.fail_msg);
5357 "Couldn't generate SIMD16 program and not "
5358 "enough threads for SIMD8";
5362 prog_data->simd_size = 16;
5366 if (unlikely(cfg == NULL)) {
5368 prog->LinkStatus = false;
5369 ralloc_strcat(&prog->InfoLog, fail_msg);
5370 _mesa_problem(NULL, "Failed to compile compute shader: %s\n",
5375 fs_generator g(brw->intelScreen->compiler, brw,
5376 mem_ctx, (void*) key, &prog_data->base, &cp->Base,
5377 v8.promoted_constants, v8.runtime_check_aads_emit, "CS");
5378 if (INTEL_DEBUG & DEBUG_CS) {
5379 char *name = ralloc_asprintf(mem_ctx, "%s compute shader %d",
5380 prog->Label ? prog->Label : "unnamed",
5382 g.enable_debug(name);
5385 g.generate_code(cfg, prog_data->simd_size);
5387 return g.get_assembly(final_assembly_size);