2 * Copyright © 2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
26 * This file drives the GLSL IR -> LIR translation, contains the
27 * optimizations on the LIR, and drives the generation of native code
33 #include <sys/types.h>
35 #include "main/macros.h"
36 #include "main/shaderobj.h"
37 #include "main/uniforms.h"
38 #include "program/prog_parameter.h"
39 #include "program/prog_print.h"
40 #include "program/register_allocate.h"
41 #include "program/sampler.h"
42 #include "program/hash_table.h"
43 #include "brw_context.h"
47 #include "brw_shader.h"
49 #include "../glsl/glsl_types.h"
50 #include "../glsl/ir_print_visitor.h"
52 #define MAX_INSTRUCTION (1 << 30)
/* Returns how many scalar slots a glsl_type occupies in the virtual
 * register files: vector types count their components, arrays and
 * structs recurse over their elements, and samplers contribute nothing
 * (per the in-body comment, they are baked in elsewhere).
 * NOTE(review): the embedded line numbers jump (55, 59, 64, ...), so
 * several lines of this function (case labels, returns, braces) are
 * missing from this excerpt.
 */
55 fs_visitor::type_size(const struct glsl_type *type)
59 switch (type->base_type) {
64 return type->components();
66 return type_size(type->fields.array) * type->length;
67 case GLSL_TYPE_STRUCT:
69 for (i = 0; i < type->length; i++) {
70 size += type_size(type->fields.structure[i].type);
73 case GLSL_TYPE_SAMPLER:
74 /* Samplers take up no register space, since they're baked in at
79 assert(!"not reached");
/* Records a fragment-shader compile failure: formats the varargs
 * message into ralloc'd storage, stashes it in this->fail_msg, and
 * echoes it to stderr when INTEL_DEBUG has the WM flag set.
 */
85 fs_visitor::fail(const char *format, ...)
96 msg = ralloc_vasprintf(mem_ctx, format, va);
98 msg = ralloc_asprintf(mem_ctx, "FS compile failed: %s\n", msg);
100 this->fail_msg = msg;
102 if (INTEL_DEBUG & DEBUG_WM) {
103 fprintf(stderr, "%s", msg);
/* Pushes one level of "force uncompressed" state by bumping the stack
 * counter; paired with pop_force_uncompressed() below.
 */
108 fs_visitor::push_force_uncompressed()
110 force_uncompressed_stack++;
/* Pops one level of "force uncompressed" state; the assert catches an
 * unbalanced push/pop pair.
 */
114 fs_visitor::pop_force_uncompressed()
116 force_uncompressed_stack--;
117 assert(force_uncompressed_stack >= 0);
/* Pushes one level of "force second half" (sechalf) state by bumping
 * the stack counter; paired with pop_force_sechalf() below.
 */
121 fs_visitor::push_force_sechalf()
123 force_sechalf_stack++;
/* Pops one level of "force second half" state; the assert catches an
 * unbalanced push/pop pair.
 */
127 fs_visitor::pop_force_sechalf()
129 force_sechalf_stack--;
130 assert(force_sechalf_stack >= 0);
134 * Returns how many MRFs an FS opcode will write over.
136 * Note that this is not the 0 or 1 implied writes in an actual gen
137 * instruction -- the FS opcodes often generate MOVs in addition.
/* NOTE(review): the return values for the FB_WRITE / PULL_CONSTANT_LOAD /
 * UNSPILL / SPILL cases are elided in this excerpt (embedded line numbers
 * jump); only the math-opcode returns are visible. Math opcodes scale
 * their MRF usage with dispatch width (1 or 2 regs per 8 channels).
 */
140 fs_visitor::implied_mrf_writes(fs_inst *inst)
145 switch (inst->opcode) {
146 case SHADER_OPCODE_RCP:
147 case SHADER_OPCODE_RSQ:
148 case SHADER_OPCODE_SQRT:
149 case SHADER_OPCODE_EXP2:
150 case SHADER_OPCODE_LOG2:
151 case SHADER_OPCODE_SIN:
152 case SHADER_OPCODE_COS:
153 return 1 * c->dispatch_width / 8;
154 case SHADER_OPCODE_POW:
155 return 2 * c->dispatch_width / 8;
161 case FS_OPCODE_FB_WRITE:
163 case FS_OPCODE_PULL_CONSTANT_LOAD:
164 case FS_OPCODE_UNSPILL:
166 case FS_OPCODE_SPILL:
169 assert(!"not reached");
/* Allocates a new virtual GRF of the given size (in registers), growing
 * the virtual_grf_sizes tracking array geometrically (16, then x2) as
 * needed. Returns the new register's index.
 */
175 fs_visitor::virtual_grf_alloc(int size)
177 if (virtual_grf_array_size <= virtual_grf_next) {
178 if (virtual_grf_array_size == 0)
179 virtual_grf_array_size = 16;
181 virtual_grf_array_size *= 2;
182 virtual_grf_sizes = reralloc(mem_ctx, virtual_grf_sizes, int,
183 virtual_grf_array_size);
185 virtual_grf_sizes[virtual_grf_next] = size;
186 return virtual_grf_next++;
189 /** Fixed HW reg constructor. */
/* Two-argument form: defaults the register type to float. */
190 fs_reg::fs_reg(enum register_file file, int reg)
195 this->type = BRW_REGISTER_TYPE_F;
198 /** Fixed HW reg constructor. */
/* Overload that additionally takes an explicit hardware register type. */
199 fs_reg::fs_reg(enum register_file file, int reg, uint32_t type)
207 /** Automatic reg constructor. */
/* Allocates a fresh virtual GRF sized for the given GLSL type and sets
 * the matching hardware register type via brw_type_for_base_type().
 */
208 fs_reg::fs_reg(class fs_visitor *v, const struct glsl_type *type)
213 this->reg = v->virtual_grf_alloc(v->type_size(type));
214 this->reg_offset = 0;
215 this->type = brw_type_for_base_type(type);
/* Looks up the fs_reg previously recorded for an ir_variable in
 * variable_ht (NULL if none, per hash_table_find semantics).
 */
219 fs_visitor::variable_storage(ir_variable *var)
221 return (fs_reg *)hash_table_find(this->variable_ht, var);
/* hash_table_call_foreach() callback: copies entries whose fs_reg lives
 * in the UNIFORM file into the destination table passed via closure,
 * skipping everything else.
 */
225 import_uniforms_callback(const void *key,
229 struct hash_table *dst_ht = (struct hash_table *)closure;
230 const fs_reg *reg = (const fs_reg *)data;
232 if (reg->file != UNIFORM)
235 hash_table_insert(dst_ht, data, key);
238 /* For 16-wide, we need to follow from the uniform setup of 8-wide dispatch.
239 * This brings in those uniform definitions
242 fs_visitor::import_uniforms(fs_visitor *v)
244 hash_table_call_foreach(v->variable_ht,
245 import_uniforms_callback,
/* Share the 8-wide pass's param remap table instead of rebuilding it. */
247 this->params_remap = v->params_remap;
250 /* Our support for uniforms is piggy-backed on the struct
251 * gl_fragment_program, because that's where the values actually
252 * get stored, rather than in some global gl_shader_program uniform
/* Recursively walks a uniform's type, appending one entry per scalar
 * component to c->prog_data (param count, conversion mode) and recording
 * the (param_index, param_offset) pair used later to resolve values.
 * Matrices are handled column-by-column as float vectors; structs and
 * arrays recurse. Returns the number of slots consumed (via offset).
 * NOTE(review): several lines (returns, some case labels, closing
 * braces) are elided in this excerpt -- embedded line numbers jump.
 */
256 fs_visitor::setup_uniform_values(int loc, const glsl_type *type)
258 unsigned int offset = 0;
260 if (type->is_matrix()) {
261 const glsl_type *column = glsl_type::get_instance(GLSL_TYPE_FLOAT,
262 type->vector_elements,
265 for (unsigned int i = 0; i < type->matrix_columns; i++) {
266 offset += setup_uniform_values(loc + offset, column);
272 switch (type->base_type) {
273 case GLSL_TYPE_FLOAT:
277 for (unsigned int i = 0; i < type->vector_elements; i++) {
278 unsigned int param = c->prog_data.nr_params++;
280 assert(param < ARRAY_SIZE(c->prog_data.param));
/* With native integer support no float conversion is ever needed;
 * otherwise the base type decides the float->int/uint/bool convert.
 */
282 if (ctx->Const.NativeIntegers) {
283 c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
285 switch (type->base_type) {
286 case GLSL_TYPE_FLOAT:
287 c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
290 c->prog_data.param_convert[param] = PARAM_CONVERT_F2U;
293 c->prog_data.param_convert[param] = PARAM_CONVERT_F2I;
296 c->prog_data.param_convert[param] = PARAM_CONVERT_F2B;
299 assert(!"not reached");
300 c->prog_data.param_convert[param] = PARAM_NO_CONVERT;
304 this->param_index[param] = loc;
305 this->param_offset[param] = i;
309 case GLSL_TYPE_STRUCT:
310 for (unsigned int i = 0; i < type->length; i++) {
311 offset += setup_uniform_values(loc + offset,
312 type->fields.structure[i].type);
316 case GLSL_TYPE_ARRAY:
317 for (unsigned int i = 0; i < type->length; i++) {
318 offset += setup_uniform_values(loc + offset, type->fields.array);
322 case GLSL_TYPE_SAMPLER:
323 /* The sampler takes up a slot, but we don't use any values from it. */
327 assert(!"not reached");
333 /* Our support for builtin uniforms is even scarier than non-builtin.
334 * It sits on top of the PROG_STATE_VAR parameters that are
335 * automatically updated from GL context state.
/* For each state slot of the builtin uniform, re-adds the state
 * reference (getting back the index ir_to_mesa already assigned) and
 * appends one param per unique swizzle component so the layout matches
 * the array/matrix/struct being filled in.
 * NOTE(review): last_swiz's declaration and the duplicate-swizzle
 * "break"/convert value are elided in this excerpt.
 */
338 fs_visitor::setup_builtin_uniform_values(ir_variable *ir)
340 const ir_state_slot *const slots = ir->state_slots;
341 assert(ir->state_slots != NULL);
343 for (unsigned int i = 0; i < ir->num_state_slots; i++) {
344 /* This state reference has already been setup by ir_to_mesa, but we'll
345 * get the same index back here.
347 int index = _mesa_add_state_reference(this->fp->Base.Parameters,
348 (gl_state_index *)slots[i].tokens)
350 /* Add each of the unique swizzles of the element as a parameter.
351 * This'll end up matching the expected layout of the
352 * array/matrix/structure we're trying to fill in.
355 for (unsigned int j = 0; j < 4; j++) {
356 int swiz = GET_SWZ(slots[i].swizzle, j);
357 if (swiz == last_swiz)
361 c->prog_data.param_convert[c->prog_data.nr_params] =
363 this->param_index[c->prog_data.nr_params] = index;
364 this->param_offset[c->prog_data.nr_params] = swiz;
365 c->prog_data.nr_params++;
/* Emits the instructions that compute gl_FragCoord into a freshly
 * allocated register: x/y come from pixel_x/pixel_y (with the +0.5
 * pixel-center and render-to-FBO y-flip adjustments), z from the source
 * depth (gen6+ payload reg, earlier gens via LINTERP on WPOS), and w
 * from the precomputed wpos_w. Returns the base register.
 * NOTE(review): the wpos.reg_offset increments between components are
 * elided in this excerpt (embedded line numbers jump).
 */
371 fs_visitor::emit_fragcoord_interpolation(ir_variable *ir)
373 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
/* Y must be flipped when origin_upper_left disagrees with the
 * render-to-FBO orientation. */
375 bool flip = !ir->origin_upper_left ^ c->key.render_to_fbo;
378 if (ir->pixel_center_integer) {
379 emit(BRW_OPCODE_MOV, wpos, this->pixel_x);
381 emit(BRW_OPCODE_ADD, wpos, this->pixel_x, fs_reg(0.5f));
386 if (!flip && ir->pixel_center_integer) {
387 emit(BRW_OPCODE_MOV, wpos, this->pixel_y);
389 fs_reg pixel_y = this->pixel_y;
390 float offset = (ir->pixel_center_integer ? 0.0 : 0.5);
/* Flipped case: y' = (height - 1 + center_offset) - y. */
393 pixel_y.negate = true;
394 offset += c->key.drawable_height - 1.0;
397 emit(BRW_OPCODE_ADD, wpos, pixel_y, fs_reg(offset));
402 if (intel->gen >= 6) {
403 emit(BRW_OPCODE_MOV, wpos,
404 fs_reg(brw_vec8_grf(c->source_depth_reg, 0)));
406 emit(FS_OPCODE_LINTERP, wpos, this->delta_x, this->delta_y,
407 interp_reg(FRAG_ATTRIB_WPOS, 2));
411 /* gl_FragCoord.w: Already set up in emit_interpolation */
412 emit(BRW_OPCODE_MOV, wpos, this->wpos_w);
/* Emits interpolation code for a generic varying input: for each array
 * element / matrix column / vector component, either flat-shades from
 * the SF-provided constant (gl_Color inputs under flat shading),
 * substitutes 1.0 for known-unprojected texcoord .w, or emits a LINTERP
 * using the delta_x/delta_y barycentrics. On pre-gen6 the result is
 * additionally multiplied by pixel_w to undo the perspective divide.
 * Slots with no incoming setup data (urb_setup[location] == -1) are
 * skipped. Returns the base register holding the interpolated value.
 * NOTE(review): multiple lines (attr declaration, reg_offset increments,
 * location++, closing braces) are elided in this excerpt.
 */
418 fs_visitor::emit_general_interpolation(ir_variable *ir)
420 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
421 /* Interpolation is always in floating point regs. */
422 reg->type = BRW_REGISTER_TYPE_F;
425 unsigned int array_elements;
426 const glsl_type *type;
428 if (ir->type->is_array()) {
429 array_elements = ir->type->length;
430 if (array_elements == 0) {
431 fail("dereferenced array '%s' has length 0\n", ir->name);
433 type = ir->type->fields.array;
439 int location = ir->location;
440 for (unsigned int i = 0; i < array_elements; i++) {
441 for (unsigned int j = 0; j < type->matrix_columns; j++) {
442 if (urb_setup[location] == -1) {
443 /* If there's no incoming setup data for this slot, don't
444 * emit interpolation for it.
446 attr.reg_offset += type->vector_elements;
452 location == FRAG_ATTRIB_COL0 || location == FRAG_ATTRIB_COL1;
454 if (c->key.flat_shade && is_gl_Color) {
455 /* Constant interpolation (flat shading) case. The SF has
456 * handed us defined values in only the constant offset
457 * field of the setup reg.
459 for (unsigned int k = 0; k < type->vector_elements; k++) {
460 struct brw_reg interp = interp_reg(location, k);
461 interp = suboffset(interp, 3);
462 emit(FS_OPCODE_CINTERP, attr, fs_reg(interp));
466 /* Perspective interpolation case. */
467 for (unsigned int k = 0; k < type->vector_elements; k++) {
468 /* FINISHME: At some point we probably want to push
469 * this farther by giving similar treatment to the
470 * other potentially constant components of the
471 * attribute, as well as making brw_vs_constval.c
472 * handle varyings other than gl_TexCoord.
474 if (location >= FRAG_ATTRIB_TEX0 &&
475 location <= FRAG_ATTRIB_TEX7 &&
476 k == 3 && !(c->key.proj_attrib_mask & (1 << location))) {
477 emit(BRW_OPCODE_MOV, attr, fs_reg(1.0f));
479 struct brw_reg interp = interp_reg(location, k);
480 emit(FS_OPCODE_LINTERP, attr,
481 this->delta_x, this->delta_y, fs_reg(interp));
/* Pre-gen6: re-walk the components to multiply by 1/w
 * (pixel_w), since LINTERP there yields attr/w. */
486 if (intel->gen < 6) {
487 attr.reg_offset -= type->vector_elements;
488 for (unsigned int k = 0; k < type->vector_elements; k++) {
489 emit(BRW_OPCODE_MUL, attr, attr, this->pixel_w);
/* Computes gl_FrontFacing (a 0/1 value) from the thread payload bit.
 * gen6+: arithmetic-shift the sign bit of g0 down, invert, and mask to
 * one bit. Earlier gens: compare r1.6 UD against the back-face bit
 * (bit 31) with conditional-mod L, then mask to one bit.
 */
502 fs_visitor::emit_frontfacing_interpolation(ir_variable *ir)
504 fs_reg *reg = new(this->mem_ctx) fs_reg(this, ir->type);
506 /* The frontfacing comes in as a bit in the thread payload. */
507 if (intel->gen >= 6) {
508 emit(BRW_OPCODE_ASR, *reg,
509 fs_reg(retype(brw_vec1_grf(0, 0), BRW_REGISTER_TYPE_D)),
511 emit(BRW_OPCODE_NOT, *reg, *reg);
512 emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1));
514 struct brw_reg r1_6ud = retype(brw_vec1_grf(1, 6), BRW_REGISTER_TYPE_UD);
515 /* bit 31 is "primitive is back face", so checking < (1 << 31) gives
518 fs_inst *inst = emit(BRW_OPCODE_CMP, *reg,
521 inst->conditional_mod = BRW_CONDITIONAL_L;
522 emit(BRW_OPCODE_AND, *reg, *reg, fs_reg(1u));
/* Emits a one-source math instruction (RCP/RSQ/SQRT/EXP2/LOG2/SIN/COS).
 * On gen6+ a UNIFORM or modified (abs/negate) source is first copied to
 * a temporary, since the math unit can't take hstride==0 regions or
 * source modifiers. On pre-gen6 the op is a send-style message, so mlen
 * is set (and, per the missing lines, presumably base_mrf -- the
 * embedded line numbers jump here). Returns the emitted instruction.
 */
529 fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src)
532 case SHADER_OPCODE_RCP:
533 case SHADER_OPCODE_RSQ:
534 case SHADER_OPCODE_SQRT:
535 case SHADER_OPCODE_EXP2:
536 case SHADER_OPCODE_LOG2:
537 case SHADER_OPCODE_SIN:
538 case SHADER_OPCODE_COS:
541 assert(!"not reached: bad math opcode");
545 /* Can't do hstride == 0 args to gen6 math, so expand it out. We
546 * might be able to do better by doing execsize = 1 math and then
547 * expanding that result out, but we would need to be careful with
550 * The hardware ignores source modifiers (negate and abs) on math
551 * instructions, so we also move to a temp to set those up.
553 if (intel->gen >= 6 && (src.file == UNIFORM ||
556 fs_reg expanded = fs_reg(this, glsl_type::float_type);
557 emit(BRW_OPCODE_MOV, expanded, src);
561 fs_inst *inst = emit(opcode, dst, src);
563 if (intel->gen < 6) {
565 inst->mlen = c->dispatch_width / 8;
/* Emits a two-source math instruction; POW is the only binary math op
 * (asserted). gen6+: expand UNIFORM or abs/negate sources into temps
 * (math unit limitation), then emit directly. Pre-gen6: the second
 * operand is staged in MRF base_mrf+1 and the op is sent as a
 * two-register-length message (mlen scaled by dispatch width).
 */
572 fs_visitor::emit_math(enum opcode opcode, fs_reg dst, fs_reg src0, fs_reg src1)
577 assert(opcode == SHADER_OPCODE_POW);
579 if (intel->gen >= 6) {
580 /* Can't do hstride == 0 args to gen6 math, so expand it out.
582 * The hardware ignores source modifiers (negate and abs) on math
583 * instructions, so we also move to a temp to set those up.
585 if (src0.file == UNIFORM || src0.abs || src0.negate) {
586 fs_reg expanded = fs_reg(this, glsl_type::float_type);
587 emit(BRW_OPCODE_MOV, expanded, src0);
591 if (src1.file == UNIFORM || src1.abs || src1.negate) {
592 fs_reg expanded = fs_reg(this, glsl_type::float_type);
593 emit(BRW_OPCODE_MOV, expanded, src1);
597 inst = emit(opcode, dst, src0, src1);
599 emit(BRW_OPCODE_MOV, fs_reg(MRF, base_mrf + 1), src1);
600 inst = emit(opcode, dst, src0, reg_null_f);
602 inst->base_mrf = base_mrf;
603 inst->mlen = 2 * c->dispatch_width / 8;
609 * To be called after the last _mesa_add_state_reference() call, to
610 * set up prog_data.param[] for assign_curb_setup() and
611 * setup_pull_constants().
/* Only the 8-wide pass populates prog_data.param[]; the 16-wide pass
 * returns early and reuses the 8-wide results. */
614 fs_visitor::setup_paramvalues_refs()
616 if (c->dispatch_width != 8)
619 /* Set up the pointers to ParamValues now that that array is finalized. */
620 for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
621 c->prog_data.param[i] =
622 (const float *)fp->Base.Parameters->ParameterValues[this->param_index[i]] +
623 this->param_offset[i];
/* Computes the CURB (push-constant) read length from the param count
 * (rounded up to 8 per register), records the first post-payload GRF
 * for this dispatch width, and rewrites every UNIFORM-file source to a
 * FIXED_HW_REG pointing at the corresponding payload register.
 * NOTE(review): part of the brw_vec1_grf() argument expression is
 * elided in this excerpt (embedded line numbers jump 644 -> 648).
 */
628 fs_visitor::assign_curb_setup()
630 c->prog_data.curb_read_length = ALIGN(c->prog_data.nr_params, 8) / 8;
631 if (c->dispatch_width == 8) {
632 c->prog_data.first_curbe_grf = c->nr_payload_regs;
634 c->prog_data.first_curbe_grf_16 = c->nr_payload_regs;
637 /* Map the offsets in the UNIFORM file to fixed HW regs. */
638 foreach_list(node, &this->instructions) {
639 fs_inst *inst = (fs_inst *)node;
641 for (unsigned int i = 0; i < 3; i++) {
642 if (inst->src[i].file == UNIFORM) {
643 int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
644 struct brw_reg brw_reg = brw_vec1_grf(c->nr_payload_regs +
648 inst->src[i].file = FIXED_HW_REG;
649 inst->src[i].fixed_hw_reg = retype(brw_reg, inst->src[i].type);
/* Assigns a URB slot (urb_setup[attr]) to each incoming fragment
 * attribute. gen6+: one slot per bit set in InputsRead. Earlier gens:
 * walk the VS outputs and map vertex-result indices to fragment-attrib
 * indices (generic varyings shifted down by the VAR0 delta).
 * NOTE(review): the initial urb_setup[] = -1 fill, the TEX7 branch
 * body, and several braces are elided in this excerpt.
 */
656 fs_visitor::calculate_urb_setup()
658 for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
663 /* Figure out where each of the incoming setup attributes lands. */
664 if (intel->gen >= 6) {
665 for (unsigned int i = 0; i < FRAG_ATTRIB_MAX; i++) {
666 if (fp->Base.InputsRead & BITFIELD64_BIT(i)) {
667 urb_setup[i] = urb_next++;
671 /* FINISHME: The sf doesn't map VS->FS inputs for us very well. */
672 for (unsigned int i = 0; i < VERT_RESULT_MAX; i++) {
673 if (c->key.vp_outputs_written & BITFIELD64_BIT(i)) {
676 if (i >= VERT_RESULT_VAR0)
677 fp_index = i - (VERT_RESULT_VAR0 - FRAG_ATTRIB_VAR0);
678 else if (i <= VERT_RESULT_TEX7)
684 urb_setup[fp_index] = urb_next++;
689 /* Each attribute is 4 setup channels, each of which is half a reg. */
690 c->prog_data.urb_read_length = urb_next * 2;
/* Now that the CURB size is known, offsets the interpolation setup
 * operands (LINTERP src[2], CINTERP src[0]) by the actual starting GRF
 * of the URB data, and records the first GRF free for allocation.
 */
694 fs_visitor::assign_urb_setup()
696 int urb_start = c->nr_payload_regs + c->prog_data.curb_read_length;
698 /* Offset all the urb_setup[] index by the actual position of the
699 * setup regs, now that the location of the constants has been chosen.
701 foreach_list(node, &this->instructions) {
702 fs_inst *inst = (fs_inst *)node;
704 if (inst->opcode == FS_OPCODE_LINTERP) {
705 assert(inst->src[2].file == FIXED_HW_REG);
706 inst->src[2].fixed_hw_reg.nr += urb_start;
709 if (inst->opcode == FS_OPCODE_CINTERP) {
710 assert(inst->src[0].file == FIXED_HW_REG);
711 inst->src[0].fixed_hw_reg.nr += urb_start;
715 this->first_non_payload_grf = urb_start + c->prog_data.urb_read_length;
719 * Split large virtual GRFs into separate components if we can.
721 * This is mostly duplicated with what brw_fs_vector_splitting does,
722 * but that's really conservative because it's afraid of doing
723 * splitting that doesn't result in real progress after the rest of
724 * the optimization phases, which would cause infinite looping in
725 * optimization. We can do it once here, safely. This also has the
726 * opportunity to split interpolated values, or maybe even uniforms,
727 * which we don't have at the IR level.
729 * We want to split, because virtual GRFs are what we register
730 * allocate and spill (due to contiguousness requirements for some
731 * instructions), and they're what we naturally generate in the
732 * codegen process, but most virtual GRFs don't actually need to be
733 * contiguous sets of GRFs. If we split, we'll end up with reduced
734 * live intervals and better dead code elimination and coalescing.
/* Pass structure: (1) mark every multi-register GRF splittable, except
 * those that must stay contiguous (delta_xy for PLN, texture results);
 * (2) allocate new size-1 GRFs for the tail registers of each split
 * candidate and shrink the original to size 1; (3) rewrite all dst/src
 * references with nonzero reg_offset to the new registers.
 * NOTE(review): some guard conditions/braces are elided in this
 * excerpt (embedded line numbers jump), e.g. around lines 745-748 and
 * 768-770. Invalidates live intervals.
 */
737 fs_visitor::split_virtual_grfs()
739 int num_vars = this->virtual_grf_next;
740 bool split_grf[num_vars];
741 int new_virtual_grf[num_vars];
743 /* Try to split anything > 0 sized. */
744 for (int i = 0; i < num_vars; i++) {
745 if (this->virtual_grf_sizes[i] != 1)
748 split_grf[i] = false;
752 /* PLN opcodes rely on the delta_xy being contiguous. */
753 split_grf[this->delta_x.reg] = false;
756 foreach_list(node, &this->instructions) {
757 fs_inst *inst = (fs_inst *)node;
759 /* Texturing produces 4 contiguous registers, so no splitting. */
760 if (inst->is_tex()) {
761 split_grf[inst->dst.reg] = false;
765 /* Allocate new space for split regs. Note that the virtual
766 * numbers will be contiguous.
768 for (int i = 0; i < num_vars; i++) {
770 new_virtual_grf[i] = virtual_grf_alloc(1);
771 for (int j = 2; j < this->virtual_grf_sizes[i]; j++) {
772 int reg = virtual_grf_alloc(1);
773 assert(reg == new_virtual_grf[i] + j - 1);
776 this->virtual_grf_sizes[i] = 1;
780 foreach_list(node, &this->instructions) {
781 fs_inst *inst = (fs_inst *)node;
783 if (inst->dst.file == GRF &&
784 split_grf[inst->dst.reg] &&
785 inst->dst.reg_offset != 0) {
786 inst->dst.reg = (new_virtual_grf[inst->dst.reg] +
787 inst->dst.reg_offset - 1);
788 inst->dst.reg_offset = 0;
790 for (int i = 0; i < 3; i++) {
791 if (inst->src[i].file == GRF &&
792 split_grf[inst->src[i].reg] &&
793 inst->src[i].reg_offset != 0) {
794 inst->src[i].reg = (new_virtual_grf[inst->src[i].reg] +
795 inst->src[i].reg_offset - 1);
796 inst->src[i].reg_offset = 0;
800 this->live_intervals_valid = false;
/* Compacts prog_data.param[] by dropping uniforms no instruction reads.
 * 8-wide pass: builds params_remap (old index -> new index, -1 if dead)
 * by scanning every UNIFORM source, renumbers the survivors in order,
 * and shifts param[]/param_convert[] down. 16-wide pass: asserts the
 * remap already exists and only rewrites the instruction sources.
 * Afterwards each UNIFORM src carries its final index in .reg with
 * reg_offset forced to 0.
 */
804 fs_visitor::remove_dead_constants()
806 if (c->dispatch_width == 8) {
807 this->params_remap = ralloc_array(mem_ctx, int, c->prog_data.nr_params);
809 for (unsigned int i = 0; i < c->prog_data.nr_params; i++)
810 this->params_remap[i] = -1;
812 /* Find which params are still in use. */
813 foreach_list(node, &this->instructions) {
814 fs_inst *inst = (fs_inst *)node;
816 for (int i = 0; i < 3; i++) {
817 int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
819 if (inst->src[i].file != UNIFORM)
822 assert(constant_nr < (int)c->prog_data.nr_params);
824 /* For now, set this to non-negative. We'll give it the
825 * actual new number in a moment, in order to keep the
826 * register numbers nicely ordered.
828 this->params_remap[constant_nr] = 0;
832 /* Figure out what the new numbers for the params will be. At some
833 * point when we're doing uniform array access, we're going to want
834 * to keep the distinction between .reg and .reg_offset, but for
837 unsigned int new_nr_params = 0;
838 for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
839 if (this->params_remap[i] != -1) {
840 this->params_remap[i] = new_nr_params++;
844 /* Update the list of params to be uploaded to match our new numbering. */
845 for (unsigned int i = 0; i < c->prog_data.nr_params; i++) {
846 int remapped = this->params_remap[i];
851 /* We've already done setup_paramvalues_refs() so no need to worry
852 * about param_index and param_offset.
854 c->prog_data.param[remapped] = c->prog_data.param[i];
855 c->prog_data.param_convert[remapped] = c->prog_data.param_convert[i];
858 c->prog_data.nr_params = new_nr_params;
860 /* This should have been generated in the 8-wide pass already. */
861 assert(this->params_remap);
864 /* Now do the renumbering of the shader to remove unused params. */
865 foreach_list(node, &this->instructions) {
866 fs_inst *inst = (fs_inst *)node;
868 for (int i = 0; i < 3; i++) {
869 int constant_nr = inst->src[i].reg + inst->src[i].reg_offset;
871 if (inst->src[i].file != UNIFORM)
874 assert(this->params_remap[constant_nr] != -1);
875 inst->src[i].reg = this->params_remap[constant_nr];
876 inst->src[i].reg_offset = 0;
884 * Choose accesses from the UNIFORM file to demote to using the pull
887 * We allow a fragment shader to have more than the specified minimum
888 * maximum number of fragment shader uniform components (64). If
889 * there are too many of these, they'd fill up all of register space.
890 * So, this will push some of them out to the pull constant buffer and
891 * update the program to load them.
/* Everything past the first 128 push components is demoted: each read
 * of a demoted uniform gets a PULL_CONSTANT_LOAD into a fresh GRF
 * (offset aligned down to 16 bytes, the component selected via .smear),
 * then the source is rewritten to that GRF. Finally the demoted params
 * are moved from param[]/param_convert[] to the pull_param arrays.
 * Not supported for 16-wide compiles (fails the compile there).
 */
894 fs_visitor::setup_pull_constants()
896 /* Only allow 16 registers (128 uniform components) as push constants. */
897 unsigned int max_uniform_components = 16 * 8;
898 if (c->prog_data.nr_params <= max_uniform_components)
901 if (c->dispatch_width == 16) {
902 fail("Pull constants not supported in 16-wide\n");
906 /* Just demote the end of the list. We could probably do better
907 * here, demoting things that are rarely used in the program first.
909 int pull_uniform_base = max_uniform_components;
910 int pull_uniform_count = c->prog_data.nr_params - pull_uniform_base;
912 foreach_list(node, &this->instructions) {
913 fs_inst *inst = (fs_inst *)node;
915 for (int i = 0; i < 3; i++) {
916 if (inst->src[i].file != UNIFORM)
919 int uniform_nr = inst->src[i].reg + inst->src[i].reg_offset;
920 if (uniform_nr < pull_uniform_base)
923 fs_reg dst = fs_reg(this, glsl_type::float_type);
924 fs_inst *pull = new(mem_ctx) fs_inst(FS_OPCODE_PULL_CONSTANT_LOAD,
926 pull->offset = ((uniform_nr - pull_uniform_base) * 4) & ~15;
928 pull->annotation = inst->annotation;
932 inst->insert_before(pull);
934 inst->src[i].file = GRF;
935 inst->src[i].reg = dst.reg;
936 inst->src[i].reg_offset = 0;
937 inst->src[i].smear = (uniform_nr - pull_uniform_base) & 3;
941 for (int i = 0; i < pull_uniform_count; i++) {
942 c->prog_data.pull_param[i] = c->prog_data.param[pull_uniform_base + i];
943 c->prog_data.pull_param_convert[i] =
944 c->prog_data.param_convert[pull_uniform_base + i];
946 c->prog_data.nr_params -= pull_uniform_count;
947 c->prog_data.nr_pull_params = pull_uniform_count;
/* Computes per-virtual-GRF def/use instruction indices (first write,
 * last read) in a single walk over the instruction list. Variables
 * whose use extends past a loop's start are conservatively extended to
 * span the whole loop (def pulled to loop_start, use pushed to the
 * loop end) so they interfere correctly across back-edges. Results are
 * cached in virtual_grf_def/use; a no-op when already valid.
 * NOTE(review): several lines are elided in this excerpt -- the use[]
 * initialization, the ip counter, loop_start tracking, and the
 * loop-exit fixup bodies (embedded line numbers jump).
 */
951 fs_visitor::calculate_live_intervals()
953 int num_vars = this->virtual_grf_next;
954 int *def = ralloc_array(mem_ctx, int, num_vars);
955 int *use = ralloc_array(mem_ctx, int, num_vars);
959 if (this->live_intervals_valid)
962 for (int i = 0; i < num_vars; i++) {
963 def[i] = MAX_INSTRUCTION;
968 foreach_list(node, &this->instructions) {
969 fs_inst *inst = (fs_inst *)node;
971 if (inst->opcode == BRW_OPCODE_DO) {
972 if (loop_depth++ == 0)
974 } else if (inst->opcode == BRW_OPCODE_WHILE) {
977 if (loop_depth == 0) {
978 /* Patches up the use of vars marked for being live across
981 for (int i = 0; i < num_vars; i++) {
982 if (use[i] == loop_start) {
988 for (unsigned int i = 0; i < 3; i++) {
989 if (inst->src[i].file == GRF) {
990 int reg = inst->src[i].reg;
995 def[reg] = MIN2(loop_start, def[reg]);
996 use[reg] = loop_start;
998 /* Nobody else is going to go smash our start to
999 * later in the loop now, because def[reg] now
1000 * points before the bb header.
1005 if (inst->dst.file == GRF) {
1006 int reg = inst->dst.reg;
1009 def[reg] = MIN2(def[reg], ip);
1011 def[reg] = MIN2(def[reg], loop_start);
1019 ralloc_free(this->virtual_grf_def);
1020 ralloc_free(this->virtual_grf_use);
1021 this->virtual_grf_def = def;
1022 this->virtual_grf_use = use;
1024 this->live_intervals_valid = true;
1028 * Attempts to move immediate constants into the immediate
1029 * constant slot of following instructions.
1031 * Immediate constants are a bit tricky -- they have to be in the last
1032 * operand slot, you can't do abs/negate on them,
/* For each MOV of an IMM into a GRF, scans forward (stopping at control
 * flow and at any redefinition of the destination) and substitutes the
 * immediate into later readers where the hardware allows it: directly
 * for MOV; for commutative MUL/ADD (and CMP/SEL via operand swap, with
 * cmod/predicate adjustment) only when it can land in the src1 slot;
 * RCP of an immediate is constant-folded into a MOV of 1/x.
 * Returns whether progress was made and invalidates live intervals.
 * NOTE(review): several guard conditions, "progress = true" lines, and
 * break/brace lines are elided in this excerpt.
 */
1036 fs_visitor::propagate_constants()
1038 bool progress = false;
1040 calculate_live_intervals();
1042 foreach_list(node, &this->instructions) {
1043 fs_inst *inst = (fs_inst *)node;
1045 if (inst->opcode != BRW_OPCODE_MOV ||
1047 inst->dst.file != GRF || inst->src[0].file != IMM ||
1048 inst->dst.type != inst->src[0].type ||
1049 (c->dispatch_width == 16 &&
1050 (inst->force_uncompressed || inst->force_sechalf)))
1053 /* Don't bother with cases where we should have had the
1054 * operation on the constant folded in GLSL already.
1059 /* Found a move of a constant to a GRF. Find anything else using the GRF
1060 * before it's written, and replace it with the constant if we can.
1062 for (fs_inst *scan_inst = (fs_inst *)inst->next;
1063 !scan_inst->is_tail_sentinel();
1064 scan_inst = (fs_inst *)scan_inst->next) {
1065 if (scan_inst->opcode == BRW_OPCODE_DO ||
1066 scan_inst->opcode == BRW_OPCODE_WHILE ||
1067 scan_inst->opcode == BRW_OPCODE_ELSE ||
1068 scan_inst->opcode == BRW_OPCODE_ENDIF) {
1072 for (int i = 2; i >= 0; i--) {
1073 if (scan_inst->src[i].file != GRF ||
1074 scan_inst->src[i].reg != inst->dst.reg ||
1075 scan_inst->src[i].reg_offset != inst->dst.reg_offset)
1078 /* Don't bother with cases where we should have had the
1079 * operation on the constant folded in GLSL already.
1081 if (scan_inst->src[i].negate || scan_inst->src[i].abs)
1084 switch (scan_inst->opcode) {
1085 case BRW_OPCODE_MOV:
1086 scan_inst->src[i] = inst->src[0];
1090 case BRW_OPCODE_MUL:
1091 case BRW_OPCODE_ADD:
1093 scan_inst->src[i] = inst->src[0];
1095 } else if (i == 0 && scan_inst->src[1].file != IMM) {
1096 /* Fit this constant in by commuting the operands */
1097 scan_inst->src[0] = scan_inst->src[1];
1098 scan_inst->src[1] = inst->src[0];
1103 case BRW_OPCODE_CMP:
1105 scan_inst->src[i] = inst->src[0];
1107 } else if (i == 0 && scan_inst->src[1].file != IMM) {
1110 new_cmod = brw_swap_cmod(scan_inst->conditional_mod);
1111 if (new_cmod != ~0u) {
1112 /* Fit this constant in by swapping the operands and
1115 scan_inst->src[0] = scan_inst->src[1];
1116 scan_inst->src[1] = inst->src[0];
1117 scan_inst->conditional_mod = new_cmod;
1123 case BRW_OPCODE_SEL:
1125 scan_inst->src[i] = inst->src[0];
1127 } else if (i == 0 && scan_inst->src[1].file != IMM) {
1128 scan_inst->src[0] = scan_inst->src[1];
1129 scan_inst->src[1] = inst->src[0];
1131 /* If this was predicated, flipping operands means
1132 * we also need to flip the predicate.
1134 if (scan_inst->conditional_mod == BRW_CONDITIONAL_NONE) {
1135 scan_inst->predicate_inverse =
1136 !scan_inst->predicate_inverse;
1142 case SHADER_OPCODE_RCP:
1143 /* The hardware doesn't do math on immediate values
1144 * (because why are you doing that, seriously?), but
1145 * the correct answer is to just constant fold it
1149 if (inst->src[0].imm.f != 0.0f) {
1150 scan_inst->opcode = BRW_OPCODE_MOV;
1151 scan_inst->src[0] = inst->src[0];
1152 scan_inst->src[0].imm.f = 1.0f / scan_inst->src[0].imm.f;
/* Stop scanning once the destination GRF is rewritten (or a
 * texture op clobbers the whole contiguous range). */
1162 if (scan_inst->dst.file == GRF &&
1163 scan_inst->dst.reg == inst->dst.reg &&
1164 (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
1165 scan_inst->is_tex())) {
1172 this->live_intervals_valid = false;
1179 * Applies simple algebraic simplifications to individual
1180 * instructions.
1182 * (The visible case: MUL by an immediate 1.0f becomes a plain MOV.)
1183 * NOTE(review): this header previously duplicated propagate_constants'
1184 * comment verbatim; reworded to match what the body actually does.
1187 fs_visitor::opt_algebraic()
1189 bool progress = false;
1191 calculate_live_intervals();
1193 foreach_list(node, &this->instructions) {
1194 fs_inst *inst = (fs_inst *)node;
1196 switch (inst->opcode) {
1197 case BRW_OPCODE_MUL:
1198 if (inst->src[1].file != IMM)
/* x * 1.0 == x: demote to a MOV and drop the second source. */
1202 if (inst->src[1].type == BRW_REGISTER_TYPE_F &&
1203 inst->src[1].imm.f == 1.0) {
1204 inst->opcode = BRW_OPCODE_MOV;
1205 inst->src[1] = reg_undef;
1220 * Must be called after calculate_live_intervals() to remove unused
1221 * writes to registers -- register allocation will fail otherwise
1222 * because something defined but not used won't be considered to
1223 * interfere with other regs.
/* Removes any instruction whose GRF destination's last use is at or
 * before the current instruction index (pc). Invalidates live
 * intervals when anything was removed.
 * NOTE(review): the pc declaration/increment and the removal body are
 * elided in this excerpt (embedded line numbers jump).
 */
1226 fs_visitor::dead_code_eliminate()
1228 bool progress = false;
1231 calculate_live_intervals();
1233 foreach_list_safe(node, &this->instructions) {
1234 fs_inst *inst = (fs_inst *)node;
1236 if (inst->dst.file == GRF && this->virtual_grf_use[inst->dst.reg] <= pc) {
1245 live_intervals_valid = false;
/* Eliminates GRF-to-GRF (or UNIFORM-to-GRF) copies: for each plain MOV
 * at top level (not inside a loop or if, where the scan below wouldn't
 * dominate later code), scans forward to verify neither side is
 * rewritten and no gen6+ math instruction would receive a source
 * modifier / unusual region, then rewrites all later readers of the
 * destination to read the MOV's source (folding negate/abs) so the MOV
 * becomes dead. Invalidates live intervals on progress.
 * NOTE(review): loop_depth/if_depth declarations, the DO/IF cases, the
 * interference "continue", the MOV removal, and various braces are
 * elided in this excerpt (embedded line numbers jump).
 */
1251 fs_visitor::register_coalesce()
1253 bool progress = false;
1257 foreach_list_safe(node, &this->instructions) {
1258 fs_inst *inst = (fs_inst *)node;
1260 /* Make sure that we dominate the instructions we're going to
1261 * scan for interfering with our coalescing, or we won't have
1262 * scanned enough to see if anything interferes with our
1263 * coalescing. We don't dominate the following instructions if
1264 * we're in a loop or an if block.
1266 switch (inst->opcode) {
1270 case BRW_OPCODE_WHILE:
1276 case BRW_OPCODE_ENDIF:
1282 if (loop_depth || if_depth)
1285 if (inst->opcode != BRW_OPCODE_MOV ||
1288 inst->dst.file != GRF || (inst->src[0].file != GRF &&
1289 inst->src[0].file != UNIFORM)||
1290 inst->dst.type != inst->src[0].type)
1293 bool has_source_modifiers = inst->src[0].abs || inst->src[0].negate;
1295 /* Found a move of a GRF to a GRF. Let's see if we can coalesce
1296 * them: check for no writes to either one until the exit of the
1299 bool interfered = false;
1301 for (fs_inst *scan_inst = (fs_inst *)inst->next;
1302 !scan_inst->is_tail_sentinel();
1303 scan_inst = (fs_inst *)scan_inst->next) {
1304 if (scan_inst->dst.file == GRF) {
1305 if (scan_inst->dst.reg == inst->dst.reg &&
1306 (scan_inst->dst.reg_offset == inst->dst.reg_offset ||
1307 scan_inst->is_tex())) {
1311 if (inst->src[0].file == GRF &&
1312 scan_inst->dst.reg == inst->src[0].reg &&
1313 (scan_inst->dst.reg_offset == inst->src[0].reg_offset ||
1314 scan_inst->is_tex())) {
1320 /* The gen6 MATH instruction can't handle source modifiers or
1321 * unusual register regions, so avoid coalescing those for
1322 * now. We should do something more specific.
1324 if (intel->gen >= 6 &&
1325 scan_inst->is_math() &&
1326 (has_source_modifiers || inst->src[0].file == UNIFORM)) {
1335 /* Rewrite the later usage to point at the source of the move to
1338 for (fs_inst *scan_inst = inst;
1339 !scan_inst->is_tail_sentinel();
1340 scan_inst = (fs_inst *)scan_inst->next) {
1341 for (int i = 0; i < 3; i++) {
1342 if (scan_inst->src[i].file == GRF &&
1343 scan_inst->src[i].reg == inst->dst.reg &&
1344 scan_inst->src[i].reg_offset == inst->dst.reg_offset) {
1345 fs_reg new_src = inst->src[0];
1346 new_src.negate ^= scan_inst->src[i].negate;
1347 new_src.abs |= scan_inst->src[i].abs;
1348 scan_inst->src[i] = new_src;
1358 live_intervals_valid = false;
/**
 * compute-to-MRF pass: for each plain MOV of a GRF into an MRF, scan
 * backwards for the instruction that computed that GRF value and, when
 * it is safe, rewrite that instruction's destination to write the MRF
 * directly, making the intermediate GRF copy unnecessary.
 *
 * NOTE(review): sets a progress flag; the function tail is not visible
 * in this chunk — confirm it returns `progress` against the full file.
 */
1365 fs_visitor::compute_to_mrf()
1367    bool progress = false;
1370    calculate_live_intervals();
1372    foreach_list_safe(node, &this->instructions) {
1373       fs_inst *inst = (fs_inst *)node;
      /* Only a bare GRF->MRF MOV (no abs/negate/smear, matching types)
       * is a candidate for compute-to-MRF.
       */
1378       if (inst->opcode != BRW_OPCODE_MOV ||
1380 	  inst->dst.file != MRF || inst->src[0].file != GRF ||
1381 	  inst->dst.type != inst->src[0].type ||
1382 	  inst->src[0].abs || inst->src[0].negate || inst->src[0].smear != -1)
1385       /* Work out which hardware MRF registers are written by this
1388       int mrf_low = inst->dst.reg & ~BRW_MRF_COMPR4;
      /* A COMPR4 destination covers MRFs m and m+4; a compressed
       * 16-wide write covers m and m+1; otherwise just one MRF.
       */
1390       if (inst->dst.reg & BRW_MRF_COMPR4) {
1391 	 mrf_high = mrf_low + 4;
1392       } else if (c->dispatch_width == 16 &&
1393 		 (!inst->force_uncompressed && !inst->force_sechalf)) {
1394 	 mrf_high = mrf_low + 1;
1399       /* Can't compute-to-MRF this GRF if someone else was going to
1402       if (this->virtual_grf_use[inst->src[0].reg] > ip)
1405       /* Found a move of a GRF to a MRF. Let's see if we can go
1406        * rewrite the thing that made this GRF to write into the MRF.
      /* Walk backwards from the MOV looking for the last write to its
       * source GRF, bailing out at anything that makes the rewrite
       * unsafe.
       */
1409       for (scan_inst = (fs_inst *)inst->prev;
1410 	   scan_inst->prev != NULL;
1411 	   scan_inst = (fs_inst *)scan_inst->prev) {
1412 	 if (scan_inst->dst.file == GRF &&
1413 	     scan_inst->dst.reg == inst->src[0].reg) {
1414 	    /* Found the last thing to write our reg we want to turn
1415 	     * into a compute-to-MRF.
1418 	    if (scan_inst->is_tex()) {
1419 	       /* texturing writes several continuous regs, so we can't
1420 		* compute-to-mrf that.
1425 	    /* If it's predicated, it (probably) didn't populate all
1426 	     * the channels. We might be able to rewrite everything
1427 	     * that writes that reg, but it would require smarter
1428 	     * tracking to delay the rewriting until complete success.
1430 	    if (scan_inst->predicated)
1433 	    /* If it's half of register setup and not the same half as
1434 	     * our MOV we're trying to remove, bail for now.
1436 	    if (scan_inst->force_uncompressed != inst->force_uncompressed ||
1437 		scan_inst->force_sechalf != inst->force_sechalf) {
1441 	    /* SEND instructions can't have MRF as a destination. */
1442 	    if (scan_inst->mlen)
1445 	    if (intel->gen >= 6) {
1446 	       /* gen6 math instructions must have the destination be
1447 		* GRF, so no compute-to-MRF for them.
1449 	       if (scan_inst->is_math()) {
1454 	    if (scan_inst->dst.reg_offset == inst->src[0].reg_offset) {
1455 	       /* Found the creator of our MRF's source value. */
1456 	       scan_inst->dst.file = MRF;
1457 	       scan_inst->dst.reg = inst->dst.reg;
1458 	       scan_inst->saturate |= inst->saturate;
1465 	 /* We don't handle flow control here. Most computation of
1466 	  * values that end up in MRFs are shortly before the MRF
1469 	 if (scan_inst->opcode == BRW_OPCODE_DO ||
1470 	     scan_inst->opcode == BRW_OPCODE_WHILE ||
1471 	     scan_inst->opcode == BRW_OPCODE_ELSE ||
1472 	     scan_inst->opcode == BRW_OPCODE_ENDIF) {
1476 	 /* You can't read from an MRF, so if someone else reads our
1477 	  * MRF's source GRF that we wanted to rewrite, that stops us.
1479 	 bool interfered = false;
1480 	 for (int i = 0; i < 3; i++) {
1481 	    if (scan_inst->src[i].file == GRF &&
1482 		scan_inst->src[i].reg == inst->src[0].reg &&
1483 		scan_inst->src[i].reg_offset == inst->src[0].reg_offset) {
1490 	 if (scan_inst->dst.file == MRF) {
1491 	    /* If somebody else writes our MRF here, we can't
1492 	     * compute-to-MRF before that.
1494 	    int scan_mrf_low = scan_inst->dst.reg & ~BRW_MRF_COMPR4;
1497 	    if (scan_inst->dst.reg & BRW_MRF_COMPR4) {
1498 	       scan_mrf_high = scan_mrf_low + 4;
1499 	    } else if (c->dispatch_width == 16 &&
1500 		       (!scan_inst->force_uncompressed &&
1501 			!scan_inst->force_sechalf)) {
1502 	       scan_mrf_high = scan_mrf_low + 1;
1504 	       scan_mrf_high = scan_mrf_low;
      /* Any overlap between the MOV's MRF range and this write's MRF
       * range blocks hoisting the computation above this point.
       */
1507 	    if (mrf_low == scan_mrf_low ||
1508 		mrf_low == scan_mrf_high ||
1509 		mrf_high == scan_mrf_low ||
1510 		mrf_high == scan_mrf_high) {
1515 	 if (scan_inst->mlen > 0) {
1516 	    /* Found a SEND instruction, which means that there are
1517 	     * live values in MRFs from base_mrf to base_mrf +
1518 	     * scan_inst->mlen - 1. Don't go pushing our MRF write up
1521 	    if (mrf_low >= scan_inst->base_mrf &&
1522 		mrf_low < scan_inst->base_mrf + scan_inst->mlen) {
1525 	    if (mrf_high >= scan_inst->base_mrf &&
1526 		mrf_high < scan_inst->base_mrf + scan_inst->mlen) {
1537  * Walks through basic blocks, looking for repeated MRF writes and
1538  * removing the later ones.
/**
 * Tracks, per MRF, the last unpredicated GRF->MRF MOV whose value is
 * still current (last_mrf_move[]); a later MOV that equals() the
 * tracked one is redundant.  The tracking is flushed at control-flow
 * boundaries and whenever a tracked MRF, a SEND's implied MRFs, or a
 * tracked MOV's source GRF is overwritten.
 */
1541 fs_visitor::remove_duplicate_mrf_writes()
1543    fs_inst *last_mrf_move[16];
1544    bool progress = false;
1546    /* Need to update the MRF tracking for compressed instructions. */
1547    if (c->dispatch_width == 16)
1550    memset(last_mrf_move, 0, sizeof(last_mrf_move));
1552    foreach_list_safe(node, &this->instructions) {
1553       fs_inst *inst = (fs_inst *)node;
      /* Control flow invalidates everything we know about MRF contents. */
1555       switch (inst->opcode) {
1557       case BRW_OPCODE_WHILE:
1559       case BRW_OPCODE_ELSE:
1560       case BRW_OPCODE_ENDIF:
1561 	 memset(last_mrf_move, 0, sizeof(last_mrf_move));
      /* A MOV identical to the one that last filled this MRF is
       * redundant.
       */
1567       if (inst->opcode == BRW_OPCODE_MOV &&
1568 	  inst->dst.file == MRF) {
1569 	 fs_inst *prev_inst = last_mrf_move[inst->dst.reg];
1570 	 if (prev_inst && inst->equals(prev_inst)) {
1577       /* Clear out the last-write records for MRFs that were overwritten. */
1578       if (inst->dst.file == MRF) {
1579 	 last_mrf_move[inst->dst.reg] = NULL;
1582       if (inst->mlen > 0) {
1583 	 /* Found a SEND instruction, which will include two or fewer
1584 	  * implied MRF writes. We could do better here.
1586 	 for (int i = 0; i < implied_mrf_writes(inst); i++) {
1587 	    last_mrf_move[inst->base_mrf + i] = NULL;
1591       /* Clear out any MRF move records whose sources got overwritten. */
1592       if (inst->dst.file == GRF) {
1593 	 for (unsigned int i = 0; i < Elements(last_mrf_move); i++) {
1594 	    if (last_mrf_move[i] &&
1595 		last_mrf_move[i]->src[0].reg == inst->dst.reg) {
1596 	       last_mrf_move[i] = NULL;
      /* Record this unpredicated GRF->MRF MOV as the MRF's current
       * contents.
       * NOTE(review): inst->dst.reg indexes the 16-entry array directly;
       * assumes MRF numbers here are < 16 with no COMPR4 bit — confirm.
       */
1601       if (inst->opcode == BRW_OPCODE_MOV &&
1602 	  inst->dst.file == MRF &&
1603 	  inst->src[0].file == GRF &&
1604 	  !inst->predicated) {
1605 	 last_mrf_move[inst->dst.reg] = inst;
/**
 * Returns whether the live intervals of virtual GRFs a and b overlap,
 * i.e. whether they must not be allocated to the same hardware
 * register.  Requires dead_code_eliminate() to have run first (see
 * the assertion below).
 */
1613 fs_visitor::virtual_grf_interferes(int a, int b)
1615    int start = MAX2(this->virtual_grf_def[a], this->virtual_grf_def[b]);
1616    int end = MIN2(this->virtual_grf_use[a], this->virtual_grf_use[b]);
1618    /* We can't handle dead register writes here, without iterating
1619     * over the whole instruction stream to find every single dead
1620     * write to that register to compare to the live interval of the
1621     * other register. Just assert that dead_code_eliminate() has been
1624    assert((this->virtual_grf_use[a] != -1 ||
1625 	   this->virtual_grf_def[a] == MAX_INSTRUCTION) &&
1626 	  (this->virtual_grf_use[b] != -1 ||
1627 	   this->virtual_grf_def[b] == MAX_INSTRUCTION));
1629    /* If the register is used to store 16 values of less than float
1630     * size (only the case for pixel_[xy]), then we can't allocate
1631     * another dword-sized thing to that register that would be used in
1632     * the same instruction. This is because when the GPU decodes (for
1635     * (declare (in ) vec4 gl_FragCoord@0x97766a0)
1636     * add(16) g6<1>F g6<8,8,1>UW 0.5F { align1 compr };
1638     * it's actually processed as:
1639     * add(8) g6<1>F g6<8,8,1>UW 0.5F { align1 };
1640     * add(8) g7<1>F g6.8<8,8,1>UW 0.5F { align1 sechalf };
1642     * so our second half values in g6 got overwritten in the first
      /* pixel_x/pixel_y in 16-wide dispatch: a def==use touch already
       * counts as interference (note <= rather than a strict compare).
       */
1645    if (c->dispatch_width == 16 && (this->pixel_x.reg == a ||
1646 				   this->pixel_x.reg == b ||
1647 				   this->pixel_y.reg == a ||
1648 				   this->pixel_y.reg == b)) {
1649       return start <= end;
/* NOTE(review): the opening lines of the enclosing function are not
 * visible in this chunk; from the body (payload setup, IR walk over
 * main(), optimization passes, register allocation) this appears to be
 * fs_visitor::run() — confirm against the full file.
 */
1658    uint32_t prog_offset_16 = 0;
1659    uint32_t orig_nr_params = c->prog_data.nr_params;
1661    brw_wm_payload_setup(brw, c);
1663    if (c->dispatch_width == 16) {
1664       /* align to 64 byte boundary. */
1665       while ((c->func.nr_insn * sizeof(struct brw_instruction)) % 64) {
1669       /* Save off the start of this 16-wide program in case we succeed. */
1670       prog_offset_16 = c->func.nr_insn * sizeof(struct brw_instruction);
1672       brw_set_compression_control(p, BRW_COMPRESSION_COMPRESSED);
1678    calculate_urb_setup();
1680       emit_interpolation_setup_gen4();
1682       emit_interpolation_setup_gen6();
1684    /* Generate FS IR for main(). (the visitor only descends into
1685     * functions called "main").
1687    foreach_list(node, &*shader->ir) {
1688       ir_instruction *ir = (ir_instruction *)node;
1690       this->result = reg_undef;
1698    split_virtual_grfs();
1700    setup_paramvalues_refs();
1701    setup_pull_constants();
      /* Optimization passes; each pass ORs into the progress flag.
       * NOTE(review): the loop construct driving these to a fixed point
       * is not visible in this chunk.
       */
1707       progress = remove_duplicate_mrf_writes() || progress;
1709       progress = propagate_constants() || progress;
1710       progress = opt_algebraic() || progress;
1711       progress = register_coalesce() || progress;
1712       progress = compute_to_mrf() || progress;
1713       progress = dead_code_eliminate() || progress;
1716    remove_dead_constants();
1718    schedule_instructions();
1720    assign_curb_setup();
1724       /* Debug of register spilling: Go spill everything. */
1725       int virtual_grf_count = virtual_grf_next;
1726       for (int i = 0; i < virtual_grf_count; i++) {
1732       assign_regs_trivial();
      /* Keep spilling until register allocation succeeds. */
1734       while (!assign_regs()) {
1740    assert(force_uncompressed_stack == 0);
1741    assert(force_sechalf_stack == 0);
      /* Record allocation results in prog_data; the 16-wide variant also
       * remembers where its code starts in the program store.
       */
1748    if (c->dispatch_width == 8) {
1749       c->prog_data.reg_blocks = brw_register_blocks(grf_used);
1751       c->prog_data.reg_blocks_16 = brw_register_blocks(grf_used);
1752       c->prog_data.prog_offset_16 = prog_offset_16;
1754    /* Make sure we didn't try to sneak in an extra uniform */
1755    assert(orig_nr_params == c->prog_data.nr_params);
/**
 * Compiles the linked GLSL fragment shader for this program.  Runs an
 * 8-wide fs_visitor first; on gen5+ with no pull constants in use, it
 * additionally attempts a 16-wide compile (reusing the 8-wide pass's
 * uniforms via import_uniforms()).  A failed visit clears
 * prog->LinkStatus and appends the visitor's fail_msg to the InfoLog.
 */
1762 brw_wm_fs_emit(struct brw_context *brw, struct brw_wm_compile *c,
1763 	       struct gl_shader_program *prog)
1765    struct intel_context *intel = &brw->intel;
1770    struct brw_shader *shader =
1771      (brw_shader *) prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
1775    if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
1776       printf("GLSL IR for native fragment shader %d:\n", prog->Name);
1777       _mesa_print_ir(shader->ir, NULL);
1781    /* Now the main event: Visit the shader IR and generate our FS IR for it.
1783    c->dispatch_width = 8;
1785    fs_visitor v(c, prog, shader);
1787       prog->LinkStatus = GL_FALSE;
1788       ralloc_strcat(&prog->InfoLog, v.fail_msg);
      /* Only attempt the 16-wide variant on gen5+ and when no pull
       * constants are in use.
       */
1793    if (intel->gen >= 5 && c->prog_data.nr_pull_params == 0) {
1794       c->dispatch_width = 16;
1795       fs_visitor v2(c, prog, shader);
1796       v2.import_uniforms(&v);
1800    c->prog_data.dispatch_width = 8;
1806 brw_fs_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
1808 struct brw_context *brw = brw_context(ctx);
1809 struct brw_wm_prog_key key;
1810 struct gl_fragment_program *fp = prog->FragmentProgram;
1811 struct brw_fragment_program *bfp = brw_fragment_program(fp);
1816 memset(&key, 0, sizeof(key));
1819 key.iz_lookup |= IZ_PS_KILL_ALPHATEST_BIT;
1821 if (fp->Base.OutputsWritten & BITFIELD64_BIT(FRAG_RESULT_DEPTH))
1822 key.iz_lookup |= IZ_PS_COMPUTES_DEPTH_BIT;
1824 /* Just assume depth testing. */
1825 key.iz_lookup |= IZ_DEPTH_TEST_ENABLE_BIT;
1826 key.iz_lookup |= IZ_DEPTH_WRITE_ENABLE_BIT;
1828 key.vp_outputs_written |= BITFIELD64_BIT(FRAG_ATTRIB_WPOS);
1829 for (int i = 0; i < FRAG_ATTRIB_MAX; i++) {
1832 if (!(fp->Base.InputsRead & BITFIELD64_BIT(i)))
1835 key.proj_attrib_mask |= 1 << i;
1837 if (i <= FRAG_ATTRIB_TEX7)
1839 else if (i >= FRAG_ATTRIB_VAR0)
1840 vp_index = i - FRAG_ATTRIB_VAR0 + VERT_RESULT_VAR0;
1843 key.vp_outputs_written |= BITFIELD64_BIT(vp_index);
1846 key.clamp_fragment_color = true;
1848 for (int i = 0; i < BRW_MAX_TEX_UNIT; i++) {
1849 if (fp->Base.ShadowSamplers & (1 << i))
1850 key.compare_funcs[i] = GL_LESS;
1852 /* FINISHME: depth compares might use (0,0,0,W) for example */
1853 key.tex_swizzles[i] = SWIZZLE_XYZW;
1856 if (fp->Base.InputsRead & FRAG_BIT_WPOS) {
1857 key.drawable_height = ctx->DrawBuffer->Height;
1858 key.render_to_fbo = ctx->DrawBuffer->Name != 0;
1861 key.nr_color_regions = 1;
1863 key.program_string_id = bfp->id;
1865 uint32_t old_prog_offset = brw->wm.prog_offset;
1866 struct brw_wm_prog_data *old_prog_data = brw->wm.prog_data;
1868 bool success = do_wm_prog(brw, prog, bfp, &key);
1870 brw->wm.prog_offset = old_prog_offset;
1871 brw->wm.prog_data = old_prog_data;