struct gl_shader_program *
brw_new_shader_program(struct gl_context *ctx, GLuint name)
{
- struct brw_shader_program *prog;
- prog = rzalloc(NULL, struct brw_shader_program);
+ struct gl_shader_program *prog = rzalloc(NULL, struct gl_shader_program);
if (prog) {
- prog->base.Name = name;
- _mesa_init_shader_program(ctx, &prog->base);
+ prog->Name = name;
+ _mesa_init_shader_program(ctx, prog);
}
- return &prog->base;
+ return prog;
}
/**
* what non-orthogonal state will be set, in the hope that it reflects
* the eventual NOS used, and thus allows us to produce link failures.
*/
-bool
+static bool
brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
{
- if (!brw_fs_precompile(ctx, prog))
+ struct brw_context *brw = brw_context(ctx);
+
+ if (brw->precompile && !brw_fs_precompile(ctx, prog))
return false;
- if (!brw_vs_precompile(ctx, prog))
+ if (brw->precompile && !brw_vs_precompile(ctx, prog))
return false;
return true;
}
+static void
+brw_lower_packing_builtins(struct brw_context *brw,
+ gl_shader_type shader_type,
+ exec_list *ir)
+{
+ int ops = LOWER_PACK_SNORM_2x16
+ | LOWER_UNPACK_SNORM_2x16
+ | LOWER_PACK_UNORM_2x16
+ | LOWER_UNPACK_UNORM_2x16
+ | LOWER_PACK_SNORM_4x8
+ | LOWER_UNPACK_SNORM_4x8
+ | LOWER_PACK_UNORM_4x8
+ | LOWER_UNPACK_UNORM_4x8;
+
+ if (brw->intel.gen >= 7) {
+      /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
+       * used to execute packHalf2x16 and unpackHalf2x16.  For AOS code (the
+       * vector backend), no lowering is needed.  For SOA code (the scalar
+       * fragment-shader backend), the Half2x16 ops must be scalarized into
+       * the split variants, hence the fragment-shader-only check below.
+       */
+ if (shader_type == MESA_SHADER_FRAGMENT) {
+ ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
+ | LOWER_UNPACK_HALF_2x16_TO_SPLIT;
+ }
+ } else {
+ ops |= LOWER_PACK_HALF_2x16
+ | LOWER_UNPACK_HALF_2x16;
+ }
+
+ lower_packing_builtins(ir, ops);
+}
+
GLboolean
-brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
+brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
{
struct brw_context *brw = brw_context(ctx);
struct intel_context *intel = &brw->intel;
unsigned int stage;
- for (stage = 0; stage < ARRAY_SIZE(prog->_LinkedShaders); stage++) {
+ for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
struct brw_shader *shader =
- (struct brw_shader *)prog->_LinkedShaders[stage];
+ (struct brw_shader *)shProg->_LinkedShaders[stage];
+ static const GLenum targets[] = {
+ GL_VERTEX_PROGRAM_ARB,
+ GL_FRAGMENT_PROGRAM_ARB,
+ GL_GEOMETRY_PROGRAM_NV
+ };
if (!shader)
continue;
+ struct gl_program *prog =
+ ctx->Driver.NewProgram(ctx, targets[stage], shader->base.Name);
+ if (!prog)
+ return false;
+ prog->Parameters = _mesa_new_parameter_list();
+
+ if (stage == 0) {
+ struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
+ vp->UsesClipDistance = shProg->Vert.UsesClipDistance;
+ }
+
void *mem_ctx = ralloc_context(NULL);
bool progress;
shader->ir = new(shader) exec_list;
clone_ir_list(mem_ctx, shader->ir, shader->base.ir);
+ /* lower_packing_builtins() inserts arithmetic instructions, so it
+ * must precede lower_instructions().
+ */
+ brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir);
do_mat_op_to_vec(shader->ir);
+ const int lrp_to_arith = (intel->gen < 6 || stage != MESA_SHADER_FRAGMENT)
+ ? LRP_TO_ARITH : 0;
lower_instructions(shader->ir,
MOD_TO_FRACT |
DIV_TO_MUL_RCP |
SUB_TO_ADD_NEG |
EXP_TO_EXP2 |
- LOG_TO_LOG2);
+ LOG_TO_LOG2 |
+ lrp_to_arith);
/* Pre-gen6 HW can only nest if-statements 16 deep. Beyond this,
* if-statements need to be flattened.
lower_if_to_cond_assign(shader->ir, 16);
do_lower_texture_projection(shader->ir);
+ if (intel->gen < 8 && !intel->is_haswell)
+ brw_lower_texture_gradients(shader->ir);
do_vec_index_to_cond_assign(shader->ir);
brw_do_cubemap_normalize(shader->ir);
lower_noise(shader->ir);
bool input = true;
bool output = stage == MESA_SHADER_FRAGMENT;
bool temp = stage == MESA_SHADER_FRAGMENT;
- bool uniform = stage == MESA_SHADER_FRAGMENT;
+ bool uniform = false;
+
+ bool lowered_variable_indexing =
+ lower_variable_index_to_cond_assign(shader->ir,
+ input, output, temp, uniform);
- lower_variable_index_to_cond_assign(shader->ir,
- input, output, temp, uniform);
+ if (unlikely((INTEL_DEBUG & DEBUG_PERF) && lowered_variable_indexing)) {
+ perf_debug("Unsupported form of variable indexing in FS; falling "
+ "back to very inefficient code generation\n");
+ }
+
+ /* FINISHME: Do this before the variable index lowering. */
+ lower_ubo_reference(&shader->base, shader->ir);
do {
progress = false;
|| progress;
} while (progress);
+ /* Make a pass over the IR to add state references for any built-in
+ * uniforms that are used. This has to be done now (during linking).
+ * Code generation doesn't happen until the first time this shader is
+ * used for rendering. Waiting until then to generate the parameters is
+ * too late. At that point, the values for the built-in uniforms won't
+ * get sent to the shader.
+ */
+ foreach_list(node, shader->ir) {
+ ir_variable *var = ((ir_instruction *) node)->as_variable();
+
+ if ((var == NULL) || (var->mode != ir_var_uniform)
+ || (strncmp(var->name, "gl_", 3) != 0))
+ continue;
+
+ const ir_state_slot *const slots = var->state_slots;
+ assert(var->state_slots != NULL);
+
+ for (unsigned int i = 0; i < var->num_state_slots; i++) {
+ _mesa_add_state_reference(prog->Parameters,
+ (gl_state_index *) slots[i].tokens);
+ }
+ }
+
validate_ir_tree(shader->ir);
reparent_ir(shader->ir, shader->ir);
ralloc_free(mem_ctx);
- }
- if (!_mesa_ir_link_shader(ctx, prog))
- return false;
+ do_set_program_inouts(shader->ir, prog,
+ shader->base.Type == GL_FRAGMENT_SHADER);
+
+ prog->SamplersUsed = shader->base.active_samplers;
+ _mesa_update_shader_textures_used(shProg, prog);
+
+ _mesa_reference_program(ctx, &shader->base.Program, prog);
+
+ brw_add_texrect_params(prog);
+
+ /* This has to be done last. Any operation that can cause
+ * prog->ParameterValues to get reallocated (e.g., anything that adds a
+ * program constant) has to happen before creating this linkage.
+ */
+ _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);
+
+ _mesa_reference_program(ctx, &prog, NULL);
+
+ if (ctx->Shader.Flags & GLSL_DUMP) {
+ static const char *target_strings[]
+ = { "vertex", "fragment", "geometry" };
+ printf("\n");
+ printf("GLSL IR for linked %s program %d:\n", target_strings[stage],
+ shProg->Name);
+ _mesa_print_ir(shader->base.ir, NULL);
+ }
+ }
- if (!brw_shader_precompile(ctx, prog))
+ if (!brw_shader_precompile(ctx, shProg))
return false;
return true;
case GLSL_TYPE_UINT:
return BRW_REGISTER_TYPE_UD;
case GLSL_TYPE_ARRAY:
+ return brw_type_for_base_type(type->fields.array);
case GLSL_TYPE_STRUCT:
case GLSL_TYPE_SAMPLER:
/* These should be overridden with the type of the member when
* way to trip up if we don't.
*/
return BRW_REGISTER_TYPE_UD;
- default:
+ case GLSL_TYPE_VOID:
+ case GLSL_TYPE_ERROR:
+ case GLSL_TYPE_INTERFACE:
assert(!"not reached");
- return BRW_REGISTER_TYPE_F;
+ break;
}
+
+ return BRW_REGISTER_TYPE_F;
}
uint32_t
return 0;
}
}
+
+uint32_t
+brw_texture_offset(ir_constant *offset)
+{
+ assert(offset != NULL);
+
+ signed char offsets[3];
+ for (unsigned i = 0; i < offset->type->vector_elements; i++)
+ offsets[i] = (signed char) offset->value.i[i];
+
+ /* Combine all three offsets into a single unsigned dword:
+ *
+ * bits 11:8 - U Offset (X component)
+ * bits 7:4 - V Offset (Y component)
+ * bits 3:0 - R Offset (Z component)
+ */
+ unsigned offset_bits = 0;
+ for (unsigned i = 0; i < offset->type->vector_elements; i++) {
+ const unsigned shift = 4 * (2 - i);
+ offset_bits |= (offsets[i] << shift) & (0xF << shift);
+ }
+ return offset_bits;
+}