i965/fs: Use the LRP instruction for ir_triop_lrp when possible.
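For context: ir_triop_lrp is the IR form of GLSL's mix(x, y, a). Before this
change it was unconditionally lowered to MUL/ADD arithmetic; with it, fragment
shaders on Gen6+ keep the operation and emit the hardware LRP instruction
directly. A minimal sketch of the identity both paths must preserve (the
function name is illustrative, not part of the patch):

   /* Reference semantics of ir_triop_lrp / GLSL mix(); the hardware LRP
    * instruction and the LRP_TO_ARITH lowering both compute this blend.
    */
   static float lrp_reference(float x, float y, float a)
   {
      return x * (1.0f - a) + y * a;   /* equivalently: x + (y - x) * a */
   }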
diff --git a/src/mesa/drivers/dri/i965/brw_shader.cpp b/src/mesa/drivers/dri/i965/brw_shader.cpp
index 9471883..2da5ed5 100644
 extern "C" {
 #include "main/macros.h"
 #include "brw_context.h"
+#include "brw_vs.h"
 }
 #include "brw_fs.h"
-#include "../glsl/ir_optimization.h"
-#include "../glsl/ir_print_visitor.h"
+#include "glsl/ir_optimization.h"
+#include "glsl/ir_print_visitor.h"
 
 struct gl_shader *
 brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
@@ -47,13 +48,12 @@ brw_new_shader(struct gl_context *ctx, GLuint name, GLuint type)
 struct gl_shader_program *
 brw_new_shader_program(struct gl_context *ctx, GLuint name)
 {
-   struct brw_shader_program *prog;
-   prog = rzalloc(NULL, struct brw_shader_program);
+   struct gl_shader_program *prog = rzalloc(NULL, struct gl_shader_program);
    if (prog) {
-      prog->base.Name = name;
-      _mesa_init_shader_program(ctx, &prog->base);
+      prog->Name = name;
+      _mesa_init_shader_program(ctx, prog);
    }
-   return &prog->base;
+   return prog;
 }
 
 /**
@@ -61,24 +61,82 @@ brw_new_shader_program(struct gl_context *ctx, GLuint name)
  * what non-orthogonal state will be set, in the hope that it reflects
  * the eventual NOS used, and thus allows us to produce link failures.
  */
-bool
+static bool
 brw_shader_precompile(struct gl_context *ctx, struct gl_shader_program *prog)
 {
-   if (!brw_fs_precompile(ctx, prog))
+   struct brw_context *brw = brw_context(ctx);
+
+   if (brw->precompile && !brw_fs_precompile(ctx, prog))
+      return false;
+
+   if (brw->precompile && !brw_vs_precompile(ctx, prog))
       return false;
 
    return true;
 }
 
+static void
+brw_lower_packing_builtins(struct brw_context *brw,
+                           gl_shader_type shader_type,
+                           exec_list *ir)
+{
+   int ops = LOWER_PACK_SNORM_2x16
+           | LOWER_UNPACK_SNORM_2x16
+           | LOWER_PACK_UNORM_2x16
+           | LOWER_UNPACK_UNORM_2x16
+           | LOWER_PACK_SNORM_4x8
+           | LOWER_UNPACK_SNORM_4x8
+           | LOWER_PACK_UNORM_4x8
+           | LOWER_UNPACK_UNORM_4x8;
+
+   if (brw->intel.gen >= 7) {
+      /* Gen7 introduced the f32to16 and f16to32 instructions, which can be
+       * used to execute packHalf2x16 and unpackHalf2x16. For AOS code, no
+       * lowering is needed. For SOA code, the Half2x16 ops must be
+       * scalarized.
+       */
+      if (shader_type == MESA_SHADER_FRAGMENT) {
+         ops |= LOWER_PACK_HALF_2x16_TO_SPLIT
+             |  LOWER_UNPACK_HALF_2x16_TO_SPLIT;
+      }
+   } else {
+      ops |= LOWER_PACK_HALF_2x16
+          |  LOWER_UNPACK_HALF_2x16;
+   }
+
+   lower_packing_builtins(ir, ops);
+}
+
 GLboolean
-brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
+brw_link_shader(struct gl_context *ctx, struct gl_shader_program *shProg)
 {
    struct brw_context *brw = brw_context(ctx);
    struct intel_context *intel = &brw->intel;
+   unsigned int stage;
+
+   for (stage = 0; stage < ARRAY_SIZE(shProg->_LinkedShaders); stage++) {
+      struct brw_shader *shader =
+        (struct brw_shader *)shProg->_LinkedShaders[stage];
+      static const GLenum targets[] = {
+        GL_VERTEX_PROGRAM_ARB,
+        GL_FRAGMENT_PROGRAM_ARB,
+        GL_GEOMETRY_PROGRAM_NV
+      };
+
+      if (!shader)
+        continue;
+
+      struct gl_program *prog =
+        ctx->Driver.NewProgram(ctx, targets[stage], shader->base.Name);
+      if (!prog)
+       return false;
+      prog->Parameters = _mesa_new_parameter_list();
+
+      if (stage == 0) {
+        struct gl_vertex_program *vp = (struct gl_vertex_program *) prog;
+        vp->UsesClipDistance = shProg->Vert.UsesClipDistance;
+      }
 
-   struct brw_shader *shader =
-      (struct brw_shader *)prog->_LinkedShaders[MESA_SHADER_FRAGMENT];
-   if (shader != NULL) {
       void *mem_ctx = ralloc_context(NULL);
       bool progress;
 
@@ -87,13 +145,20 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
       shader->ir = new(shader) exec_list;
       clone_ir_list(mem_ctx, shader->ir, shader->base.ir);
 
+      /* lower_packing_builtins() inserts arithmetic instructions, so it
+       * must precede lower_instructions().
+       */
+      brw_lower_packing_builtins(brw, (gl_shader_type) stage, shader->ir);
       do_mat_op_to_vec(shader->ir);
+      const int lrp_to_arith = (intel->gen < 6 || stage != MESA_SHADER_FRAGMENT)
+                                ? LRP_TO_ARITH : 0;
       lower_instructions(shader->ir,
                         MOD_TO_FRACT |
                         DIV_TO_MUL_RCP |
                         SUB_TO_ADD_NEG |
                         EXP_TO_EXP2 |
-                        LOG_TO_LOG2);
+                        LOG_TO_LOG2 |
+                         lrp_to_arith);
 
       /* Pre-gen6 HW can only nest if-statements 16 deep.  Beyond this,
        * if-statements need to be flattened.
@@ -102,22 +167,37 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
         lower_if_to_cond_assign(shader->ir, 16);
 
       do_lower_texture_projection(shader->ir);
+      if (intel->gen < 8 && !intel->is_haswell)
+         brw_lower_texture_gradients(shader->ir);
       do_vec_index_to_cond_assign(shader->ir);
       brw_do_cubemap_normalize(shader->ir);
       lower_noise(shader->ir);
       lower_quadop_vector(shader->ir, false);
-      lower_variable_index_to_cond_assign(shader->ir,
-                                         GL_TRUE, /* input */
-                                         GL_TRUE, /* output */
-                                         GL_TRUE, /* temp */
-                                         GL_TRUE /* uniform */
-                                         );
+
+      bool input = true;
+      bool output = stage == MESA_SHADER_FRAGMENT;
+      bool temp = stage == MESA_SHADER_FRAGMENT;
+      bool uniform = false;
+
+      bool lowered_variable_indexing =
+         lower_variable_index_to_cond_assign(shader->ir,
+                                             input, output, temp, uniform);
+
+      if (unlikely((INTEL_DEBUG & DEBUG_PERF) && lowered_variable_indexing)) {
+         perf_debug("Unsupported form of variable indexing in FS; falling "
+                    "back to very inefficient code generation\n");
+      }
+
+      /* FINISHME: Do this before the variable index lowering. */
+      lower_ubo_reference(&shader->base, shader->ir);
 
       do {
         progress = false;
 
-        brw_do_channel_expressions(shader->ir);
-        brw_do_vector_splitting(shader->ir);
+        if (stage == MESA_SHADER_FRAGMENT) {
+           brw_do_channel_expressions(shader->ir);
+           brw_do_vector_splitting(shader->ir);
+        }
 
         progress = do_lower_jumps(shader->ir, true, true,
                                   true, /* main return */
@@ -125,22 +205,70 @@ brw_link_shader(struct gl_context *ctx, struct gl_shader_program *prog)
                                   false /* loops */
                                   ) || progress;
 
-        progress = do_common_optimization(shader->ir, true, 32) || progress;
+        progress = do_common_optimization(shader->ir, true, true, 32)
+          || progress;
       } while (progress);
 
+      /* Make a pass over the IR to add state references for any built-in
+       * uniforms that are used.  This has to be done now (during linking).
+       * Code generation doesn't happen until the first time this shader is
+       * used for rendering.  Waiting until then to generate the parameters is
+       * too late.  At that point, the values for the built-in uniforms won't
+       * get sent to the shader.
+       */
+      foreach_list(node, shader->ir) {
+        ir_variable *var = ((ir_instruction *) node)->as_variable();
+
+        if ((var == NULL) || (var->mode != ir_var_uniform)
+            || (strncmp(var->name, "gl_", 3) != 0))
+           continue;
+
+        const ir_state_slot *const slots = var->state_slots;
+        assert(var->state_slots != NULL);
+
+        for (unsigned int i = 0; i < var->num_state_slots; i++) {
+           _mesa_add_state_reference(prog->Parameters,
+                                     (gl_state_index *) slots[i].tokens);
+        }
+      }
+
       validate_ir_tree(shader->ir);
 
       reparent_ir(shader->ir, shader->ir);
       ralloc_free(mem_ctx);
-   }
 
-   if (!_mesa_ir_link_shader(ctx, prog))
-      return GL_FALSE;
+      do_set_program_inouts(shader->ir, prog,
+                           shader->base.Type == GL_FRAGMENT_SHADER);
+
+      prog->SamplersUsed = shader->base.active_samplers;
+      _mesa_update_shader_textures_used(shProg, prog);
 
-   if (!brw_shader_precompile(ctx, prog))
-      return GL_FALSE;
+      _mesa_reference_program(ctx, &shader->base.Program, prog);
 
-   return GL_TRUE;
+      brw_add_texrect_params(prog);
+
+      /* This has to be done last.  Any operation that can cause
+       * prog->ParameterValues to get reallocated (e.g., anything that adds a
+       * program constant) has to happen before creating this linkage.
+       */
+      _mesa_associate_uniform_storage(ctx, shProg, prog->Parameters);
+
+      _mesa_reference_program(ctx, &prog, NULL);
+
+      if (ctx->Shader.Flags & GLSL_DUMP) {
+         static const char *target_strings[]
+            = { "vertex", "fragment", "geometry" };
+         printf("\n");
+         printf("GLSL IR for linked %s program %d:\n", target_strings[stage],
+                shProg->Name);
+         _mesa_print_ir(shader->base.ir, NULL);
+      }
+   }
+
+   if (!brw_shader_precompile(ctx, shProg))
+      return false;
+
+   return true;
 }
 
 
@@ -156,6 +284,7 @@ brw_type_for_base_type(const struct glsl_type *type)
    case GLSL_TYPE_UINT:
       return BRW_REGISTER_TYPE_UD;
    case GLSL_TYPE_ARRAY:
+      return brw_type_for_base_type(type->fields.array);
    case GLSL_TYPE_STRUCT:
    case GLSL_TYPE_SAMPLER:
       /* These should be overridden with the type of the member when
@@ -163,10 +292,14 @@ brw_type_for_base_type(const struct glsl_type *type)
        * way to trip up if we don't.
        */
       return BRW_REGISTER_TYPE_UD;
-   default:
+   case GLSL_TYPE_VOID:
+   case GLSL_TYPE_ERROR:
+   case GLSL_TYPE_INTERFACE:
       assert(!"not reached");
-      return BRW_REGISTER_TYPE_F;
+      break;
    }
+
+   return BRW_REGISTER_TYPE_F;
 }
 
 uint32_t
@@ -192,3 +325,56 @@ brw_conditional_for_comparison(unsigned int op)
       return BRW_CONDITIONAL_NZ;
    }
 }
+
+uint32_t
+brw_math_function(enum opcode op)
+{
+   switch (op) {
+   case SHADER_OPCODE_RCP:
+      return BRW_MATH_FUNCTION_INV;
+   case SHADER_OPCODE_RSQ:
+      return BRW_MATH_FUNCTION_RSQ;
+   case SHADER_OPCODE_SQRT:
+      return BRW_MATH_FUNCTION_SQRT;
+   case SHADER_OPCODE_EXP2:
+      return BRW_MATH_FUNCTION_EXP;
+   case SHADER_OPCODE_LOG2:
+      return BRW_MATH_FUNCTION_LOG;
+   case SHADER_OPCODE_POW:
+      return BRW_MATH_FUNCTION_POW;
+   case SHADER_OPCODE_SIN:
+      return BRW_MATH_FUNCTION_SIN;
+   case SHADER_OPCODE_COS:
+      return BRW_MATH_FUNCTION_COS;
+   case SHADER_OPCODE_INT_QUOTIENT:
+      return BRW_MATH_FUNCTION_INT_DIV_QUOTIENT;
+   case SHADER_OPCODE_INT_REMAINDER:
+      return BRW_MATH_FUNCTION_INT_DIV_REMAINDER;
+   default:
+      assert(!"not reached: unknown math function");
+      return 0;
+   }
+}
+
+uint32_t
+brw_texture_offset(ir_constant *offset)
+{
+   assert(offset != NULL);
+
+   signed char offsets[3];
+   for (unsigned i = 0; i < offset->type->vector_elements; i++)
+      offsets[i] = (signed char) offset->value.i[i];
+
+   /* Combine all three offsets into a single unsigned dword:
+    *
+    *    bits 11:8 - U Offset (X component)
+    *    bits  7:4 - V Offset (Y component)
+    *    bits  3:0 - R Offset (Z component)
+    */
+   unsigned offset_bits = 0;
+   for (unsigned i = 0; i < offset->type->vector_elements; i++) {
+      const unsigned shift = 4 * (2 - i);
+      offset_bits |= (offsets[i] << shift) & (0xF << shift);
+   }
+   return offset_bits;
+}
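To illustrate the packing in brw_texture_offset(): a hypothetical constant
ivec2 offset of (1, -2) packs as U = 0x1 and V = 0xE (4-bit two's complement),
i.e. 0x1E0. A standalone sketch of the same loop, with the negative component
masked before shifting:

   #include <cassert>
   #include <cstdint>

   /* Same layout as above: 4 bits per component, U in bits 11:8,
    * V in bits 7:4, R in bits 3:0.
    */
   static uint32_t pack_offsets(const signed char *offsets, unsigned n)
   {
      uint32_t bits = 0;
      for (unsigned i = 0; i < n; i++)
         bits |= (uint32_t)(offsets[i] & 0xF) << (4 * (2 - i));
      return bits;
   }

   int main()
   {
      const signed char off[2] = { 1, -2 };
      assert(pack_offsets(off, 2) == 0x1E0);
      return 0;
   }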