Merge remote-tracking branch 'mesa-public/master' into vulkan
diff --git a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp
index 6b0c4a5..fdf2f5b 100644
--- a/src/mesa/drivers/dri/i965/brw_fs_nir.cpp
+++ b/src/mesa/drivers/dri/i965/brw_fs_nir.cpp
  */
 
 #include "glsl/ir.h"
-#include "glsl/ir_optimization.h"
-#include "glsl/nir/glsl_to_nir.h"
 #include "main/shaderimage.h"
-#include "program/prog_to_nir.h"
 #include "brw_fs.h"
 #include "brw_fs_surface_builder.h"
-#include "brw_vec4_gs_visitor.h"
 #include "brw_nir.h"
-#include "brw_fs_surface_builder.h"
-#include "brw_vec4_gs_visitor.h"
+#include "brw_program.h"
 
 using namespace brw;
 using namespace brw::surface_access;
@@ -81,15 +76,42 @@ fs_visitor::nir_setup_inputs()
          reg.type = BRW_REGISTER_TYPE_D;
          bld.emit(FS_OPCODE_CINTERP, retype(input, BRW_REGISTER_TYPE_D), reg);
       } else {
-         emit_general_interpolation(input, var->name, var->type,
+         int location = var->data.location;
+         emit_general_interpolation(&input, var->name, var->type,
                                     (glsl_interp_qualifier) var->data.interpolation,
-                                    var->data.location, var->data.centroid,
+                                    &location, var->data.centroid,
                                     var->data.sample);
       }
    }
 }
 
 void
+fs_visitor::nir_setup_single_output_varying(fs_reg *reg,
+                                            const glsl_type *type,
+                                            unsigned *location)
+{
+   if (type->is_array() || type->is_matrix()) {
+      const struct glsl_type *elem_type = glsl_get_array_element(type);
+      const unsigned length = glsl_get_length(type);
+
+      for (unsigned i = 0; i < length; i++) {
+         nir_setup_single_output_varying(reg, elem_type, location);
+      }
+   } else if (type->is_record()) {
+      for (unsigned i = 0; i < type->length; i++) {
+         const struct glsl_type *field_type = type->fields.structure[i].type;
+         nir_setup_single_output_varying(reg, field_type, location);
+      }
+   } else {
+      assert(type->is_scalar() || type->is_vector());
+      this->outputs[*location] = *reg;
+      this->output_components[*location] = type->vector_elements;
+      *reg = offset(*reg, bld, 4);
+      (*location)++;
+   }
+}
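
The new helper recurses until it hits a scalar or vector leaf, assigning one varying location (and one vec4 of register space) per leaf; matrices behave like arrays of their column vectors. A minimal standalone sketch of the same walk, assuming a toy type description rather than Mesa's glsl_type:

#include <cstdio>
#include <vector>

/* Toy stand-in for glsl_type, for illustration only -- Mesa's real
 * glsl_type is much richer.  A node is a scalar/vector leaf, an array
 * (elem + length), or a struct (fields).
 */
struct toy_type {
   unsigned vector_elements = 0;            /* non-zero => leaf */
   const toy_type *elem = nullptr;          /* array element type */
   unsigned length = 0;                     /* array length */
   std::vector<const toy_type *> fields;    /* struct members */
};

/* Mirrors nir_setup_single_output_varying: every scalar/vector leaf
 * consumes one varying location.
 */
static void
walk(const toy_type *t, unsigned *location)
{
   if (t->elem) {
      for (unsigned i = 0; i < t->length; i++)
         walk(t->elem, location);
   } else if (!t->fields.empty()) {
      for (const toy_type *f : t->fields)
         walk(f, location);
   } else {
      printf("location %u: %u components\n", *location, t->vector_elements);
      (*location)++;
   }
}

int main()
{
   const toy_type vec3{3};
   const toy_type arr{0, &vec3, 2};                  /* vec3[2] */
   const toy_type rec{0, nullptr, 0, {&vec3, &arr}}; /* struct { vec3; vec3[2]; } */
   unsigned location = 4;
   walk(&rec, &location);   /* prints locations 4, 5 and 6 */
}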
+
+void
 fs_visitor::nir_setup_outputs()
 {
    brw_wm_prog_key *key = (brw_wm_prog_key*) this->key;
@@ -99,17 +121,13 @@ fs_visitor::nir_setup_outputs()
    nir_foreach_variable(var, &nir->outputs) {
       fs_reg reg = offset(nir_outputs, bld, var->data.driver_location);
 
-      int vector_elements = var->type->without_array()->vector_elements;
-
       switch (stage) {
       case MESA_SHADER_VERTEX:
-      case MESA_SHADER_GEOMETRY:
-         for (int i = 0; i < type_size_vec4(var->type); i++) {
-            int output = var->data.location + i;
-            this->outputs[output] = offset(reg, bld, 4 * i);
-            this->output_components[output] = vector_elements;
-         }
+      case MESA_SHADER_GEOMETRY: {
+         unsigned location = var->data.location;
+         nir_setup_single_output_varying(&reg, var->type, &location);
          break;
+      }
       case MESA_SHADER_FRAGMENT:
          if (var->data.index > 0) {
             assert(var->data.location == FRAG_RESULT_DATA0);
@@ -129,6 +147,8 @@ fs_visitor::nir_setup_outputs()
          } else if (var->data.location == FRAG_RESULT_SAMPLE_MASK) {
             this->sample_mask = reg;
          } else {
+            int vector_elements = var->type->without_array()->vector_elements;
+
             /* gl_FragData or a user-defined FS output */
             assert(var->data.location >= FRAG_RESULT_DATA0 &&
                    var->data.location < FRAG_RESULT_DATA0+BRW_MAX_DRAW_BUFFERS);
@@ -153,7 +173,7 @@ fs_visitor::nir_setup_uniforms()
    if (dispatch_width != 8)
       return;
 
-   uniforms = nir->num_uniforms;
+   uniforms = nir->num_uniforms / 4;
 
    nir_foreach_variable(var, &nir->uniforms) {
       /* UBOs and atomics don't take up space in the uniform file */
@@ -161,7 +181,7 @@ fs_visitor::nir_setup_uniforms()
          continue;
 
       if (type_size_scalar(var->type) > 0)
-         param_size[var->data.driver_location] = type_size_scalar(var->type);
+         param_size[var->data.driver_location / 4] = type_size_scalar(var->type);
    }
 }
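
Both `/ 4` adjustments above reflect a unit change: `nir->num_uniforms` and `driver_location` are now byte offsets, while the scalar backend's uniform file is indexed in 32-bit components. The same change explains the `/ 4` and `* 4` adjustments in get_nir_image_deref further down. A small sketch of the conversion, assuming dword-aligned offsets:

#include <cassert>

/* Sketch of the unit change, assuming driver locations are byte
 * offsets and the UNIFORM file is indexed in 32-bit components:
 * byte offset 48 lands at component 48 / 4 == 12.
 */
static unsigned
uniform_component(unsigned byte_offset)
{
   assert(byte_offset % 4 == 0);   /* offsets must be dword aligned */
   return byte_offset / 4;
}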
 
@@ -1076,28 +1096,6 @@ fs_visitor::nir_emit_undef(const fs_builder &bld, nir_ssa_undef_instr *instr)
                                                instr->def.num_components);
 }
 
-static fs_reg
-fs_reg_for_nir_reg(fs_visitor *v, nir_register *nir_reg,
-                   unsigned base_offset, nir_src *indirect)
-{
-   fs_reg reg;
-
-   assert(!nir_reg->is_global);
-
-   reg = v->nir_locals[nir_reg->index];
-
-   reg = offset(reg, v->bld, base_offset * nir_reg->num_components);
-   if (indirect) {
-      int multiplier = nir_reg->num_components * (v->dispatch_width / 8);
-
-      reg.reladdr = new(v->mem_ctx) fs_reg(v->vgrf(glsl_type::int_type));
-      v->bld.MUL(*reg.reladdr, v->get_nir_src(*indirect),
-                 brw_imm_d(multiplier));
-   }
-
-   return reg;
-}
-
 fs_reg
 fs_visitor::get_nir_src(nir_src src)
 {
@@ -1105,8 +1103,10 @@ fs_visitor::get_nir_src(nir_src src)
    if (src.is_ssa) {
       reg = nir_ssa_values[src.ssa->index];
    } else {
-      reg = fs_reg_for_nir_reg(this, src.reg.reg, src.reg.base_offset,
-                               src.reg.indirect);
+      /* We don't handle indirects on locals */
+      assert(src.reg.indirect == NULL);
+      reg = offset(nir_locals[src.reg.reg->index], bld,
+                   src.reg.base_offset * src.reg.reg->num_components);
    }
 
    /* to avoid floating-point denorm flushing problems, set the type by
@@ -1123,16 +1123,18 @@ fs_visitor::get_nir_dest(nir_dest dest)
       nir_ssa_values[dest.ssa.index] = bld.vgrf(BRW_REGISTER_TYPE_F,
                                                 dest.ssa.num_components);
       return nir_ssa_values[dest.ssa.index];
+   } else {
+      /* We don't handle indirects on locals */
+      assert(dest.reg.indirect == NULL);
+      return offset(nir_locals[dest.reg.reg->index], bld,
+                    dest.reg.base_offset * dest.reg.reg->num_components);
    }
-
-   return fs_reg_for_nir_reg(this, dest.reg.reg, dest.reg.base_offset,
-                             dest.reg.indirect);
 }
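
With indirect addressing on locals asserted away (presumably lowered earlier in the NIR pipeline), reading element base_offset of a local NIR register reduces to a constant VGRF offset. A small sketch of the addressing math, in scalar slots:

/* Element base_offset of a register with num_components components
 * begins this many scalar slots into its VGRF; the result is what
 * gets passed to offset(reg, bld, ...) above.
 */
static unsigned
local_reg_slot(unsigned base_offset, unsigned num_components)
{
   return base_offset * num_components;
}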
 
 fs_reg
 fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
 {
-   fs_reg image(UNIFORM, deref->var->data.driver_location,
+   fs_reg image(UNIFORM, deref->var->data.driver_location / 4,
                 BRW_REGISTER_TYPE_UD);
 
    for (const nir_deref *tail = &deref->deref; tail->child;
@@ -1163,7 +1165,7 @@ fs_visitor::get_nir_image_deref(const nir_deref_var *deref)
             bld.MOV(tmp, get_nir_src(deref_array->indirect));
          }
 
-         bld.MUL(tmp, tmp, brw_imm_ud(element_size));
+         bld.MUL(tmp, tmp, brw_imm_ud(element_size * 4));
          if (image.reladdr)
             bld.ADD(*image.reladdr, *image.reladdr, tmp);
          else
@@ -1601,28 +1603,30 @@ fs_visitor::emit_gs_vertex(const nir_src &vertex_count_nir_src,
 void
 fs_visitor::emit_gs_input_load(const fs_reg &dst,
                                const nir_src &vertex_src,
-                               const fs_reg &indirect_offset,
-                               unsigned imm_offset,
+                               unsigned base_offset,
+                               const nir_src &offset_src,
                                unsigned num_components)
 {
    struct brw_gs_prog_data *gs_prog_data = (struct brw_gs_prog_data *) prog_data;
 
+   nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
+   nir_const_value *offset_const = nir_src_as_const_value(offset_src);
+   const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
+
    /* Offset 0 is the VUE header, which contains VARYING_SLOT_LAYER [.y],
     * VARYING_SLOT_VIEWPORT [.z], and VARYING_SLOT_PSIZ [.w].  Only
     * gl_PointSize is available as a GS input, however, so it must be that.
     */
-   const bool is_point_size =
-      indirect_offset.file == BAD_FILE && imm_offset == 0;
+   const bool is_point_size = (base_offset == 0);
 
-   nir_const_value *vertex_const = nir_src_as_const_value(vertex_src);
-   const unsigned push_reg_count = gs_prog_data->base.urb_read_length * 8;
-
-   if (indirect_offset.file == BAD_FILE && vertex_const != NULL &&
-       4 * imm_offset < push_reg_count) {
-      imm_offset = 4 * imm_offset + vertex_const->u[0] * push_reg_count;
+   if (offset_const != NULL && vertex_const != NULL &&
+       4 * (base_offset + offset_const->u[0]) < push_reg_count) {
+      int imm_offset = (base_offset + offset_const->u[0]) * 4 +
+                       vertex_const->u[0] * push_reg_count;
       /* This input was pushed into registers. */
       if (is_point_size) {
          /* gl_PointSize comes in .w */
+         assert(imm_offset == 0);
          bld.MOV(dst, fs_reg(ATTR, imm_offset + 3, dst.type));
       } else {
          for (unsigned i = 0; i < num_components; i++) {
@@ -1681,21 +1685,21 @@ fs_visitor::emit_gs_input_load(const fs_reg &dst,
       }
 
       fs_inst *inst;
-      if (indirect_offset.file == BAD_FILE) {
+      if (offset_const) {
          /* Constant indexing - use global offset. */
          inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8, dst, icp_handle);
-         inst->offset = imm_offset;
+         inst->offset = base_offset + offset_const->u[0];
          inst->base_mrf = -1;
          inst->mlen = 1;
          inst->regs_written = num_components;
       } else {
          /* Indirect indexing - use per-slot offsets as well. */
-         const fs_reg srcs[] = { icp_handle, indirect_offset };
+         const fs_reg srcs[] = { icp_handle, get_nir_src(offset_src) };
          fs_reg payload = bld.vgrf(BRW_REGISTER_TYPE_UD, 2);
          bld.LOAD_PAYLOAD(payload, srcs, ARRAY_SIZE(srcs), 0);
 
          inst = bld.emit(SHADER_OPCODE_URB_READ_SIMD8_PER_SLOT, dst, payload);
-         inst->offset = imm_offset;
+         inst->offset = base_offset;
          inst->base_mrf = -1;
          inst->mlen = 2;
          inst->regs_written = num_components;
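
A worked example of the push-versus-pull decision above, assuming both the vertex index and the offset fold to constants (all values illustrative):

#include <cstdio>

int main()
{
   const unsigned urb_read_length = 2;   /* from brw_gs_prog_data */
   const unsigned push_reg_count = urb_read_length * 8;

   const unsigned base_offset = 1;    /* const_index[0]: slot of the input */
   const unsigned offset_const = 0;   /* constant-folded offset_src */
   const unsigned vertex_const = 2;   /* constant-folded vertex index */

   if (4 * (base_offset + offset_const) < push_reg_count) {
      /* Pushed input: each vertex occupies push_reg_count scalar slots
       * in the ATTR file, so vertex 2, slot 1 reads ATTR 4 + 2*16 = 36.
       */
      unsigned imm_offset = (base_offset + offset_const) * 4 +
                            vertex_const * push_reg_count;
      printf("pushed: ATTR %u\n", imm_offset);
   } else {
      printf("pulled: URB read message\n");
   }
}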
@@ -1761,17 +1765,12 @@ fs_visitor::nir_emit_gs_intrinsic(const fs_builder &bld,
               retype(fs_reg(brw_vec8_grf(2, 0)), BRW_REGISTER_TYPE_UD));
       break;
 
-   case nir_intrinsic_load_input_indirect:
    case nir_intrinsic_load_input:
       unreachable("load_input intrinsics are invalid for the GS stage");
 
-   case nir_intrinsic_load_per_vertex_input_indirect:
-      indirect_offset = retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_D);
-      /* fallthrough */
    case nir_intrinsic_load_per_vertex_input:
-      emit_gs_input_load(dest, instr->src[0],
-                         indirect_offset, instr->const_index[0],
-                         instr->num_components);
+      emit_gs_input_load(dest, instr->src[0], instr->const_index[0],
+                         instr->src[1], instr->num_components);
       break;
 
    case nir_intrinsic_emit_vertex_with_counter:
@@ -2091,6 +2090,37 @@ fs_visitor::nir_emit_cs_intrinsic(const fs_builder &bld,
       break;
    }
 
+   case nir_intrinsic_shared_atomic_add:
+      nir_emit_shared_atomic(bld, BRW_AOP_ADD, instr);
+      break;
+   case nir_intrinsic_shared_atomic_imin:
+      nir_emit_shared_atomic(bld, BRW_AOP_IMIN, instr);
+      break;
+   case nir_intrinsic_shared_atomic_umin:
+      nir_emit_shared_atomic(bld, BRW_AOP_UMIN, instr);
+      break;
+   case nir_intrinsic_shared_atomic_imax:
+      nir_emit_shared_atomic(bld, BRW_AOP_IMAX, instr);
+      break;
+   case nir_intrinsic_shared_atomic_umax:
+      nir_emit_shared_atomic(bld, BRW_AOP_UMAX, instr);
+      break;
+   case nir_intrinsic_shared_atomic_and:
+      nir_emit_shared_atomic(bld, BRW_AOP_AND, instr);
+      break;
+   case nir_intrinsic_shared_atomic_or:
+      nir_emit_shared_atomic(bld, BRW_AOP_OR, instr);
+      break;
+   case nir_intrinsic_shared_atomic_xor:
+      nir_emit_shared_atomic(bld, BRW_AOP_XOR, instr);
+      break;
+   case nir_intrinsic_shared_atomic_exchange:
+      nir_emit_shared_atomic(bld, BRW_AOP_MOV, instr);
+      break;
+   case nir_intrinsic_shared_atomic_comp_swap:
+      nir_emit_shared_atomic(bld, BRW_AOP_CMPWR, instr);
+      break;
+
    default:
       nir_emit_intrinsic(bld, instr);
       break;
@@ -2104,8 +2134,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
    if (nir_intrinsic_infos[instr->intrinsic].has_dest)
       dest = get_nir_dest(instr->dest);
 
-   bool has_indirect = false;
-
    switch (instr->intrinsic) {
    case nir_intrinsic_atomic_counter_inc:
    case nir_intrinsic_atomic_counter_dec:
@@ -2294,27 +2322,27 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       bld.MOV(retype(dest, BRW_REGISTER_TYPE_D), brw_imm_d(1));
       break;
 
-   case nir_intrinsic_load_uniform_indirect:
-      has_indirect = true;
-      /* fallthrough */
    case nir_intrinsic_load_uniform: {
-      fs_reg uniform_reg(UNIFORM, instr->const_index[0]);
-      uniform_reg.reg_offset = instr->const_index[1];
+      /* Offsets are in bytes but they should always be multiples of 4 */
+      assert(instr->const_index[0] % 4 == 0);
 
-      for (unsigned j = 0; j < instr->num_components; j++) {
-         fs_reg src = offset(retype(uniform_reg, dest.type), bld, j);
-         if (has_indirect)
-            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
+      fs_reg src(UNIFORM, instr->const_index[0] / 4, dest.type);
 
-         bld.MOV(dest, src);
-         dest = offset(dest, bld, 1);
+      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
+      if (const_offset) {
+         /* Offsets are in bytes but they should always be multiples of 4 */
+         assert(const_offset->u[0] % 4 == 0);
+         src.reg_offset = const_offset->u[0] / 4;
+      } else {
+         src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
+      }
+
+      for (unsigned j = 0; j < instr->num_components; j++) {
+         bld.MOV(offset(dest, bld, j), offset(src, bld, j));
       }
       break;
    }
 
-   case nir_intrinsic_load_ubo_indirect:
-      has_indirect = true;
-      /* fallthrough */
    case nir_intrinsic_load_ubo: {
       nir_const_value *const_index = nir_src_as_const_value(instr->src[0]);
       fs_reg surf_index;
@@ -2342,27 +2370,24 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                                nir->info.num_ubos - 1);
       }
 
-      if (has_indirect) {
-         /* Turn the byte offset into a dword offset. */
-         fs_reg base_offset = vgrf(glsl_type::int_type);
-         bld.SHR(base_offset, retype(get_nir_src(instr->src[1]),
-                                     BRW_REGISTER_TYPE_D),
-                 brw_imm_d(2));
+      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
+      if (const_offset == NULL) {
+         fs_reg base_offset = retype(get_nir_src(instr->src[1]),
+                                     BRW_REGISTER_TYPE_D);
 
-         unsigned vec4_offset = instr->const_index[0] / 4;
          for (int i = 0; i < instr->num_components; i++)
             VARYING_PULL_CONSTANT_LOAD(bld, offset(dest, bld, i), surf_index,
-                                       base_offset, vec4_offset + i);
+                                       base_offset, i * 4);
       } else {
          fs_reg packed_consts = vgrf(glsl_type::float_type);
          packed_consts.type = dest.type;
 
-         struct brw_reg const_offset_reg = brw_imm_ud(instr->const_index[0] & ~15);
+         struct brw_reg const_offset_reg = brw_imm_ud(const_offset->u[0] & ~15);
          bld.emit(FS_OPCODE_UNIFORM_PULL_CONSTANT_LOAD, packed_consts,
                   surf_index, const_offset_reg);
 
          for (unsigned i = 0; i < instr->num_components; i++) {
-            packed_consts.set_smear(instr->const_index[0] % 16 / 4 + i);
+            packed_consts.set_smear(const_offset->u[0] % 16 / 4 + i);
 
             /* The std140 packing rules don't allow vectors to cross 16-byte
              * boundaries, and a reg is 32 bytes.
@@ -2376,9 +2401,6 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       break;
    }
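
A worked example of the constant-offset UBO path above: the pull-constant message fetches an aligned 16-byte block, and set_smear() then selects individual dwords within it (the offset value is illustrative):

#include <cstdio>

int main()
{
   const unsigned const_offset = 20;                   /* byte offset of the vector */
   const unsigned block = const_offset & ~15u;         /* message reads bytes 16..31 */
   const unsigned first_dword = const_offset % 16 / 4; /* vector starts at dword 1 */
   printf("block at byte %u, first dword %u\n", block, first_dword);
}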
 
-   case nir_intrinsic_load_ssbo_indirect:
-      has_indirect = true;
-      /* fallthrough */
    case nir_intrinsic_load_ssbo: {
       assert(devinfo->gen >= 7);
 
@@ -2404,12 +2426,41 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
                                nir->info.num_ssbos - 1);
       }
 
-      /* Get the offset to read from */
       fs_reg offset_reg;
-      if (has_indirect) {
+      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
+      if (const_offset) {
+         offset_reg = brw_imm_ud(const_offset->u[0]);
+      } else {
          offset_reg = get_nir_src(instr->src[1]);
+      }
+
+      /* Read the vector */
+      fs_reg read_result = emit_untyped_read(bld, surf_index, offset_reg,
+                                             1 /* dims */,
+                                             instr->num_components,
+                                             BRW_PREDICATE_NONE);
+      read_result.type = dest.type;
+      for (int i = 0; i < instr->num_components; i++)
+         bld.MOV(offset(dest, bld, i), offset(read_result, bld, i));
+
+      break;
+   }
+
+   case nir_intrinsic_load_shared: {
+      assert(devinfo->gen >= 7);
+
+      fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
+
+      /* Get the offset to read from */
+      fs_reg offset_reg;
+      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
+      if (const_offset) {
+         offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0]);
       } else {
-         offset_reg = brw_imm_ud(instr->const_index[0]);
+         offset_reg = vgrf(glsl_type::uint_type);
+         bld.ADD(offset_reg,
+                 retype(get_nir_src(instr->src[0]), BRW_REGISTER_TYPE_UD),
+                 brw_imm_ud(instr->const_index[0]));
       }
 
       /* Read the vector */
@@ -2424,32 +2475,72 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       break;
    }
 
-   case nir_intrinsic_load_input_indirect:
-      has_indirect = true;
-      /* fallthrough */
-   case nir_intrinsic_load_input: {
-      unsigned index = 0;
-      for (unsigned j = 0; j < instr->num_components; j++) {
-         fs_reg src;
-         if (stage == MESA_SHADER_VERTEX) {
-            src = offset(fs_reg(ATTR, instr->const_index[0], dest.type), bld, index);
+   case nir_intrinsic_store_shared: {
+      assert(devinfo->gen >= 7);
+
+      /* Block index */
+      fs_reg surf_index = brw_imm_ud(GEN7_BTI_SLM);
+
+      /* Value */
+      fs_reg val_reg = get_nir_src(instr->src[0]);
+
+      /* Writemask */
+      unsigned writemask = instr->const_index[1];
+
+      /* Combine groups of consecutive enabled channels in one write
+       * message. We use ffs to find the first enabled channel and then ffs on
+       * the bit-inverse, down-shifted writemask to determine the length of
+       * the block of enabled bits.
+       */
+      while (writemask) {
+         unsigned first_component = ffs(writemask) - 1;
+         unsigned length = ffs(~(writemask >> first_component)) - 1;
+         fs_reg offset_reg;
+
+         nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
+         if (const_offset) {
+            offset_reg = brw_imm_ud(instr->const_index[0] + const_offset->u[0] +
+                                    4 * first_component);
          } else {
-            src = offset(retype(nir_inputs, dest.type), bld,
-                         instr->const_index[0] + index);
+            offset_reg = vgrf(glsl_type::uint_type);
+            bld.ADD(offset_reg,
+                    retype(get_nir_src(instr->src[1]), BRW_REGISTER_TYPE_UD),
+                    brw_imm_ud(instr->const_index[0] + 4 * first_component));
          }
-         if (has_indirect)
-            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[0]));
-         index++;
 
-         bld.MOV(dest, src);
-         dest = offset(dest, bld, 1);
+         emit_untyped_write(bld, surf_index, offset_reg,
+                            offset(val_reg, bld, first_component),
+                            1 /* dims */, length,
+                            BRW_PREDICATE_NONE);
+
+         /* Clear the bits in the writemask that we just wrote, then try
+          * again to see if more channels are left.
+          */
+         writemask &= (15 << (first_component + length));
+      }
+
+      break;
+   }
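
A standalone sketch of the writemask-splitting loop above, which is also reused for SSBO stores below: it splits a 4-bit writemask into runs of consecutive enabled channels, emitting one write message per run.

#include <cstdio>
#include <strings.h>   /* ffs() */

int main()
{
   unsigned writemask = 0xb;   /* channels 0, 1 and 3 enabled */
   while (writemask) {
      unsigned first_component = ffs(writemask) - 1;
      unsigned length = ffs(~(writemask >> first_component)) - 1;
      printf("write %u channel(s) starting at channel %u\n",
             length, first_component);
      /* Keep only the bits above the run just written. */
      writemask &= (15 << (first_component + length));
   }
   /* Prints: 2 channels at 0, then 1 channel at 3. */
}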
+
+   case nir_intrinsic_load_input: {
+      fs_reg src;
+      if (stage == MESA_SHADER_VERTEX) {
+         src = fs_reg(ATTR, instr->const_index[0], dest.type);
+      } else {
+         src = offset(retype(nir_inputs, dest.type), bld,
+                      instr->const_index[0]);
+      }
+
+      nir_const_value *const_offset = nir_src_as_const_value(instr->src[0]);
+      assert(const_offset && "Indirect input loads not allowed");
+      src = offset(src, bld, const_offset->u[0]);
+
+      for (unsigned j = 0; j < instr->num_components; j++) {
+         bld.MOV(offset(dest, bld, j), offset(src, bld, j));
       }
       break;
    }
 
-   case nir_intrinsic_store_ssbo_indirect:
-      has_indirect = true;
-      /* fallthrough */
    case nir_intrinsic_store_ssbo: {
       assert(devinfo->gen >= 7);
 
@@ -2476,7 +2567,7 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       fs_reg val_reg = get_nir_src(instr->src[0]);
 
       /* Writemask */
-      unsigned writemask = instr->const_index[1];
+      unsigned writemask = instr->const_index[0];
 
       /* Combine groups of consecutive enabled channels in one write
        * message. We use ffs to find the first enabled channel and then ffs on
@@ -2486,10 +2577,11 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       while (writemask) {
          unsigned first_component = ffs(writemask) - 1;
          unsigned length = ffs(~(writemask >> first_component)) - 1;
-         fs_reg offset_reg;
 
-         if (!has_indirect) {
-            offset_reg = brw_imm_ud(instr->const_index[0] + 4 * first_component);
+         fs_reg offset_reg;
+         nir_const_value *const_offset = nir_src_as_const_value(instr->src[2]);
+         if (const_offset) {
+            offset_reg = brw_imm_ud(const_offset->u[0] + 4 * first_component);
          } else {
             offset_reg = vgrf(glsl_type::uint_type);
             bld.ADD(offset_reg,
@@ -2510,20 +2602,17 @@ fs_visitor::nir_emit_intrinsic(const fs_builder &bld, nir_intrinsic_instr *instr
       break;
    }
 
-   case nir_intrinsic_store_output_indirect:
-      has_indirect = true;
-      /* fallthrough */
    case nir_intrinsic_store_output: {
       fs_reg src = get_nir_src(instr->src[0]);
-      unsigned index = 0;
+      fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
+                               instr->const_index[0]);
+
+      nir_const_value *const_offset = nir_src_as_const_value(instr->src[1]);
+      assert(const_offset && "Indirect output stores not allowed");
+      new_dest = offset(new_dest, bld, const_offset->u[0]);
+
       for (unsigned j = 0; j < instr->num_components; j++) {
-         fs_reg new_dest = offset(retype(nir_outputs, src.type), bld,
-                                  instr->const_index[0] + index);
-         if (has_indirect)
-            src.reladdr = new(mem_ctx) fs_reg(get_nir_src(instr->src[1]));
-         index++;
-         bld.MOV(new_dest, src);
-         src = offset(src, bld, 1);
+         bld.MOV(offset(new_dest, bld, j), offset(src, bld, j));
       }
       break;
    }
@@ -2647,6 +2736,33 @@ fs_visitor::nir_emit_ssbo_atomic(const fs_builder &bld,
 }
 
 void
+fs_visitor::nir_emit_shared_atomic(const fs_builder &bld,
+                                   int op, nir_intrinsic_instr *instr)
+{
+   fs_reg dest;
+   if (nir_intrinsic_infos[instr->intrinsic].has_dest)
+      dest = get_nir_dest(instr->dest);
+
+   fs_reg surface = brw_imm_ud(GEN7_BTI_SLM);
+   fs_reg offset = get_nir_src(instr->src[0]);
+   fs_reg data1 = get_nir_src(instr->src[1]);
+   fs_reg data2;
+   if (op == BRW_AOP_CMPWR)
+      data2 = get_nir_src(instr->src[2]);
+
+   /* Emit the actual atomic operation */
+
+   fs_reg atomic_result =
+      surface_access::emit_untyped_atomic(bld, surface, offset,
+                                          data1, data2,
+                                          1 /* dims */, 1 /* rsize */,
+                                          op,
+                                          BRW_PREDICATE_NONE);
+   dest.type = atomic_result.type;
+   bld.MOV(dest, atomic_result);
+}
+
+void
 fs_visitor::nir_emit_texture(const fs_builder &bld, nir_tex_instr *instr)
 {
    unsigned texture = instr->texture_index;