
i965: Assert that the offset in the VBO is below the VBO size.
[android-x86/external-mesa.git] / src/mesa/drivers/dri/i965/brw_draw_upload.c
index c9e1a6e..05079c0 100644
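
For context, a minimal sketch of the client-side misuse the new assert in brw_prepare_vertices() is aimed at (hypothetical application code, not part of this commit): with a buffer object bound to GL_ARRAY_BUFFER, the final gl*Pointer argument is a byte offset into that VBO, so passing a client-memory pointer instead produces an offset far beyond the buffer's size.

    #define GL_GLEXT_PROTOTYPES
    #include <GL/gl.h>
    #include <GL/glext.h>

    /* Hypothetical example: the wrong call below makes the driver compute
     * input->offset = (unsigned long)verts, which the new
     * assert(input->offset < input->bo->size) catches immediately instead
     * of letting the hardware read past the end of the buffer object.
     */
    static void setup_positions(GLuint vbo, const GLfloat *verts, GLsizeiptr bytes)
    {
       glBindBuffer(GL_ARRAY_BUFFER, vbo);
       glBufferData(GL_ARRAY_BUFFER, bytes, verts, GL_STATIC_DRAW);

       /* WRONG: 'verts' is a client pointer, but a VBO is bound. */
       glVertexPointer(3, GL_FLOAT, 0, verts);

       /* RIGHT: pass the byte offset at which the data lives in the VBO. */
       glVertexPointer(3, GL_FLOAT, 0, (const GLvoid *) 0);
    }
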
 
 #include <stdlib.h>
 
-#include "glheader.h"
-#include "context.h"
-#include "state.h"
-#include "api_validate.h"
-#include "enums.h"
+#include "main/glheader.h"
+#include "main/context.h"
+#include "main/state.h"
+#include "main/api_validate.h"
+#include "main/enums.h"
 
 #include "brw_draw.h"
 #include "brw_defines.h"
 #include "brw_state.h"
 #include "brw_fallback.h"
 
-#include "intel_ioctl.h"
 #include "intel_batchbuffer.h"
 #include "intel_buffer_objects.h"
 #include "intel_tex.h"
 
-struct brw_array_state {
-   union header_union header;
-
-   struct {
-      union {
-        struct {
-           GLuint pitch:11; 
-           GLuint pad:15;
-           GLuint access_type:1; 
-           GLuint vb_index:5; 
-        } bits;
-        GLuint dword;
-      } vb0;
-   
-      dri_bo *buffer;
-      GLuint offset;
-
-      GLuint max_index;   
-      GLuint instance_data_step_rate;
-
-   } vb[BRW_VBP_MAX];
-};
-
-
-static dri_bo *array_buffer( struct intel_context *intel,
-                            const struct gl_client_array *array )
-{
-   return intel_bufferobj_buffer(intel, intel_buffer_object(array->BufferObj),
-                                INTEL_WRITE_PART);
-}
-
 static GLuint double_types[5] = {
    0,
    BRW_SURFACEFORMAT_R64_FLOAT,
@@ -188,7 +156,13 @@ static GLuint byte_types_scale[5] = {
 };
 
 
-static GLuint get_surface_type( GLenum type, GLuint size, GLboolean normalized )
+/**
+ * Given vertex array type/size/format/normalized info, return
+ * the appropriate hardware surface type.
+ * Format will be GL_RGBA or possibly GL_BGRA for GLubyte[4] color arrays.
+ */
+static GLuint get_surface_type( GLenum type, GLuint size,
+                                GLenum format, GLboolean normalized )
 {
    if (INTEL_DEBUG & DEBUG_VERTS)
       _mesa_printf("type %s size %d normalized %d\n", 
@@ -203,11 +177,20 @@ static GLuint get_surface_type( GLenum type, GLuint size, GLboolean normalized )
       case GL_BYTE: return byte_types_norm[size];
       case GL_UNSIGNED_INT: return uint_types_norm[size];
       case GL_UNSIGNED_SHORT: return ushort_types_norm[size];
-      case GL_UNSIGNED_BYTE: return ubyte_types_norm[size];
+      case GL_UNSIGNED_BYTE:
+         if (format == GL_BGRA) {
+            /* See GL_EXT_vertex_array_bgra */
+            assert(size == 4);
+            return BRW_SURFACEFORMAT_B8G8R8A8_UNORM;
+         }
+         else {
+            return ubyte_types_norm[size];
+         }
       default: assert(0); return 0;
       }      
    }
    else {
+      assert(format == GL_RGBA); /* sanity check */
       switch (type) {
       case GL_DOUBLE: return double_types[size];
       case GL_FLOAT: return float_types[size];
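
For reference, a hedged example (hypothetical application code, not from this commit) of the client usage that reaches the new GL_BGRA branch above, per GL_EXT_vertex_array_bgra: a four-component unsigned-byte color array declared with size GL_BGRA, which the array state records as Format GL_BGRA with Size 4 (hence the assert(size == 4)), so get_surface_type() picks BRW_SURFACEFORMAT_B8G8R8A8_UNORM instead of the ubyte table.

    #define GL_GLEXT_PROTOTYPES
    #include <GL/gl.h>
    #include <GL/glext.h>

    /* Hypothetical snippet: BGRA-ordered ubyte colors stored in a VBO.
     * GL_EXT_vertex_array_bgra lets GL_BGRA stand in for the size, which
     * selects the byte-swapped B8G8R8A8_UNORM surface format above.
     */
    static void bind_bgra_colors(GLuint color_vbo)
    {
       glBindBuffer(GL_ARRAY_BUFFER, color_vbo);
       glColorPointer(GL_BGRA, GL_UNSIGNED_BYTE, 0, (const GLvoid *) 0);
       glEnableClientState(GL_COLOR_ARRAY);
    }
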
@@ -248,165 +231,124 @@ static GLuint get_index_type(GLenum type)
    }
 }
 
-static void copy_strided_array( GLubyte *dest, 
-                               const GLubyte *src, 
-                               GLuint size, 
-                               GLuint stride,
-                               GLuint count )
-{
-   if (size == stride) 
-      memcpy(dest, src, count * size);
-   else {
-      GLuint i;
-   
-      for (i = 0; i < count; i++) {
-        memcpy(dest, src, size);
-        src += stride;
-        dest += size;
-      }
-   }
-}
-
 static void wrap_buffers( struct brw_context *brw,
                          GLuint size )
 {
-   GLcontext *ctx = &brw->intel.ctx;
-
    if (size < BRW_UPLOAD_INIT_SIZE)
       size = BRW_UPLOAD_INIT_SIZE;
 
-   brw->vb.upload.buf++;
-   brw->vb.upload.buf %= BRW_NR_UPLOAD_BUFS;
    brw->vb.upload.offset = 0;
 
-   ctx->Driver.BufferData(ctx,
-                         GL_ARRAY_BUFFER_ARB,
-                         size,
-                         NULL,
-                         GL_DYNAMIC_DRAW_ARB,
-                         brw->vb.upload.vbo[brw->vb.upload.buf]);
+   if (brw->vb.upload.bo != NULL)
+      dri_bo_unreference(brw->vb.upload.bo);
+   brw->vb.upload.bo = dri_bo_alloc(brw->intel.bufmgr, "temporary VBO",
+                                   size, 1);
+
+   /* Set the internal VBOs to no-backing-store.  We only use them as a
+    * temporary within a brw_try_draw_prims while the lock is held.
+    */
+   /* DON'T DO THIS AS IF WE HAVE TO RE-ORG MEMORY WE NEED SOMEWHERE WITH
+      FAKE TO PUSH THIS STUFF */
+//   if (!brw->intel.ttm)
+//      dri_bo_fake_disable_backing_store(brw->vb.upload.bo, NULL, NULL);
 }
 
 static void get_space( struct brw_context *brw,
                       GLuint size,
-                      struct gl_buffer_object **vbo_return,
+                      dri_bo **bo_return,
                       GLuint *offset_return )
 {
    size = ALIGN(size, 64);
-   
-   if (brw->vb.upload.offset + size > BRW_UPLOAD_INIT_SIZE)
+
+   if (brw->vb.upload.bo == NULL ||
+       brw->vb.upload.offset + size > brw->vb.upload.bo->size) {
       wrap_buffers(brw, size);
+   }
 
-   *vbo_return = brw->vb.upload.vbo[brw->vb.upload.buf];
+   assert(*bo_return == NULL);
+   dri_bo_reference(brw->vb.upload.bo);
+   *bo_return = brw->vb.upload.bo;
    *offset_return = brw->vb.upload.offset;
-
    brw->vb.upload.offset += size;
 }
 
-static struct gl_client_array *
+static void
 copy_array_to_vbo_array( struct brw_context *brw,
-                        GLuint i,
-                        const struct gl_client_array *array,
-                        GLuint element_size,
-                        GLuint count)
+                        struct brw_vertex_element *element,
+                        GLuint dst_stride)
 {
-   GLcontext *ctx = &brw->intel.ctx;
-   struct gl_client_array *vbo_array = &brw->vb.vbo_array[i];
-   GLuint size = count * element_size;
-   struct gl_buffer_object *vbo;
-   GLuint offset;
-   GLuint new_stride;
+   struct intel_context *intel = &brw->intel;
+   GLuint size = element->count * dst_stride;
 
-   get_space(brw, size, &vbo, &offset);
+   get_space(brw, size, &element->bo, &element->offset);
 
-   if (array->StrideB == 0) {
-      assert(count == 1);
-      new_stride = 0;
+   if (element->glarray->StrideB == 0) {
+      assert(element->count == 1);
+      element->stride = 0;
+   } else {
+      element->stride = dst_stride;
    }
-   else 
-      new_stride = element_size;
-
-   vbo_array->Size = array->Size;
-   vbo_array->Type = array->Type;
-   vbo_array->Stride = new_stride;
-   vbo_array->StrideB = new_stride;   
-   vbo_array->Ptr = (const void *)offset;
-   vbo_array->Enabled = 1;
-   vbo_array->Normalized = array->Normalized;
-   vbo_array->_MaxElement = array->_MaxElement;        /* ? */
-   vbo_array->BufferObj = vbo;
 
-   {
-      GLubyte *map = ctx->Driver.MapBuffer(ctx,
-                                          GL_ARRAY_BUFFER_ARB,
-                                          GL_DYNAMIC_DRAW_ARB,
-                                          vbo);
-   
-      map += offset;
-
-      copy_strided_array( map, 
-                         array->Ptr,
-                         element_size,
-                         array->StrideB,
-                         count);
+   if (dst_stride == element->glarray->StrideB) {
+      if (intel->intelScreen->kernel_exec_fencing) {
+        drm_intel_gem_bo_map_gtt(element->bo);
+        memcpy((char *)element->bo->virtual + element->offset,
+               element->glarray->Ptr, size);
+        drm_intel_gem_bo_unmap_gtt(element->bo);
+      } else {
+        dri_bo_subdata(element->bo,
+                       element->offset,
+                       size,
+                       element->glarray->Ptr);
+      }
+   } else {
+      char *dest;
+      const unsigned char *src = element->glarray->Ptr;
+      int i;
+
+      if (intel->intelScreen->kernel_exec_fencing) {
+        drm_intel_gem_bo_map_gtt(element->bo);
+        dest = element->bo->virtual;
+        dest += element->offset;
+
+        for (i = 0; i < element->count; i++) {
+           memcpy(dest, src, dst_stride);
+           src += element->glarray->StrideB;
+           dest += dst_stride;
+        }
 
-      ctx->Driver.UnmapBuffer(ctx, GL_ARRAY_BUFFER_ARB, vbo_array->BufferObj);
-   }
+        drm_intel_gem_bo_unmap_gtt(element->bo);
+      } else {
+        void *data;
 
-   return vbo_array;
-}
+        data = _mesa_malloc(dst_stride * element->count);
+        dest = data;
+        for (i = 0; i < element->count; i++) {
+           memcpy(dest, src, dst_stride);
+           src += element->glarray->StrideB;
+           dest += dst_stride;
+        }
 
-/**
- * Just a wrapper to highlight which cause of copy_array_to_vbo_array
- * is happening in the profile.
- */
-static struct gl_client_array *
-interleaved_copy_array_to_vbo_array(struct brw_context *brw,
-                                   GLuint i,
-                                   const struct gl_client_array *array,
-                                   GLuint element_size,
-                                   GLuint count)
-{
-   return copy_array_to_vbo_array(brw, i, array, element_size, count);
-}
+        dri_bo_subdata(element->bo,
+                       element->offset,
+                       size,
+                       data);
 
-static struct gl_client_array *
-interleaved_vbo_array( struct brw_context *brw,
-                      GLuint i,
-                      const struct gl_client_array *uploaded_array,
-                      const struct gl_client_array *array,
-                      const char *ptr)
-{
-   struct gl_client_array *vbo_array = &brw->vb.vbo_array[i];
-
-   vbo_array->Size = array->Size;
-   vbo_array->Type = array->Type;
-   vbo_array->Stride = array->Stride;
-   vbo_array->StrideB = array->StrideB;   
-   vbo_array->Ptr = (const void *)((const char *)uploaded_array->Ptr + 
-                                  ((const char *)array->Ptr - ptr));
-   vbo_array->Enabled = 1;
-   vbo_array->Normalized = array->Normalized;
-   vbo_array->_MaxElement = array->_MaxElement;        
-   vbo_array->BufferObj = uploaded_array->BufferObj;
-
-   return vbo_array;
+        _mesa_free(data);
+      }
+   }
 }
 
-
-GLboolean brw_upload_vertices( struct brw_context *brw,
-                              GLuint min_index,
-                              GLuint max_index )
+static void brw_prepare_vertices(struct brw_context *brw)
 {
    GLcontext *ctx = &brw->intel.ctx;
    struct intel_context *intel = intel_context(ctx);
-   GLuint tmp = brw->vs.prog_data->inputs_read; 
+   GLbitfield vs_inputs = brw->vs.prog_data->inputs_read; 
    GLuint i;
-   const void *ptr = NULL;
+   const unsigned char *ptr = NULL;
    GLuint interleave = 0;
-
-   struct brw_vertex_element *enabled[VERT_ATTRIB_MAX];
-   GLuint nr_enabled = 0;
+   unsigned int min_index = brw->vb.min_index;
+   unsigned int max_index = brw->vb.max_index;
 
    struct brw_vertex_element *upload[VERT_ATTRIB_MAX];
    GLuint nr_uploads = 0;
@@ -415,30 +357,86 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
     */
    if (0)
       _mesa_printf("%s %d..%d\n", __FUNCTION__, min_index, max_index);
-   
-   while (tmp) {
-      GLuint i = _mesa_ffsll(tmp)-1;
+
+   /* Accumulate the list of enabled arrays. */
+   brw->vb.nr_enabled = 0;
+   while (vs_inputs) {
+      GLuint i = _mesa_ffsll(vs_inputs) - 1;
       struct brw_vertex_element *input = &brw->vb.inputs[i];
 
-      tmp &= ~(1<<i);
-      enabled[nr_enabled++] = input;
+      vs_inputs &= ~(1 << i);
+      brw->vb.enabled[brw->vb.nr_enabled++] = input;
+   }
+
+   /* XXX: In the rare cases where this happens we fallback all
+    * the way to software rasterization, although a tnl fallback
+    * would be sufficient.  I don't know of *any* real world
+    * cases with > 17 vertex attributes enabled, so it probably
+    * isn't an issue at this point.
+    */
+   if (brw->vb.nr_enabled >= BRW_VEP_MAX) {
+      intel->Fallback = 1;
+      return;
+   }
+
+   for (i = 0; i < brw->vb.nr_enabled; i++) {
+      struct brw_vertex_element *input = brw->vb.enabled[i];
 
       input->element_size = get_size(input->glarray->Type) * input->glarray->Size;
       input->count = input->glarray->StrideB ? max_index + 1 - min_index : 1;
 
-      if (!input->glarray->BufferObj->Name) {
+      if (input->glarray->BufferObj->Name != 0) {
+        struct intel_buffer_object *intel_buffer =
+           intel_buffer_object(input->glarray->BufferObj);
+
+        /* Named buffer object: Just reference its contents directly. */
+        dri_bo_unreference(input->bo);
+        input->bo = intel_bufferobj_buffer(intel, intel_buffer,
+                                           INTEL_READ);
+        dri_bo_reference(input->bo);
+        input->offset = (unsigned long)input->glarray->Ptr;
+        input->stride = input->glarray->StrideB;
+
+        /* This is a common place to reach if the user mistakenly supplies
+         * a pointer in place of a VBO offset.  If we just let it go through,
+         * we may end up dereferencing a pointer beyond the bounds of the
+         * GTT.  We would hope that the VBO's max_index would save us, but
+         * Mesa appears to hand us min/max values not clipped to the
+         * array object's _MaxElement, and _MaxElement frequently appears
+         * to be wrong anyway.
+         *
+         * The VBO spec allows application termination in this case, and it's
+         * probably a service to the poor programmer to do so rather than
+         * trying to just not render.
+         */
+        assert(input->offset < input->bo->size);
+      } else {
+        if (input->bo != NULL) {
+           /* Already-uploaded vertex data is present from a previous
+            * prepare_vertices, but we had to re-validate state due to
+            * check_aperture failing and a new batch being produced.
+            */
+           continue;
+        }
+
+        /* Queue the buffer object up to be uploaded in the next pass,
+         * when we've decided if we're doing interleaved or not.
+         */
         if (i == 0) {
            /* Position array not properly enabled:
             */
-           if (input->glarray->StrideB == 0)
-              return GL_FALSE;
+            if (input->glarray->StrideB == 0) {
+               intel->Fallback = 1;
+               return;
+            }
 
            interleave = input->glarray->StrideB;
            ptr = input->glarray->Ptr;
         }
         else if (interleave != input->glarray->StrideB ||
-                 (const char *)input->glarray->Ptr - (const char *)ptr < 0 ||
-                 (const char *)input->glarray->Ptr - (const char *)ptr > interleave) {
+                 (const unsigned char *)input->glarray->Ptr - ptr < 0 ||
+                 (const unsigned char *)input->glarray->Ptr - ptr > interleave)
+        {
            interleave = 0;
         }
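
As an illustration (hypothetical client arrays, not part of the commit), the layout the interleave detection above is looking for: every enabled non-VBO array shares one stride and starts within interleave bytes of the position array's pointer, so the code further down uploads the block once and points the remaining elements at upload[0]'s buffer.

    #define GL_GLEXT_PROTOTYPES
    #include <GL/gl.h>
    #include <GL/glext.h>

    /* Hypothetical interleaved client-memory vertex layout. */
    struct demo_vertex {
       GLfloat pos[3];
       GLfloat normal[3];
       GLubyte color[4];
    };

    static void bind_interleaved(const struct demo_vertex *verts)
    {
       const GLsizei stride = sizeof(struct demo_vertex);

       glBindBuffer(GL_ARRAY_BUFFER, 0);   /* client arrays, no named VBO */
       glVertexPointer(3, GL_FLOAT, stride, verts->pos);
       glNormalPointer(GL_FLOAT, stride, verts->normal);
       glColorPointer(4, GL_UNSIGNED_BYTE, stride, verts->color);
       glEnableClientState(GL_VERTEX_ARRAY);
       glEnableClientState(GL_NORMAL_ARRAY);
       glEnableClientState(GL_COLOR_ARRAY);
    }
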
 
@@ -455,43 +453,67 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
       }
    }
 
-   /* Upload interleaved arrays if all uploads are interleaved
-    */
-   if (nr_uploads > 1 && 
-       interleave && 
-       interleave <= 256) {
-      upload[0]->glarray =
-        interleaved_copy_array_to_vbo_array(brw, 0,
-                                            upload[0]->glarray,
-                                            interleave,
-                                            upload[0]->count);
+   /* Handle any arrays to be uploaded. */
+   if (nr_uploads > 1 && interleave && interleave <= 256) {
+      /* All uploads are interleaved, so upload the arrays together as
+       * interleaved.  First, upload the contents and set up upload[0].
+       */
+      copy_array_to_vbo_array(brw, upload[0], interleave);
 
       for (i = 1; i < nr_uploads; i++) {
-        upload[i]->glarray = interleaved_vbo_array(brw,
-                                                   i,
-                                                   upload[0]->glarray,
-                                                   upload[i]->glarray,
-                                                   ptr);
+        /* Then, just point upload[i] at upload[0]'s buffer. */
+        upload[i]->stride = interleave;
+        upload[i]->offset = upload[0]->offset +
+           ((const unsigned char *)upload[i]->glarray->Ptr - ptr);
+        upload[i]->bo = upload[0]->bo;
+        dri_bo_reference(upload[i]->bo);
       }
    }
    else {
+      /* Upload non-interleaved arrays */
       for (i = 0; i < nr_uploads; i++) {
-        upload[i]->glarray = copy_array_to_vbo_array(brw, i,
-                                                     upload[i]->glarray,
-                                                     upload[i]->element_size,
-                                                     upload[i]->count);
-
+          copy_array_to_vbo_array(brw, upload[i], upload[i]->element_size);
       }
    }
 
-   /* XXX: In the rare cases where this happens we fallback all
-    * the way to software rasterization, although a tnl fallback
-    * would be sufficient.  I don't know of *any* real world
-    * cases with > 17 vertex attributes enabled, so it probably
-    * isn't an issue at this point.
+   brw_prepare_query_begin(brw);
+
+   for (i = 0; i < brw->vb.nr_enabled; i++) {
+      struct brw_vertex_element *input = brw->vb.enabled[i];
+
+      brw_add_validated_bo(brw, input->bo);
+   }
+}
+
+static void brw_emit_vertices(struct brw_context *brw)
+{
+   GLcontext *ctx = &brw->intel.ctx;
+   struct intel_context *intel = intel_context(ctx);
+   GLuint i;
+
+   brw_emit_query_begin(brw);
+
+   /* If the VS doesn't read any inputs (calculating vertex position from
+    * a state variable for some reason, for example), emit a single pad
+    * VERTEX_ELEMENT struct and bail.
+    *
+    * The stale VB state stays in place, but they don't do anything unless
+    * a VE loads from them.
     */
-   if (nr_enabled >= BRW_VEP_MAX)
-        return GL_FALSE;
+   if (brw->vb.nr_enabled == 0) {
+      BEGIN_BATCH(3, IGNORE_CLIPRECTS);
+      OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | 1);
+      OUT_BATCH((0 << BRW_VE0_INDEX_SHIFT) |
+               BRW_VE0_VALID |
+               (BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT) |
+               (0 << BRW_VE0_SRC_OFFSET_SHIFT));
+      OUT_BATCH((BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_0_SHIFT) |
+               (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_1_SHIFT) |
+               (BRW_VE1_COMPONENT_STORE_0 << BRW_VE1_COMPONENT_2_SHIFT) |
+               (BRW_VE1_COMPONENT_STORE_1_FLT << BRW_VE1_COMPONENT_3_SHIFT));
+      ADVANCE_BATCH();
+      return;
+   }
 
    /* Now emit VB and VEP state packets.
     *
@@ -499,30 +521,31 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
     * are interleaved or from the same VBO.  TBD if this makes a
     * performance difference.
     */
-   BEGIN_BATCH(1 + nr_enabled * 4, IGNORE_CLIPRECTS);
+   BEGIN_BATCH(1 + brw->vb.nr_enabled * 4, IGNORE_CLIPRECTS);
    OUT_BATCH((CMD_VERTEX_BUFFER << 16) |
-            ((1 + nr_enabled * 4) - 2));
+            ((1 + brw->vb.nr_enabled * 4) - 2));
 
-   for (i = 0; i < nr_enabled; i++) {
-      struct brw_vertex_element *input = enabled[i];
+   for (i = 0; i < brw->vb.nr_enabled; i++) {
+      struct brw_vertex_element *input = brw->vb.enabled[i];
 
       OUT_BATCH((i << BRW_VB0_INDEX_SHIFT) |
                BRW_VB0_ACCESS_VERTEXDATA |
-               (input->glarray->StrideB << BRW_VB0_PITCH_SHIFT));
-      OUT_RELOC(array_buffer(intel, input->glarray),
-               DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
-               (GLuint)input->glarray->Ptr);
-      OUT_BATCH(max_index);
+               (input->stride << BRW_VB0_PITCH_SHIFT));
+      OUT_RELOC(input->bo,
+               I915_GEM_DOMAIN_VERTEX, 0,
+               input->offset);
+      OUT_BATCH(brw->vb.max_index);
       OUT_BATCH(0); /* Instance data step rate */
    }
    ADVANCE_BATCH();
 
-   BEGIN_BATCH(1 + nr_enabled * 2, IGNORE_CLIPRECTS);
-   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + nr_enabled * 2) - 2));
-   for (i = 0; i < nr_enabled; i++) {
-      struct brw_vertex_element *input = enabled[i];
+   BEGIN_BATCH(1 + brw->vb.nr_enabled * 2, IGNORE_CLIPRECTS);
+   OUT_BATCH((CMD_VERTEX_ELEMENT << 16) | ((1 + brw->vb.nr_enabled * 2) - 2));
+   for (i = 0; i < brw->vb.nr_enabled; i++) {
+      struct brw_vertex_element *input = brw->vb.enabled[i];
       uint32_t format = get_surface_type(input->glarray->Type,
                                         input->glarray->Size,
+                                        input->glarray->Format,
                                         input->glarray->Normalized);
       uint32_t comp0 = BRW_VE1_COMPONENT_STORE_SRC;
       uint32_t comp1 = BRW_VE1_COMPONENT_STORE_SRC;
@@ -548,18 +571,33 @@ GLboolean brw_upload_vertices( struct brw_context *brw,
                ((i * 4) << BRW_VE1_DST_OFFSET_SHIFT));
    }
    ADVANCE_BATCH();
-
-   return GL_TRUE;
 }
 
-void brw_upload_indices( struct brw_context *brw,
-                        const struct _mesa_index_buffer *index_buffer )
+const struct brw_tracked_state brw_vertices = {
+   .dirty = {
+      .mesa = 0,
+      .brw = BRW_NEW_BATCH | BRW_NEW_VERTICES,
+      .cache = 0,
+   },
+   .prepare = brw_prepare_vertices,
+   .emit = brw_emit_vertices,
+};
+
+static void brw_prepare_indices(struct brw_context *brw)
 {
    GLcontext *ctx = &brw->intel.ctx;
    struct intel_context *intel = &brw->intel;
-   GLuint ib_size = get_size(index_buffer->type) * index_buffer->count;
-   struct gl_buffer_object *bufferobj = index_buffer->obj;
-   GLuint offset = (GLuint)index_buffer->ptr;
+   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
+   GLuint ib_size;
+   dri_bo *bo = NULL;
+   struct gl_buffer_object *bufferobj;
+   GLuint offset;
+
+   if (index_buffer == NULL)
+      return;
+
+   ib_size = get_size(index_buffer->type) * index_buffer->count;
+   bufferobj = index_buffer->obj;
 
    /* Turn into a proper VBO:
     */
@@ -567,50 +605,64 @@ void brw_upload_indices( struct brw_context *brw,
      
       /* Get new bufferobj, offset:
        */
-      get_space(brw, ib_size, &bufferobj, &offset);
+      get_space(brw, ib_size, &bo, &offset);
 
       /* Straight upload
        */
-      ctx->Driver.BufferSubData( ctx,
-                                GL_ELEMENT_ARRAY_BUFFER_ARB,
-                                offset, 
-                                ib_size,
-                                index_buffer->ptr,
-                                bufferobj);
+      if (intel->intelScreen->kernel_exec_fencing) {
+        drm_intel_gem_bo_map_gtt(bo);
+        memcpy((char *)bo->virtual + offset, index_buffer->ptr, ib_size);
+        drm_intel_gem_bo_unmap_gtt(bo);
+      } else {
+        dri_bo_subdata(bo, offset, ib_size, index_buffer->ptr);
+      }
    } else {
+      offset = (GLuint) (unsigned long) index_buffer->ptr;
+
       /* If the index buffer isn't aligned to its element size, we have to
        * rebase it into a temporary.
        */
        if ((get_size(index_buffer->type) - 1) & offset) {
-           struct gl_buffer_object *vbo;
-           GLuint voffset;
            GLubyte *map = ctx->Driver.MapBuffer(ctx,
                                                 GL_ELEMENT_ARRAY_BUFFER_ARB,
                                                 GL_DYNAMIC_DRAW_ARB,
                                                 bufferobj);
            map += offset;
-           get_space(brw, ib_size, &vbo, &voffset);
-           
-           ctx->Driver.BufferSubData(ctx,
-                                     GL_ELEMENT_ARRAY_BUFFER_ARB,
-                                     voffset,
-                                     ib_size,
-                                     map,
-                                     vbo);
-           ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
 
-           bufferobj = vbo;
-           offset = voffset;
+          get_space(brw, ib_size, &bo, &offset);
+
+          dri_bo_subdata(bo, offset, ib_size, map);
+
+           ctx->Driver.UnmapBuffer(ctx, GL_ELEMENT_ARRAY_BUFFER_ARB, bufferobj);
+       } else {
+         bo = intel_bufferobj_buffer(intel, intel_buffer_object(bufferobj),
+                                     INTEL_READ);
+         dri_bo_reference(bo);
        }
    }
 
+   dri_bo_unreference(brw->ib.bo);
+   brw->ib.bo = bo;
+   brw->ib.offset = offset;
+
+   brw_add_validated_bo(brw, brw->ib.bo);
+}
+
+static void brw_emit_indices(struct brw_context *brw)
+{
+   struct intel_context *intel = &brw->intel;
+   const struct _mesa_index_buffer *index_buffer = brw->ib.ib;
+   GLuint ib_size;
+
+   if (index_buffer == NULL)
+      return;
+
+   ib_size = get_size(index_buffer->type) * index_buffer->count - 1;
+
    /* Emit the indexbuffer packet:
     */
    {
       struct brw_indexbuffer ib;
-      dri_bo *buffer = intel_bufferobj_buffer(intel,
-                                             intel_buffer_object(bufferobj),
-                                             INTEL_READ);
 
       memset(&ib, 0, sizeof(ib));
    
@@ -622,10 +674,23 @@ void brw_upload_indices( struct brw_context *brw,
 
       BEGIN_BATCH(4, IGNORE_CLIPRECTS);
       OUT_BATCH( ib.header.dword );
-      OUT_RELOC( buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, offset);
-      OUT_RELOC( buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
-                offset + ib_size);
+      OUT_RELOC(brw->ib.bo,
+               I915_GEM_DOMAIN_VERTEX, 0,
+               brw->ib.offset);
+      OUT_RELOC(brw->ib.bo,
+               I915_GEM_DOMAIN_VERTEX, 0,
+               brw->ib.offset + ib_size);
       OUT_BATCH( 0 );
       ADVANCE_BATCH();
    }
 }
+
+const struct brw_tracked_state brw_indices = {
+   .dirty = {
+      .mesa = 0,
+      .brw = BRW_NEW_BATCH | BRW_NEW_INDICES,
+      .cache = 0,
+   },
+   .prepare = brw_prepare_indices,
+   .emit = brw_emit_indices,
+};