intel: Reformat to the kernel coding style. Welcome to the 8-space future.
author    Eric Anholt <eric@anholt.net>
          Tue, 6 Oct 2009 19:40:42 +0000 (12:40 -0700)
committer Eric Anholt <eric@anholt.net>
          Tue, 6 Oct 2009 22:45:06 +0000 (15:45 -0700)
This is done with:
    Lindent *.[ch]
    perl -pi -e 's|drm_intel_bo \* |drm_intel_bo *|g' *.[ch]
    perl -pi -e 's|drm_intel_bufmgr \* |drm_intel_bufmgr *|g' *.[ch]
    perl -pi -e 's|drm_intel_bo_gem \* |drm_intel_bo_gem *|g' *.[ch]
    perl -pi -e 's|drm_intel_bufmgr_gem \* |drm_intel_bufmgr_gem *|g' *.[ch]
    perl -pi -e 's|_fake \* |_fake *|g' *.[ch]
plus hand-editing to whack indented comments into line and other touchups.
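
Why the perl passes: Lindent does not know libdrm's own typedefs, so it emits
"type * name" for pointers to them; the one-liners above collapse that back to
the kernel-style "type *name" for the listed names. A minimal sketch of the
effect (hypothetical function names, stub typedefs only to keep the snippet
self-contained):

    /* Stub typedefs standing in for the real libdrm definitions. */
    typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
    typedef struct _drm_intel_bo drm_intel_bo;

    /* As Lindent emits it, before the perl fixups: */
    drm_intel_bo * lookup_before(drm_intel_bufmgr * bufmgr);

    /* After 's|drm_intel_bo \* |drm_intel_bo *|g' and friends: */
    drm_intel_bo *lookup_after(drm_intel_bufmgr *bufmgr);

Typedefs not on the list, such as uint32_t, keep Lindent's "uint32_t * name"
spacing, which is why forms like "uint32_t * tiling_mode" appear in the
reformatted code below.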

libdrm/intel/intel_atomic.h
libdrm/intel/intel_bufmgr.c
libdrm/intel/intel_bufmgr.h
libdrm/intel/intel_bufmgr_fake.c
libdrm/intel/intel_bufmgr_gem.c
libdrm/intel/intel_bufmgr_priv.h
libdrm/intel/mm.c
libdrm/intel/mm.h

diff --git a/libdrm/intel/intel_atomic.h b/libdrm/intel/intel_atomic.h
index 562394a..9eb50a1 100644
@@ -42,7 +42,9 @@
 
 #define HAS_ATOMIC_OPS 1
 
-typedef struct { int atomic; } atomic_t;
+typedef struct {
+       int atomic;
+} atomic_t;
 
 # define atomic_read(x) ((x)->atomic)
 # define atomic_set(x, val) ((x)->atomic = (val))
diff --git a/libdrm/intel/intel_bufmgr.c b/libdrm/intel/intel_bufmgr.c
index 20e59b8..fd5a2e7 100644
  * Convenience functions for buffer management methods.
  */
 
-drm_intel_bo *
-drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
-                  unsigned long size, unsigned int alignment)
+drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
+                                unsigned long size, unsigned int alignment)
 {
-   return bufmgr->bo_alloc(bufmgr, name, size, alignment);
+       return bufmgr->bo_alloc(bufmgr, name, size, alignment);
 }
 
-drm_intel_bo *
-drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
-                             unsigned long size, unsigned int alignment)
+drm_intel_bo *drm_intel_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+                                           const char *name,
+                                           unsigned long size,
+                                           unsigned int alignment)
 {
-   return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
+       return bufmgr->bo_alloc_for_render(bufmgr, name, size, alignment);
 }
 
-void
-drm_intel_bo_reference(drm_intel_bo *bo)
+void drm_intel_bo_reference(drm_intel_bo *bo)
 {
-   bo->bufmgr->bo_reference(bo);
+       bo->bufmgr->bo_reference(bo);
 }
 
-void
-drm_intel_bo_unreference(drm_intel_bo *bo)
+void drm_intel_bo_unreference(drm_intel_bo *bo)
 {
-   if (bo == NULL)
-      return;
+       if (bo == NULL)
+               return;
 
-   bo->bufmgr->bo_unreference(bo);
+       bo->bufmgr->bo_unreference(bo);
 }
 
-int
-drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
+int drm_intel_bo_map(drm_intel_bo *buf, int write_enable)
 {
-   return buf->bufmgr->bo_map(buf, write_enable);
+       return buf->bufmgr->bo_map(buf, write_enable);
 }
 
-int
-drm_intel_bo_unmap(drm_intel_bo *buf)
+int drm_intel_bo_unmap(drm_intel_bo *buf)
 {
-   return buf->bufmgr->bo_unmap(buf);
+       return buf->bufmgr->bo_unmap(buf);
 }
 
 int
 drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
                     unsigned long size, const void *data)
 {
-   int ret;
+       int ret;
 
-   if (bo->bufmgr->bo_subdata)
-      return bo->bufmgr->bo_subdata(bo, offset, size, data);
-   if (size == 0 || data == NULL)
-      return 0;
+       if (bo->bufmgr->bo_subdata)
+               return bo->bufmgr->bo_subdata(bo, offset, size, data);
+       if (size == 0 || data == NULL)
+               return 0;
 
-   ret = drm_intel_bo_map(bo, 1);
-   if (ret)
-       return ret;
-   memcpy((unsigned char *)bo->virtual + offset, data, size);
-   drm_intel_bo_unmap(bo);
-   return 0;
+       ret = drm_intel_bo_map(bo, 1);
+       if (ret)
+               return ret;
+       memcpy((unsigned char *)bo->virtual + offset, data, size);
+       drm_intel_bo_unmap(bo);
+       return 0;
 }
 
 int
 drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
                         unsigned long size, void *data)
 {
-   int ret;
-   if (bo->bufmgr->bo_subdata)
-      return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
+       int ret;
+       if (bo->bufmgr->bo_subdata)
+               return bo->bufmgr->bo_get_subdata(bo, offset, size, data);
 
-   if (size == 0 || data == NULL)
-      return 0;
+       if (size == 0 || data == NULL)
+               return 0;
 
-   ret = drm_intel_bo_map(bo, 0);
-   if (ret)
-       return ret;
-   memcpy(data, (unsigned char *)bo->virtual + offset, size);
-   drm_intel_bo_unmap(bo);
-   return 0;
+       ret = drm_intel_bo_map(bo, 0);
+       if (ret)
+               return ret;
+       memcpy(data, (unsigned char *)bo->virtual + offset, size);
+       drm_intel_bo_unmap(bo);
+       return 0;
 }
 
-void
-drm_intel_bo_wait_rendering(drm_intel_bo *bo)
+void drm_intel_bo_wait_rendering(drm_intel_bo *bo)
 {
-   bo->bufmgr->bo_wait_rendering(bo);
+       bo->bufmgr->bo_wait_rendering(bo);
 }
 
-void
-drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
+void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr)
 {
-   bufmgr->destroy(bufmgr);
+       bufmgr->destroy(bufmgr);
 }
 
 int
 drm_intel_bo_exec(drm_intel_bo *bo, int used,
-                 drm_clip_rect_t *cliprects, int num_cliprects,
-                 int DR4)
+                 drm_clip_rect_t * cliprects, int num_cliprects, int DR4)
 {
-   return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
+       return bo->bufmgr->bo_exec(bo, used, cliprects, num_cliprects, DR4);
 }
 
-void
-drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
+void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug)
 {
-   bufmgr->debug = enable_debug;
+       bufmgr->debug = enable_debug;
 }
 
-int
-drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count)
+int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count)
 {
        return bo_array[0]->bufmgr->check_aperture_space(bo_array, count);
 }
 
-int
-drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name)
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name)
 {
-    if (bo->bufmgr->bo_flink)
-       return bo->bufmgr->bo_flink(bo, name);
+       if (bo->bufmgr->bo_flink)
+               return bo->bufmgr->bo_flink(bo, name);
 
-    return -ENODEV;
+       return -ENODEV;
 }
 
 int
@@ -174,43 +164,41 @@ drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                                         read_domains, write_domain);
 }
 
-int
-drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
+int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment)
 {
-    if (bo->bufmgr->bo_pin)
-       return bo->bufmgr->bo_pin(bo, alignment);
+       if (bo->bufmgr->bo_pin)
+               return bo->bufmgr->bo_pin(bo, alignment);
 
-    return -ENODEV;
+       return -ENODEV;
 }
 
-int
-drm_intel_bo_unpin(drm_intel_bo *bo)
+int drm_intel_bo_unpin(drm_intel_bo *bo)
 {
-    if (bo->bufmgr->bo_unpin)
-       return bo->bufmgr->bo_unpin(bo);
+       if (bo->bufmgr->bo_unpin)
+               return bo->bufmgr->bo_unpin(bo);
 
-    return -ENODEV;
+       return -ENODEV;
 }
 
-int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t stride)
 {
-    if (bo->bufmgr->bo_set_tiling)
-       return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
+       if (bo->bufmgr->bo_set_tiling)
+               return bo->bufmgr->bo_set_tiling(bo, tiling_mode, stride);
 
-    *tiling_mode = I915_TILING_NONE;
-    return 0;
+       *tiling_mode = I915_TILING_NONE;
+       return 0;
 }
 
-int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
-                           uint32_t *swizzle_mode)
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+                           uint32_t * swizzle_mode)
 {
-    if (bo->bufmgr->bo_get_tiling)
-       return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
+       if (bo->bufmgr->bo_get_tiling)
+               return bo->bufmgr->bo_get_tiling(bo, tiling_mode, swizzle_mode);
 
-    *tiling_mode = I915_TILING_NONE;
-    *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
-    return 0;
+       *tiling_mode = I915_TILING_NONE;
+       *swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+       return 0;
 }
 
 int drm_intel_bo_disable_reuse(drm_intel_bo *bo)
@@ -227,17 +215,14 @@ int drm_intel_bo_busy(drm_intel_bo *bo)
        return 0;
 }
 
-int
-drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
+int drm_intel_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
 {
        return bo->bufmgr->bo_references(bo, target_bo);
 }
 
-int
-drm_intel_get_pipe_from_crtc_id (drm_intel_bufmgr *bufmgr, int crtc_id)
+int drm_intel_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
 {
        if (bufmgr->get_pipe_from_crtc_id)
                return bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
        return -1;
 }
-
diff --git a/libdrm/intel/intel_bufmgr.h b/libdrm/intel/intel_bufmgr.h
index cb7196c..9f07a94 100644
@@ -40,37 +40,40 @@ typedef struct _drm_intel_bufmgr drm_intel_bufmgr;
 typedef struct _drm_intel_bo drm_intel_bo;
 
 struct _drm_intel_bo {
-    /**
-     * Size in bytes of the buffer object.
-     *
-     * The size may be larger than the size originally requested for the
-     * allocation, such as being aligned to page size.
-     */
-    unsigned long size;
-    /**
-     * Alignment requirement for object
-     *
-     * Used for GTT mapping & pinning the object.
-     */
-    unsigned long align;
-
-    /**
-     * Card virtual address (offset from the beginning of the aperture) for the
-     * object.  Only valid while validated.
-     */
-    unsigned long offset;
-    /**
-     * Virtual address for accessing the buffer data.  Only valid while mapped.
-     */
-    void *virtual;
-
-    /** Buffer manager context associated with this buffer object */
-    drm_intel_bufmgr *bufmgr;
-
-    /**
-     * MM-specific handle for accessing object
-     */
-    int handle;
+       /**
+        * Size in bytes of the buffer object.
+        *
+        * The size may be larger than the size originally requested for the
+        * allocation, such as being aligned to page size.
+        */
+       unsigned long size;
+
+       /**
+        * Alignment requirement for object
+        *
+        * Used for GTT mapping & pinning the object.
+        */
+       unsigned long align;
+
+       /**
+        * Card virtual address (offset from the beginning of the aperture)
+        * for the object.  Only valid while validated.
+        */
+       unsigned long offset;
+
+       /**
+        * Virtual address for accessing the buffer data.  Only valid while
+        * mapped.
+        */
+       void *virtual;
+
+       /** Buffer manager context associated with this buffer object */
+       drm_intel_bufmgr *bufmgr;
+
+       /**
+        * MM-specific handle for accessing object
+        */
+       int handle;
 };
 
 drm_intel_bo *drm_intel_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
@@ -85,28 +88,27 @@ int drm_intel_bo_map(drm_intel_bo *bo, int write_enable);
 int drm_intel_bo_unmap(drm_intel_bo *bo);
 
 int drm_intel_bo_subdata(drm_intel_bo *bo, unsigned long offset,
-                    unsigned long size, const void *data);
+                        unsigned long size, const void *data);
 int drm_intel_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
-                        unsigned long size, void *data);
+                            unsigned long size, void *data);
 void drm_intel_bo_wait_rendering(drm_intel_bo *bo);
 
 void drm_intel_bufmgr_set_debug(drm_intel_bufmgr *bufmgr, int enable_debug);
 void drm_intel_bufmgr_destroy(drm_intel_bufmgr *bufmgr);
 int drm_intel_bo_exec(drm_intel_bo *bo, int used,
-                     drm_clip_rect_t *cliprects, int num_cliprects,
-                     int DR4);
-int drm_intel_bufmgr_check_aperture_space(drm_intel_bo **bo_array, int count);
+                     drm_clip_rect_t * cliprects, int num_cliprects, int DR4);
+int drm_intel_bufmgr_check_aperture_space(drm_intel_bo ** bo_array, int count);
 
 int drm_intel_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                            drm_intel_bo *target_bo, uint32_t target_offset,
                            uint32_t read_domains, uint32_t write_domain);
 int drm_intel_bo_pin(drm_intel_bo *bo, uint32_t alignment);
 int drm_intel_bo_unpin(drm_intel_bo *bo);
-int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+int drm_intel_bo_set_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
                            uint32_t stride);
-int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
-                       uint32_t *swizzle_mode);
-int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t *name);
+int drm_intel_bo_get_tiling(drm_intel_bo *bo, uint32_t * tiling_mode,
+                           uint32_t * swizzle_mode);
+int drm_intel_bo_flink(drm_intel_bo *bo, uint32_t * name);
 int drm_intel_bo_busy(drm_intel_bo *bo);
 
 int drm_intel_bo_disable_reuse(drm_intel_bo *bo);
@@ -129,26 +131,29 @@ drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
                                             unsigned long low_offset,
                                             void *low_virtual,
                                             unsigned long size,
-                                            volatile unsigned int *last_dispatch);
+                                            volatile unsigned int
+                                            *last_dispatch);
 void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
-                                            volatile unsigned int *last_dispatch);
+                                            volatile unsigned int
+                                            *last_dispatch);
 void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
-                                            int (*exec)(drm_intel_bo *bo,
-                                                        unsigned int used,
-                                                        void *priv),
+                                            int (*exec) (drm_intel_bo *bo,
+                                                         unsigned int used,
+                                                         void *priv),
                                             void *priv);
 void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
-                                             unsigned int (*emit)(void *priv),
-                                             void (*wait)(unsigned int fence,
-                                                          void *priv),
+                                             unsigned int (*emit) (void *priv),
+                                             void (*wait) (unsigned int fence,
+                                                           void *priv),
                                              void *priv);
 drm_intel_bo *drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
                                             const char *name,
-                                            unsigned long offset, unsigned long size,
-                                            void *virtual);
+                                            unsigned long offset,
+                                            unsigned long size, void *virtual);
 void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
-                                            void (*invalidate_cb)(drm_intel_bo *bo,
-                                                                  void *ptr),
+                                            void (*invalidate_cb) (drm_intel_bo
+                                                                   * bo,
+                                                                   void *ptr),
                                             void *ptr);
 
 void drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr);
@@ -174,8 +179,8 @@ void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
 #define dri_bo_emit_reloc(reloc_bo, read, write, target_offset,                \
                          reloc_offset, target_bo)                      \
        drm_intel_bo_emit_reloc(reloc_bo, reloc_offset,                 \
-                           target_bo, target_offset,                   \
-                           read, write);
+                               target_bo, target_offset,               \
+                               read, write);
 #define dri_bo_pin drm_intel_bo_pin
 #define dri_bo_unpin drm_intel_bo_unpin
 #define dri_bo_get_tiling drm_intel_bo_get_tiling
@@ -196,4 +201,3 @@ void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr);
 /** @{ */
 
 #endif /* INTEL_BUFMGR_H */
-
diff --git a/libdrm/intel/intel_bufmgr_fake.c b/libdrm/intel/intel_bufmgr_fake.c
index 969c03d..f325482 100644
@@ -54,8 +54,8 @@
 #define ALIGN(value, alignment)  ((value + alignment - 1) & ~(alignment - 1))
 
 #define DBG(...) do {                                  \
-   if (bufmgr_fake->bufmgr.debug)                      \
-      drmMsg(__VA_ARGS__);                             \
+       if (bufmgr_fake->bufmgr.debug)                  \
+               drmMsg(__VA_ARGS__);                    \
 } while (0)
 
 /* Internal flags:
  */
 #define MAX_RELOCS 4096
 
-struct fake_buffer_reloc
-{
-   /** Buffer object that the relocation points at. */
-   drm_intel_bo *target_buf;
-   /** Offset of the relocation entry within reloc_buf. */
-   uint32_t offset;
-   /** Cached value of the offset when we last performed this relocation. */
-   uint32_t last_target_offset;
-   /** Value added to target_buf's offset to get the relocation entry. */
-   uint32_t delta;
-   /** Cache domains the target buffer is read into. */
-   uint32_t read_domains;
-   /** Cache domain the target buffer will have dirty cachelines in. */
-   uint32_t write_domain;
+struct fake_buffer_reloc {
+       /** Buffer object that the relocation points at. */
+       drm_intel_bo *target_buf;
+       /** Offset of the relocation entry within reloc_buf. */
+       uint32_t offset;
+       /**
+        * Cached value of the offset when we last performed this relocation.
+        */
+       uint32_t last_target_offset;
+       /** Value added to target_buf's offset to get the relocation entry. */
+       uint32_t delta;
+       /** Cache domains the target buffer is read into. */
+       uint32_t read_domains;
+       /** Cache domain the target buffer will have dirty cachelines in. */
+       uint32_t write_domain;
 };
 
 struct block {
-   struct block *next, *prev;
-   struct mem_block *mem;      /* BM_MEM_AGP */
-
-   /**
-    * Marks that the block is currently in the aperture and has yet to be
-    * fenced.
-    */
-   unsigned on_hardware:1;
-   /**
-    * Marks that the block is currently fenced (being used by rendering) and
-    * can't be freed until @fence is passed.
-    */
-   unsigned fenced:1;
-
-   /** Fence cookie for the block. */
-   unsigned fence; /* Split to read_fence, write_fence */
-
-   drm_intel_bo *bo;
-   void *virtual;
+       struct block *next, *prev;
+       struct mem_block *mem;  /* BM_MEM_AGP */
+
+       /**
+        * Marks that the block is currently in the aperture and has yet to be
+        * fenced.
+        */
+       unsigned on_hardware:1;
+       /**
+        * Marks that the block is currently fenced (being used by rendering)
+        * and can't be freed until @fence is passed.
+        */
+       unsigned fenced:1;
+
+       /** Fence cookie for the block. */
+       unsigned fence;         /* Split to read_fence, write_fence */
+
+       drm_intel_bo *bo;
+       void *virtual;
 };
 
 typedef struct _bufmgr_fake {
-   drm_intel_bufmgr bufmgr;
-
-   pthread_mutex_t lock;
-
-   unsigned long low_offset;
-   unsigned long size;
-   void *virtual;
-
-   struct mem_block *heap;
-
-   unsigned buf_nr;            /* for generating ids */
-
-   /**
-    * List of blocks which are currently in the GART but haven't been
-    * fenced yet.
-    */
-   struct block on_hardware;
-   /**
-    * List of blocks which are in the GART and have an active fence on them.
-    */
-   struct block fenced;
-   /**
-    * List of blocks which have an expired fence and are ready to be evicted.
-    */
-   struct block lru;
-
-   unsigned int last_fence;
-
-   unsigned fail:1;
-   unsigned need_fence:1;
-   int thrashing;
-
-   /**
-    * Driver callback to emit a fence, returning the cookie.
-    *
-    * This allows the driver to hook in a replacement for the DRM usage in
-    * bufmgr_fake.
-    *
-    * Currently, this also requires that a write flush be emitted before
-    * emitting the fence, but this should change.
-    */
-   unsigned int (*fence_emit)(void *private);
-   /** Driver callback to wait for a fence cookie to have passed. */
-   void (*fence_wait)(unsigned int fence, void *private);
-   void *fence_priv;
-
-   /**
-    * Driver callback to execute a buffer.
-    *
-    * This allows the driver to hook in a replacement for the DRM usage in
-    * bufmgr_fake.
-    */
-   int (*exec)(drm_intel_bo *bo, unsigned int used, void *priv);
-   void *exec_priv;
-
-   /** Driver-supplied argument to driver callbacks */
-   void *driver_priv;
-   /* Pointer to kernel-updated sarea data for the last completed user irq */
-   volatile int *last_dispatch;
-
-   int fd;
-
-   int debug;
-
-   int performed_rendering;
+       drm_intel_bufmgr bufmgr;
+
+       pthread_mutex_t lock;
+
+       unsigned long low_offset;
+       unsigned long size;
+       void *virtual;
+
+       struct mem_block *heap;
+
+       unsigned buf_nr;        /* for generating ids */
+
+       /**
+        * List of blocks which are currently in the GART but haven't been
+        * fenced yet.
+        */
+       struct block on_hardware;
+       /**
+        * List of blocks which are in the GART and have an active fence on
+        * them.
+        */
+       struct block fenced;
+       /**
+        * List of blocks which have an expired fence and are ready to be
+        * evicted.
+        */
+       struct block lru;
+
+       unsigned int last_fence;
+
+       unsigned fail:1;
+       unsigned need_fence:1;
+       int thrashing;
+
+       /**
+        * Driver callback to emit a fence, returning the cookie.
+        *
+        * This allows the driver to hook in a replacement for the DRM usage in
+        * bufmgr_fake.
+        *
+        * Currently, this also requires that a write flush be emitted before
+        * emitting the fence, but this should change.
+        */
+       unsigned int (*fence_emit) (void *private);
+       /** Driver callback to wait for a fence cookie to have passed. */
+       void (*fence_wait) (unsigned int fence, void *private);
+       void *fence_priv;
+
+       /**
+        * Driver callback to execute a buffer.
+        *
+        * This allows the driver to hook in a replacement for the DRM usage in
+        * bufmgr_fake.
+        */
+       int (*exec) (drm_intel_bo *bo, unsigned int used, void *priv);
+       void *exec_priv;
+
+       /** Driver-supplied argument to driver callbacks */
+       void *driver_priv;
+       /**
+        * Pointer to kernel-updated sarea data for the last completed user irq
+        */
+       volatile int *last_dispatch;
+
+       int fd;
+
+       int debug;
+
+       int performed_rendering;
 } drm_intel_bufmgr_fake;
 
 typedef struct _drm_intel_bo_fake {
-   drm_intel_bo bo;
-
-   unsigned id;                        /* debug only */
-   const char *name;
-
-   unsigned dirty:1;
-   /** has the card written to this buffer - we make need to copy it back */
-   unsigned card_dirty:1;
-   unsigned int refcount;
-   /* Flags may consist of any of the DRM_BO flags, plus
-    * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the first two
-    * driver private flags.
-    */
-   uint64_t flags;
-   /** Cache domains the target buffer is read into. */
-   uint32_t read_domains;
-   /** Cache domain the target buffer will have dirty cachelines in. */
-   uint32_t write_domain;
-
-   unsigned int alignment;
-   int is_static, validated;
-   unsigned int map_count;
-
-   /** relocation list */
-   struct fake_buffer_reloc *relocs;
-   int nr_relocs;
-   /**
-    * Total size of the target_bos of this buffer.
-    *
-    * Used for estimation in check_aperture.
-    */
-   unsigned int child_size;
-
-   struct block *block;
-   void *backing_store;
-   void (*invalidate_cb)(drm_intel_bo *bo, void *ptr);
-   void *invalidate_ptr;
+       drm_intel_bo bo;
+
+       unsigned id;            /* debug only */
+       const char *name;
+
+       unsigned dirty:1;
+       /**
+        * has the card written to this buffer - we make need to copy it back
+        */
+       unsigned card_dirty:1;
+       unsigned int refcount;
+       /* Flags may consist of any of the DRM_BO flags, plus
+        * DRM_BO_NO_BACKING_STORE and BM_NO_FENCE_SUBDATA, which are the
+        * first two driver private flags.
+        */
+       uint64_t flags;
+       /** Cache domains the target buffer is read into. */
+       uint32_t read_domains;
+       /** Cache domain the target buffer will have dirty cachelines in. */
+       uint32_t write_domain;
+
+       unsigned int alignment;
+       int is_static, validated;
+       unsigned int map_count;
+
+       /** relocation list */
+       struct fake_buffer_reloc *relocs;
+       int nr_relocs;
+       /**
+        * Total size of the target_bos of this buffer.
+        *
+        * Used for estimation in check_aperture.
+        */
+       unsigned int child_size;
+
+       struct block *block;
+       void *backing_store;
+       void (*invalidate_cb) (drm_intel_bo *bo, void *ptr);
+       void *invalidate_ptr;
 } drm_intel_bo_fake;
 
 static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
@@ -223,185 +230,190 @@ static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
 
 #define MAXFENCE 0x7fffffff
 
-static int FENCE_LTE( unsigned a, unsigned b )
+static int
+FENCE_LTE(unsigned a, unsigned b)
 {
-   if (a == b)
-      return 1;
+       if (a == b)
+               return 1;
 
-   if (a < b && b - a < (1<<24))
-      return 1;
+       if (a < b && b - a < (1 << 24))
+               return 1;
 
-   if (a > b && MAXFENCE - a + b < (1<<24))
-      return 1;
+       if (a > b && MAXFENCE - a + b < (1 << 24))
+               return 1;
 
-   return 0;
+       return 0;
 }
 
-void drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
-                                             unsigned int (*emit)(void *priv),
-                                             void (*wait)(unsigned int fence,
-                                                          void *priv),
-                                             void *priv)
+void
+drm_intel_bufmgr_fake_set_fence_callback(drm_intel_bufmgr *bufmgr,
+                                        unsigned int (*emit) (void *priv),
+                                        void (*wait) (unsigned int fence,
+                                                      void *priv),
+                                        void *priv)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
+       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
 
-   bufmgr_fake->fence_emit = emit;
-   bufmgr_fake->fence_wait = wait;
-   bufmgr_fake->fence_priv = priv;
+       bufmgr_fake->fence_emit = emit;
+       bufmgr_fake->fence_wait = wait;
+       bufmgr_fake->fence_priv = priv;
 }
 
 static unsigned int
 _fence_emit_internal(drm_intel_bufmgr_fake *bufmgr_fake)
 {
-   struct drm_i915_irq_emit ie;
-   int ret, seq = 1;
-
-   if (bufmgr_fake->fence_emit != NULL) {
-      seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
-      return seq;
-   }
-
-   ie.irq_seq = &seq;
-   ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
-                            &ie, sizeof(ie));
-   if (ret) {
-      drmMsg("%s: drm_i915_irq_emit: %d\n", __FUNCTION__, ret);
-      abort();
-   }
-
-   DBG("emit 0x%08x\n", seq);
-   return seq;
+       struct drm_i915_irq_emit ie;
+       int ret, seq = 1;
+
+       if (bufmgr_fake->fence_emit != NULL) {
+               seq = bufmgr_fake->fence_emit(bufmgr_fake->fence_priv);
+               return seq;
+       }
+
+       ie.irq_seq = &seq;
+       ret = drmCommandWriteRead(bufmgr_fake->fd, DRM_I915_IRQ_EMIT,
+                                 &ie, sizeof(ie));
+       if (ret) {
+               drmMsg("%s: drm_i915_irq_emit: %d\n", __FUNCTION__, ret);
+               abort();
+       }
+
+       DBG("emit 0x%08x\n", seq);
+       return seq;
 }
 
 static void
 _fence_wait_internal(drm_intel_bufmgr_fake *bufmgr_fake, int seq)
 {
-   struct drm_i915_irq_wait iw;
-   int hw_seq, busy_count = 0;
-   int ret;
-   int kernel_lied;
-
-   if (bufmgr_fake->fence_wait != NULL) {
-      bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
-      clear_fenced(bufmgr_fake, seq);
-      return;
-   }
-
-   DBG("wait 0x%08x\n", iw.irq_seq);
-
-   iw.irq_seq = seq;
-
-   /* The kernel IRQ_WAIT implementation is all sorts of broken.
-    * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit unsigned
-    *    range.
-    * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
-    *    signed range.
-    * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
-    *    signed range.
-    * 4) It returns -EBUSY in 3 seconds even if the hardware is still
-    *    successfully chewing through buffers.
-    *
-    * Assume that in userland we treat sequence numbers as ints, which makes
-    * some of the comparisons convenient, since the sequence numbers are
-    * all postive signed integers.
-    *
-    * From this we get several cases we need to handle.  Here's a timeline.
-    * 0x2   0x7                                         0x7ffffff8   0x7ffffffd
-    *   |    |                                                   |    |
-    * -------------------------------------------------------------------
-    *
-    * A) Normal wait for hw to catch up
-    * hw_seq seq
-    *   |    |
-    * -------------------------------------------------------------------
-    * seq - hw_seq = 5.  If we call IRQ_WAIT, it will wait for hw to catch up.
-    *
-    * B) Normal wait for a sequence number that's already passed.
-    * seq    hw_seq
-    *   |    |
-    * -------------------------------------------------------------------
-    * seq - hw_seq = -5.  If we call IRQ_WAIT, it returns 0 quickly.
-    *
-    * C) Hardware has already wrapped around ahead of us
-    * hw_seq                                                         seq
-    *   |                                                             |
-    * -------------------------------------------------------------------
-    * seq - hw_seq = 0x80000000 - 5.  If we called IRQ_WAIT, it would wait
-    * for hw_seq >= seq, which may never occur.  Thus, we want to catch this
-    * in userland and return 0.
-    *
-    * D) We've wrapped around ahead of the hardware.
-    * seq                                                           hw_seq
-    *   |                                                             |
-    * -------------------------------------------------------------------
-    * seq - hw_seq = -(0x80000000 - 5).  If we called IRQ_WAIT, it would return
-    * 0 quickly because hw_seq >= seq, even though the hardware isn't caught up.
-    * Thus, we need to catch this early return in userland and bother the
-    * kernel until the hardware really does catch up.
-    *
-    * E) Hardware might wrap after we test in userland.
-    *                                                         hw_seq  seq
-    *                                                            |    |
-    * -------------------------------------------------------------------
-    * seq - hw_seq = 5.  If we call IRQ_WAIT, it will likely see seq >= hw_seq
-    * and wait.  However, suppose hw_seq wraps before we make it into the
-    * kernel.  The kernel sees hw_seq >= seq and waits for 3 seconds then
-    * returns -EBUSY.  This is case C).  We should catch this and then return
-    * successfully.
-    *
-    * F) Hardware might take a long time on a buffer.
-    * hw_seq seq
-    *   |    |
-    * -------------------------------------------------------------------
-    * seq - hw_seq = 5.  If we call IRQ_WAIT, if sequence 2 through 5 take too
-    * long, it will return -EBUSY.  Batchbuffers in the gltestperf demo were
-    * seen to take up to 7 seconds.  We should catch early -EBUSY return
-    * and keep trying.
-    */
-
-   do {
-      /* Keep a copy of last_dispatch so that if the wait -EBUSYs because the
-       * hardware didn't catch up in 3 seconds, we can see if it at least made
-       * progress and retry.
-       */
-      hw_seq = *bufmgr_fake->last_dispatch;
-
-      /* Catch case C */
-      if (seq - hw_seq > 0x40000000)
-        return;
-
-      ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
-                           &iw, sizeof(iw));
-      /* Catch case D */
-      kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
-                                  -0x40000000);
-
-      /* Catch case E */
-      if (ret == -EBUSY && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
-        ret = 0;
-
-      /* Catch case F: Allow up to 15 seconds chewing on one buffer. */
-      if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
-        busy_count = 0;
-      else
-        busy_count++;
-   } while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
-           (ret == -EBUSY && busy_count < 5));
-
-   if (ret != 0) {
-      drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__, __LINE__,
-            strerror(-ret));
-      abort();
-   }
-   clear_fenced(bufmgr_fake, seq);
+       struct drm_i915_irq_wait iw;
+       int hw_seq, busy_count = 0;
+       int ret;
+       int kernel_lied;
+
+       if (bufmgr_fake->fence_wait != NULL) {
+               bufmgr_fake->fence_wait(seq, bufmgr_fake->fence_priv);
+               clear_fenced(bufmgr_fake, seq);
+               return;
+       }
+
+       DBG("wait 0x%08x\n", iw.irq_seq);
+
+       iw.irq_seq = seq;
+
+       /* The kernel IRQ_WAIT implementation is all sorts of broken.
+        * 1) It returns 1 to 0x7fffffff instead of using the full 32-bit
+        *    unsigned range.
+        * 2) It returns 0 if hw_seq >= seq, not seq - hw_seq < 0 on the 32-bit
+        *    signed range.
+        * 3) It waits if seq < hw_seq, not seq - hw_seq > 0 on the 32-bit
+        *    signed range.
+        * 4) It returns -EBUSY in 3 seconds even if the hardware is still
+        *    successfully chewing through buffers.
+        *
+        * Assume that in userland we treat sequence numbers as ints, which
+        * makes some of the comparisons convenient, since the sequence
+        * numbers are all postive signed integers.
+        *
+        * From this we get several cases we need to handle.  Here's a timeline.
+        * 0x2   0x7                                    0x7ffffff8   0x7ffffffd
+        *   |    |                                             |    |
+        * ------------------------------------------------------------
+        *
+        * A) Normal wait for hw to catch up
+        * hw_seq seq
+        *   |    |
+        * ------------------------------------------------------------
+        * seq - hw_seq = 5.  If we call IRQ_WAIT, it will wait for hw to
+        * catch up.
+        *
+        * B) Normal wait for a sequence number that's already passed.
+        * seq    hw_seq
+        *   |    |
+        * ------------------------------------------------------------
+        * seq - hw_seq = -5.  If we call IRQ_WAIT, it returns 0 quickly.
+        *
+        * C) Hardware has already wrapped around ahead of us
+        * hw_seq                                                    seq
+        *   |                                                       |
+        * ------------------------------------------------------------
+        * seq - hw_seq = 0x80000000 - 5.  If we called IRQ_WAIT, it would wait
+        * for hw_seq >= seq, which may never occur.  Thus, we want to catch
+        * this in userland and return 0.
+        *
+        * D) We've wrapped around ahead of the hardware.
+        * seq                                                      hw_seq
+        *   |                                                       |
+        * ------------------------------------------------------------
+        * seq - hw_seq = -(0x80000000 - 5).  If we called IRQ_WAIT, it would
+        * return 0 quickly because hw_seq >= seq, even though the hardware
+        * isn't caught up. Thus, we need to catch this early return in
+        * userland and bother the kernel until the hardware really does
+        * catch up.
+        *
+        * E) Hardware might wrap after we test in userland.
+        *                                                  hw_seq  seq
+        *                                                      |    |
+        * ------------------------------------------------------------
+        * seq - hw_seq = 5.  If we call IRQ_WAIT, it will likely see seq >=
+        * hw_seq and wait.  However, suppose hw_seq wraps before we make it
+        * into the kernel.  The kernel sees hw_seq >= seq and waits for 3
+        * seconds then returns -EBUSY.  This is case C).  We should catch
+        * this and then return successfully.
+        *
+        * F) Hardware might take a long time on a buffer.
+        * hw_seq seq
+        *   |    |
+        * -------------------------------------------------------------------
+        * seq - hw_seq = 5.  If we call IRQ_WAIT, if sequence 2 through 5
+        * take too long, it will return -EBUSY.  Batchbuffers in the
+        * gltestperf demo were seen to take up to 7 seconds.  We should
+        * catch early -EBUSY return and keep trying.
+        */
+
+       do {
+               /* Keep a copy of last_dispatch so that if the wait -EBUSYs
+                * because the hardware didn't catch up in 3 seconds, we can
+                * see if it at least made progress and retry.
+                */
+               hw_seq = *bufmgr_fake->last_dispatch;
+
+               /* Catch case C */
+               if (seq - hw_seq > 0x40000000)
+                       return;
+
+               ret = drmCommandWrite(bufmgr_fake->fd, DRM_I915_IRQ_WAIT,
+                                     &iw, sizeof(iw));
+               /* Catch case D */
+               kernel_lied = (ret == 0) && (seq - *bufmgr_fake->last_dispatch <
+                                            -0x40000000);
+
+               /* Catch case E */
+               if (ret == -EBUSY
+                   && (seq - *bufmgr_fake->last_dispatch > 0x40000000))
+                       ret = 0;
+
+               /* Catch case F: Allow up to 15 seconds chewing on one buffer. */
+               if ((ret == -EBUSY) && (hw_seq != *bufmgr_fake->last_dispatch))
+                       busy_count = 0;
+               else
+                       busy_count++;
+       } while (kernel_lied || ret == -EAGAIN || ret == -EINTR ||
+                (ret == -EBUSY && busy_count < 5));
+
+       if (ret != 0) {
+               drmMsg("%s:%d: Error waiting for fence: %s.\n", __FILE__,
+                      __LINE__, strerror(-ret));
+               abort();
+       }
+       clear_fenced(bufmgr_fake, seq);
 }
 
 static int
 _fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
 {
-   /* Slight problem with wrap-around:
-    */
-   return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
+       /* Slight problem with wrap-around:
+        */
+       return fence == 0 || FENCE_LTE(fence, bufmgr_fake->last_fence);
 }
 
 /**
@@ -410,293 +422,300 @@ _fence_test(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
 static int
 alloc_block(drm_intel_bo *bo)
 {
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-   drm_intel_bufmgr_fake *bufmgr_fake= (drm_intel_bufmgr_fake *)bo->bufmgr;
-   struct block *block = (struct block *)calloc(sizeof *block, 1);
-   unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
-   unsigned int sz;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       struct block *block = (struct block *)calloc(sizeof *block, 1);
+       unsigned int align_log2 = ffs(bo_fake->alignment) - 1;
+       unsigned int sz;
 
-   if (!block)
-      return 1;
+       if (!block)
+               return 1;
 
-   sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
+       sz = (bo->size + bo_fake->alignment - 1) & ~(bo_fake->alignment - 1);
 
-   block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
-   if (!block->mem) {
-      free(block);
-      return 0;
-   }
+       block->mem = mmAllocMem(bufmgr_fake->heap, sz, align_log2, 0);
+       if (!block->mem) {
+               free(block);
+               return 0;
+       }
 
-   DRMINITLISTHEAD(block);
+       DRMINITLISTHEAD(block);
 
-   /* Insert at head or at tail???   
-    */
-   DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+       /* Insert at head or at tail??? */
+       DRMLISTADDTAIL(block, &bufmgr_fake->lru);
 
-   block->virtual = (uint8_t *)bufmgr_fake->virtual +
-      block->mem->ofs - bufmgr_fake->low_offset;
-   block->bo = bo;
+       block->virtual = (uint8_t *) bufmgr_fake->virtual +
+           block->mem->ofs - bufmgr_fake->low_offset;
+       block->bo = bo;
 
-   bo_fake->block = block;
+       bo_fake->block = block;
 
-   return 1;
+       return 1;
 }
 
 /* Release the card storage associated with buf:
  */
-static void free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
-                      int skip_dirty_copy)
+static void
+free_block(drm_intel_bufmgr_fake *bufmgr_fake, struct block *block,
+          int skip_dirty_copy)
 {
-   drm_intel_bo_fake *bo_fake;
-   DBG("free block %p %08x %d %d\n", block, block->mem->ofs, block->on_hardware, block->fenced);
-
-   if (!block)
-      return;
-
-   bo_fake = (drm_intel_bo_fake *)block->bo;
-
-   if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
-      skip_dirty_copy = 1;
-
-   if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
-     memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
-     bo_fake->card_dirty = 0;
-     bo_fake->dirty = 1;
-   }
-
-   if (block->on_hardware) {
-      block->bo = NULL;
-   }
-   else if (block->fenced) {
-      block->bo = NULL;
-   }
-   else {
-      DBG("    - free immediately\n");
-      DRMLISTDEL(block);
-
-      mmFreeMem(block->mem);
-      free(block);
-   }
+       drm_intel_bo_fake *bo_fake;
+       DBG("free block %p %08x %d %d\n", block, block->mem->ofs,
+           block->on_hardware, block->fenced);
+
+       if (!block)
+               return;
+
+       bo_fake = (drm_intel_bo_fake *) block->bo;
+
+       if (bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE))
+               skip_dirty_copy = 1;
+
+       if (!skip_dirty_copy && (bo_fake->card_dirty == 1)) {
+               memcpy(bo_fake->backing_store, block->virtual, block->bo->size);
+               bo_fake->card_dirty = 0;
+               bo_fake->dirty = 1;
+       }
+
+       if (block->on_hardware) {
+               block->bo = NULL;
+       } else if (block->fenced) {
+               block->bo = NULL;
+       } else {
+               DBG("    - free immediately\n");
+               DRMLISTDEL(block);
+
+               mmFreeMem(block->mem);
+               free(block);
+       }
 }
 
 static void
 alloc_backing_store(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-   assert(!bo_fake->backing_store);
-   assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+       assert(!bo_fake->backing_store);
+       assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
 
-   bo_fake->backing_store = malloc(bo->size);
+       bo_fake->backing_store = malloc(bo->size);
 
-   DBG("alloc_backing - buf %d %p %d\n", bo_fake->id, bo_fake->backing_store, bo->size);
-   assert(bo_fake->backing_store);
+       DBG("alloc_backing - buf %d %p %d\n", bo_fake->id,
+           bo_fake->backing_store, bo->size);
+       assert(bo_fake->backing_store);
 }
 
 static void
 free_backing_store(drm_intel_bo *bo)
 {
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
 
-   if (bo_fake->backing_store) {
-      assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
-      free(bo_fake->backing_store);
-      bo_fake->backing_store = NULL;
-   }
+       if (bo_fake->backing_store) {
+               assert(!(bo_fake->flags & (BM_PINNED | BM_NO_BACKING_STORE)));
+               free(bo_fake->backing_store);
+               bo_fake->backing_store = NULL;
+       }
 }
 
 static void
 set_dirty(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
 
-   if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
-      bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
+       if (bo_fake->flags & BM_NO_BACKING_STORE
+           && bo_fake->invalidate_cb != NULL)
+               bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
 
-   assert(!(bo_fake->flags & BM_PINNED));
+       assert(!(bo_fake->flags & BM_PINNED));
 
-   DBG("set_dirty - buf %d\n", bo_fake->id);
-   bo_fake->dirty = 1;
+       DBG("set_dirty - buf %d\n", bo_fake->id);
+       bo_fake->dirty = 1;
 }
 
 static int
 evict_lru(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int max_fence)
 {
-   struct block *block, *tmp;
+       struct block *block, *tmp;
 
-   DBG("%s\n", __FUNCTION__);
+       DBG("%s\n", __FUNCTION__);
 
-   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
-      drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
+       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+               drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
 
-      if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
-        continue;
+               if (bo_fake != NULL && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+                       continue;
 
-      if (block->fence && max_fence && !FENCE_LTE(block->fence, max_fence))
-        return 0;
+               if (block->fence && max_fence && !FENCE_LTE(block->fence,
+                                                           max_fence))
+                       return 0;
 
-      set_dirty(&bo_fake->bo);
-      bo_fake->block = NULL;
+               set_dirty(&bo_fake->bo);
+               bo_fake->block = NULL;
 
-      free_block(bufmgr_fake, block, 0);
-      return 1;
-   }
+               free_block(bufmgr_fake, block, 0);
+               return 1;
+       }
 
-   return 0;
+       return 0;
 }
 
 static int
 evict_mru(drm_intel_bufmgr_fake *bufmgr_fake)
 {
-   struct block *block, *tmp;
+       struct block *block, *tmp;
 
-   DBG("%s\n", __FUNCTION__);
+       DBG("%s\n", __FUNCTION__);
 
-   DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
-      drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
+       DRMLISTFOREACHSAFEREVERSE(block, tmp, &bufmgr_fake->lru) {
+               drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
 
-      if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
-        continue;
+               if (bo_fake && (bo_fake->flags & BM_NO_FENCE_SUBDATA))
+                       continue;
 
-      set_dirty(&bo_fake->bo);
-      bo_fake->block = NULL;
+               set_dirty(&bo_fake->bo);
+               bo_fake->block = NULL;
 
-      free_block(bufmgr_fake, block, 0);
-      return 1;
-   }
+               free_block(bufmgr_fake, block, 0);
+               return 1;
+       }
 
-   return 0;
+       return 0;
 }
 
 /**
  * Removes all objects from the fenced list older than the given fence.
  */
-static int clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake,
-                       unsigned int fence_cookie)
+static int
+clear_fenced(drm_intel_bufmgr_fake *bufmgr_fake, unsigned int fence_cookie)
 {
-   struct block *block, *tmp;
-   int ret = 0;
-
-   bufmgr_fake->last_fence = fence_cookie;
-   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
-      assert(block->fenced);
-
-      if (_fence_test(bufmgr_fake, block->fence)) {
-
-        block->fenced = 0;
-
-        if (!block->bo) {
-           DBG("delayed free: offset %x sz %x\n",
-               block->mem->ofs, block->mem->size);
-           DRMLISTDEL(block);
-           mmFreeMem(block->mem);
-           free(block);
-        }
-        else {
-           DBG("return to lru: offset %x sz %x\n",
-               block->mem->ofs, block->mem->size);
-           DRMLISTDEL(block);
-           DRMLISTADDTAIL(block, &bufmgr_fake->lru);
-        }
-
-        ret = 1;
-      }
-      else {
-        /* Blocks are ordered by fence, so if one fails, all from
-         * here will fail also:
-         */
-       DBG("fence not passed: offset %x sz %x %d %d \n",
-           block->mem->ofs, block->mem->size, block->fence, bufmgr_fake->last_fence);
-        break;
-      }
-   }
-
-   DBG("%s: %d\n", __FUNCTION__, ret);
-   return ret;
+       struct block *block, *tmp;
+       int ret = 0;
+
+       bufmgr_fake->last_fence = fence_cookie;
+       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->fenced) {
+               assert(block->fenced);
+
+               if (_fence_test(bufmgr_fake, block->fence)) {
+
+                       block->fenced = 0;
+
+                       if (!block->bo) {
+                               DBG("delayed free: offset %x sz %x\n",
+                                   block->mem->ofs, block->mem->size);
+                               DRMLISTDEL(block);
+                               mmFreeMem(block->mem);
+                               free(block);
+                       } else {
+                               DBG("return to lru: offset %x sz %x\n",
+                                   block->mem->ofs, block->mem->size);
+                               DRMLISTDEL(block);
+                               DRMLISTADDTAIL(block, &bufmgr_fake->lru);
+                       }
+
+                       ret = 1;
+               } else {
+                       /* Blocks are ordered by fence, so if one fails, all
+                        * from here will fail also:
+                        */
+                       DBG("fence not passed: offset %x sz %x %d %d \n",
+                           block->mem->ofs, block->mem->size, block->fence,
+                           bufmgr_fake->last_fence);
+                       break;
+               }
+       }
+
+       DBG("%s: %d\n", __FUNCTION__, ret);
+       return ret;
 }
 
-static void fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
+static void
+fence_blocks(drm_intel_bufmgr_fake *bufmgr_fake, unsigned fence)
 {
-   struct block *block, *tmp;
+       struct block *block, *tmp;
 
-   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
-      DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n", block,
-         block->mem->size, block->mem->ofs, block->bo, fence);
-      block->fence = fence;
+       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+               DBG("Fence block %p (sz 0x%x ofs %x buf %p) with fence %d\n",
+                   block, block->mem->size, block->mem->ofs, block->bo, fence);
+               block->fence = fence;
 
-      block->on_hardware = 0;
-      block->fenced = 1;
+               block->on_hardware = 0;
+               block->fenced = 1;
 
-      /* Move to tail of pending list here
-       */
-      DRMLISTDEL(block);
-      DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
-   }
+               /* Move to tail of pending list here
+                */
+               DRMLISTDEL(block);
+               DRMLISTADDTAIL(block, &bufmgr_fake->fenced);
+       }
 
-   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+       assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
 }
 
-static int evict_and_alloc_block(drm_intel_bo *bo)
+static int
+evict_and_alloc_block(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-
-   assert(bo_fake->block == NULL);
-
-   /* Search for already free memory:
-    */
-   if (alloc_block(bo))
-      return 1;
-
-   /* If we're not thrashing, allow lru eviction to dig deeper into
-    * recently used textures.  We'll probably be thrashing soon:
-    */
-   if (!bufmgr_fake->thrashing) {
-      while (evict_lru(bufmgr_fake, 0))
-        if (alloc_block(bo))
-           return 1;
-   }
-
-   /* Keep thrashing counter alive?
-    */
-   if (bufmgr_fake->thrashing)
-      bufmgr_fake->thrashing = 20;
-
-   /* Wait on any already pending fences - here we are waiting for any
-    * freed memory that has been submitted to hardware and fenced to
-    * become available:
-    */
-   while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
-      uint32_t fence = bufmgr_fake->fenced.next->fence;
-      _fence_wait_internal(bufmgr_fake, fence);
-
-      if (alloc_block(bo))
-        return 1;
-   }
-
-   if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
-      while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
-        uint32_t fence = bufmgr_fake->fenced.next->fence;
-        _fence_wait_internal(bufmgr_fake, fence);
-      }
-
-      if (!bufmgr_fake->thrashing) {
-        DBG("thrashing\n");
-      }
-      bufmgr_fake->thrashing = 20;
-
-      if (alloc_block(bo))
-        return 1;
-   }
-
-   while (evict_mru(bufmgr_fake))
-      if (alloc_block(bo))
-        return 1;
-
-   DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
-
-   return 0;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+       assert(bo_fake->block == NULL);
+
+       /* Search for already free memory:
+        */
+       if (alloc_block(bo))
+               return 1;
+
+       /* If we're not thrashing, allow lru eviction to dig deeper into
+        * recently used textures.  We'll probably be thrashing soon:
+        */
+       if (!bufmgr_fake->thrashing) {
+               while (evict_lru(bufmgr_fake, 0))
+                       if (alloc_block(bo))
+                               return 1;
+       }
+
+       /* Keep thrashing counter alive?
+        */
+       if (bufmgr_fake->thrashing)
+               bufmgr_fake->thrashing = 20;
+
+       /* Wait on any already pending fences - here we are waiting for any
+        * freed memory that has been submitted to hardware and fenced to
+        * become available:
+        */
+       while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+               uint32_t fence = bufmgr_fake->fenced.next->fence;
+               _fence_wait_internal(bufmgr_fake, fence);
+
+               if (alloc_block(bo))
+                       return 1;
+       }
+
+       if (!DRMLISTEMPTY(&bufmgr_fake->on_hardware)) {
+               while (!DRMLISTEMPTY(&bufmgr_fake->fenced)) {
+                       uint32_t fence = bufmgr_fake->fenced.next->fence;
+                       _fence_wait_internal(bufmgr_fake, fence);
+               }
+
+               if (!bufmgr_fake->thrashing) {
+                       DBG("thrashing\n");
+               }
+               bufmgr_fake->thrashing = 20;
+
+               if (alloc_block(bo))
+                       return 1;
+       }
+
+       while (evict_mru(bufmgr_fake))
+               if (alloc_block(bo))
+                       return 1;
+
+       DBG("%s 0x%x bytes failed\n", __FUNCTION__, bo->size);
+
+       return 0;
 }
 
 /***********************************************************************
@@ -709,10 +728,10 @@ static int evict_and_alloc_block(drm_intel_bo *bo)
 static void
 drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
 {
-   unsigned int cookie;
+       unsigned int cookie;
 
-   cookie = _fence_emit_internal(bufmgr_fake);
-   _fence_wait_internal(bufmgr_fake, cookie);
+       cookie = _fence_emit_internal(bufmgr_fake);
+       _fence_wait_internal(bufmgr_fake, cookie);
 }
 
 /**
@@ -724,23 +743,25 @@ drm_intel_bufmgr_fake_wait_idle(drm_intel_bufmgr_fake *bufmgr_fake)
 static void
 drm_intel_fake_bo_wait_rendering_locked(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
 
-   if (bo_fake->block == NULL || !bo_fake->block->fenced)
-      return;
+       if (bo_fake->block == NULL || !bo_fake->block->fenced)
+               return;
 
-   _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
+       _fence_wait_internal(bufmgr_fake, bo_fake->block->fence);
 }
 
 static void
 drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
 
-   pthread_mutex_lock(&bufmgr_fake->lock);
-   drm_intel_fake_bo_wait_rendering_locked(bo);
-   pthread_mutex_unlock(&bufmgr_fake->lock);
+       pthread_mutex_lock(&bufmgr_fake->lock);
+       drm_intel_fake_bo_wait_rendering_locked(bo);
+       pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 /* Specifically ignore texture memory sharing.
@@ -750,188 +771,198 @@ drm_intel_fake_bo_wait_rendering(drm_intel_bo *bo)
 void
 drm_intel_bufmgr_fake_contended_lock_take(drm_intel_bufmgr *bufmgr)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
-   struct block *block, *tmp;
+       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+       struct block *block, *tmp;
 
-   pthread_mutex_lock(&bufmgr_fake->lock);
+       pthread_mutex_lock(&bufmgr_fake->lock);
 
-   bufmgr_fake->need_fence = 1;
-   bufmgr_fake->fail = 0;
+       bufmgr_fake->need_fence = 1;
+       bufmgr_fake->fail = 0;
 
-   /* Wait for hardware idle.  We don't know where acceleration has been
-    * happening, so we'll need to wait anyway before letting anything get
-    * put on the card again.
-    */
-   drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+       /* Wait for hardware idle.  We don't know where acceleration has been
+        * happening, so we'll need to wait anyway before letting anything get
+        * put on the card again.
+        */
+       drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
 
-   /* Check that we hadn't released the lock without having fenced the last
-    * set of buffers.
-    */
-   assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
-   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+       /* Check that we hadn't released the lock without having fenced the last
+        * set of buffers.
+        */
+       assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+       assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
 
-   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
-      assert(_fence_test(bufmgr_fake, block->fence));
-      set_dirty(block->bo);
-   }
+       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+               assert(_fence_test(bufmgr_fake, block->fence));
+               set_dirty(block->bo);
+       }
 
-   pthread_mutex_unlock(&bufmgr_fake->lock);
+       pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 static drm_intel_bo *
-drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
-                       unsigned long size, unsigned int alignment)
+drm_intel_fake_bo_alloc(drm_intel_bufmgr *bufmgr,
+                       const char *name,
+                       unsigned long size,
+                       unsigned int alignment)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake;
-   drm_intel_bo_fake *bo_fake;
+       drm_intel_bufmgr_fake *bufmgr_fake;
+       drm_intel_bo_fake *bo_fake;
 
-   bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
+       bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
 
-   assert(size != 0);
+       assert(size != 0);
 
-   bo_fake = calloc(1, sizeof(*bo_fake));
-   if (!bo_fake)
-      return NULL;
+       bo_fake = calloc(1, sizeof(*bo_fake));
+       if (!bo_fake)
+               return NULL;
 
-   bo_fake->bo.size = size;
-   bo_fake->bo.offset = -1;
-   bo_fake->bo.virtual = NULL;
-   bo_fake->bo.bufmgr = bufmgr;
-   bo_fake->refcount = 1;
+       bo_fake->bo.size = size;
+       bo_fake->bo.offset = -1;
+       bo_fake->bo.virtual = NULL;
+       bo_fake->bo.bufmgr = bufmgr;
+       bo_fake->refcount = 1;
 
-   /* Alignment must be a power of two */
-   assert((alignment & (alignment - 1)) == 0);
-   if (alignment == 0)
-      alignment = 1;
-   bo_fake->alignment = alignment;
-   bo_fake->id = ++bufmgr_fake->buf_nr;
-   bo_fake->name = name;
-   bo_fake->flags = 0;
-   bo_fake->is_static = 0;
+       /* Alignment must be a power of two */
+       assert((alignment & (alignment - 1)) == 0);
+       if (alignment == 0)
+               alignment = 1;
+       bo_fake->alignment = alignment;
+       bo_fake->id = ++bufmgr_fake->buf_nr;
+       bo_fake->name = name;
+       bo_fake->flags = 0;
+       bo_fake->is_static = 0;
 
-   DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
-       bo_fake->bo.size / 1024);
+       DBG("drm_bo_alloc: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+           bo_fake->bo.size / 1024);
 
-   return &bo_fake->bo;
+       return &bo_fake->bo;
 }
 
 drm_intel_bo *
-drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr, const char *name,
-                              unsigned long offset, unsigned long size,
-                              void *virtual)
+drm_intel_bo_fake_alloc_static(drm_intel_bufmgr *bufmgr,
+                              const char *name,
+                              unsigned long offset,
+                              unsigned long size, void *virtual)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake;
-   drm_intel_bo_fake *bo_fake;
+       drm_intel_bufmgr_fake *bufmgr_fake;
+       drm_intel_bo_fake *bo_fake;
 
-   bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
+       bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
 
-   assert(size != 0);
+       assert(size != 0);
 
-   bo_fake = calloc(1, sizeof(*bo_fake));
-   if (!bo_fake)
-      return NULL;
+       bo_fake = calloc(1, sizeof(*bo_fake));
+       if (!bo_fake)
+               return NULL;
 
-   bo_fake->bo.size = size;
-   bo_fake->bo.offset = offset;
-   bo_fake->bo.virtual = virtual;
-   bo_fake->bo.bufmgr = bufmgr;
-   bo_fake->refcount = 1;
-   bo_fake->id = ++bufmgr_fake->buf_nr;
-   bo_fake->name = name;
-   bo_fake->flags = BM_PINNED;
-   bo_fake->is_static = 1;
+       bo_fake->bo.size = size;
+       bo_fake->bo.offset = offset;
+       bo_fake->bo.virtual = virtual;
+       bo_fake->bo.bufmgr = bufmgr;
+       bo_fake->refcount = 1;
+       bo_fake->id = ++bufmgr_fake->buf_nr;
+       bo_fake->name = name;
+       bo_fake->flags = BM_PINNED;
+       bo_fake->is_static = 1;
 
-   DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
-       bo_fake->bo.size / 1024);
+       DBG("drm_bo_alloc_static: (buf %d: %s, %d kb)\n", bo_fake->id,
+           bo_fake->name, bo_fake->bo.size / 1024);
 
-   return &bo_fake->bo;
+       return &bo_fake->bo;
 }
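
A minimal, hypothetical use of the static allocator above: wrapping an
already-fixed region (a scanout buffer, say) as a pinned BO so relocations
can target it. front_offset, front_size, and front_map are illustrative
names, not from this patch:

        drm_intel_bo *front =
                drm_intel_bo_fake_alloc_static(bufmgr, "front", front_offset,
                                               front_size, front_map);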
 
 static void
 drm_intel_fake_bo_reference(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
 
-   pthread_mutex_lock(&bufmgr_fake->lock);
-   bo_fake->refcount++;
-   pthread_mutex_unlock(&bufmgr_fake->lock);
+       pthread_mutex_lock(&bufmgr_fake->lock);
+       bo_fake->refcount++;
+       pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 static void
 drm_intel_fake_bo_reference_locked(drm_intel_bo *bo)
 {
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
 
-   bo_fake->refcount++;
+       bo_fake->refcount++;
 }
 
 static void
 drm_intel_fake_bo_unreference_locked(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-   int i;
-
-   if (--bo_fake->refcount == 0) {
-      assert(bo_fake->map_count == 0);
-      /* No remaining references, so free it */
-      if (bo_fake->block)
-        free_block(bufmgr_fake, bo_fake->block, 1);
-      free_backing_store(bo);
-
-      for (i = 0; i < bo_fake->nr_relocs; i++)
-        drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].target_buf);
-
-      DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id, bo_fake->name);
-
-      free(bo_fake->relocs);
-      free(bo);
-   }
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+       int i;
+
+       if (--bo_fake->refcount == 0) {
+               assert(bo_fake->map_count == 0);
+               /* No remaining references, so free it */
+               if (bo_fake->block)
+                       free_block(bufmgr_fake, bo_fake->block, 1);
+               free_backing_store(bo);
+
+               for (i = 0; i < bo_fake->nr_relocs; i++)
+                       drm_intel_fake_bo_unreference_locked(bo_fake->relocs[i].
+                                                            target_buf);
+
+               DBG("drm_bo_unreference: free buf %d %s\n", bo_fake->id,
+                   bo_fake->name);
+
+               free(bo_fake->relocs);
+               free(bo);
+       }
 }
 
 static void
 drm_intel_fake_bo_unreference(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
 
-   pthread_mutex_lock(&bufmgr_fake->lock);
-   drm_intel_fake_bo_unreference_locked(bo);
-   pthread_mutex_unlock(&bufmgr_fake->lock);
+       pthread_mutex_lock(&bufmgr_fake->lock);
+       drm_intel_fake_bo_unreference_locked(bo);
+       pthread_mutex_unlock(&bufmgr_fake->lock);
 }
 
 /**
  * Set the buffer as not requiring backing store, and instead get the callback
  * invoked whenever it would be set dirty.
  */
-void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
-                                            void (*invalidate_cb)(drm_intel_bo *bo,
-                                                                  void *ptr),
-                                            void *ptr)
+void
+drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
+                                       void (*invalidate_cb)(drm_intel_bo *bo,
+                                                             void *ptr),
+                                       void *ptr)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
 
-   pthread_mutex_lock(&bufmgr_fake->lock);
+       pthread_mutex_lock(&bufmgr_fake->lock);
 
-   if (bo_fake->backing_store)
-      free_backing_store(bo);
+       if (bo_fake->backing_store)
+               free_backing_store(bo);
 
-   bo_fake->flags |= BM_NO_BACKING_STORE;
+       bo_fake->flags |= BM_NO_BACKING_STORE;
 
-   DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
-   bo_fake->dirty = 1;
-   bo_fake->invalidate_cb = invalidate_cb;
-   bo_fake->invalidate_ptr = ptr;
+       DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
+       bo_fake->dirty = 1;
+       bo_fake->invalidate_cb = invalidate_cb;
+       bo_fake->invalidate_ptr = ptr;
 
-   /* Note that it is invalid right from the start.  Also note
-    * invalidate_cb is called with the bufmgr locked, so cannot
-    * itself make bufmgr calls.
-    */
-   if (invalidate_cb != NULL)
-      invalidate_cb(bo, ptr);
+       /* Note that it is invalid right from the start.  Also note
+        * invalidate_cb is called with the bufmgr locked, so cannot
+        * itself make bufmgr calls.
+        */
+       if (invalidate_cb != NULL)
+               invalidate_cb(bo, ptr);
 
-   pthread_mutex_unlock(&bufmgr_fake->lock);
+       pthread_mutex_unlock(&bufmgr_fake->lock);
 }
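
A hedged caller sketch for the hook above; my_invalidate() and struct
my_state are illustrative stand-ins. Per the comment in the code, the
callback runs with the bufmgr lock held and therefore must not call back
into the bufmgr:

        static void my_invalidate(drm_intel_bo *bo, void *ptr)
        {
                struct my_state *state = ptr;

                state->contents_valid = 0;      /* only record the loss */
        }

        ...
        drm_intel_bo_fake_disable_backing_store(scratch_bo, my_invalidate,
                                                &state);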
 
 /**
@@ -939,236 +970,241 @@ void drm_intel_bo_fake_disable_backing_store(drm_intel_bo *bo,
  * BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
  */
 static int
-drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
+drm_intel_fake_bo_map_locked(drm_intel_bo *bo, int write_enable)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-
-   /* Static buffers are always mapped. */
-   if (bo_fake->is_static) {
-      if (bo_fake->card_dirty) {
-         drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
-         bo_fake->card_dirty = 0;
-      }
-      return 0;
-   }
-
-   /* Allow recursive mapping.  Mesa may recursively map buffers with
-    * nested display loops, and it is used internally in bufmgr_fake
-    * for relocation.
-    */
-   if (bo_fake->map_count++ != 0)
-      return 0;
-
-   {
-      DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
-         bo_fake->bo.size / 1024);
-
-      if (bo->virtual != NULL) {
-        drmMsg("%s: already mapped\n", __FUNCTION__);
-        abort();
-      }
-      else if (bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED)) {
-
-        if (!bo_fake->block && !evict_and_alloc_block(bo)) {
-           DBG("%s: alloc failed\n", __FUNCTION__);
-           bufmgr_fake->fail = 1;
-           return 1;
-        }
-        else {
-           assert(bo_fake->block);
-           bo_fake->dirty = 0;
-
-           if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
-               bo_fake->block->fenced) {
-              drm_intel_fake_bo_wait_rendering_locked(bo);
-           }
-
-           bo->virtual = bo_fake->block->virtual;
-        }
-      }
-      else {
-        if (write_enable)
-           set_dirty(bo);
-
-        if (bo_fake->backing_store == 0)
-           alloc_backing_store(bo);
-
-         if ((bo_fake->card_dirty == 1) && bo_fake->block) {
-            if (bo_fake->block->fenced)
-               drm_intel_fake_bo_wait_rendering_locked(bo);
-
-            memcpy(bo_fake->backing_store, bo_fake->block->virtual, bo_fake->block->bo->size);
-            bo_fake->card_dirty = 0;
-         }
-
-        bo->virtual = bo_fake->backing_store;
-      }
-   }
-
-   return 0;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+       /* Static buffers are always mapped. */
+       if (bo_fake->is_static) {
+               if (bo_fake->card_dirty) {
+                       drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+                       bo_fake->card_dirty = 0;
+               }
+               return 0;
+       }
+
+       /* Allow recursive mapping.  Mesa may recursively map buffers with
+        * nested display loops, and it is used internally in bufmgr_fake
+        * for relocation.
+        */
+       if (bo_fake->map_count++ != 0)
+               return 0;
+
+       {
+               DBG("drm_bo_map: (buf %d: %s, %d kb)\n", bo_fake->id,
+                   bo_fake->name, bo_fake->bo.size / 1024);
+
+               if (bo->virtual != NULL) {
+                       drmMsg("%s: already mapped\n", __FUNCTION__);
+                       abort();
+               } else if (bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)) {
+                       if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+                               DBG("%s: alloc failed\n", __FUNCTION__);
+                               bufmgr_fake->fail = 1;
+                               return 1;
+                       } else {
+                               assert(bo_fake->block);
+                               bo_fake->dirty = 0;
+
+                               if (!(bo_fake->flags & BM_NO_FENCE_SUBDATA) &&
+                                   bo_fake->block->fenced) {
+                                       drm_intel_fake_bo_wait_rendering_locked
+                                           (bo);
+                               }
+
+                               bo->virtual = bo_fake->block->virtual;
+                       }
+               } else {
+                       if (write_enable)
+                               set_dirty(bo);
+
+                       if (bo_fake->backing_store == 0)
+                               alloc_backing_store(bo);
+
+                       if ((bo_fake->card_dirty == 1) && bo_fake->block) {
+                               if (bo_fake->block->fenced)
+                                       drm_intel_fake_bo_wait_rendering_locked
+                                           (bo);
+
+                               memcpy(bo_fake->backing_store,
+                                      bo_fake->block->virtual,
+                                      bo_fake->block->bo->size);
+                               bo_fake->card_dirty = 0;
+                       }
+
+                       bo->virtual = bo_fake->backing_store;
+               }
+       }
+
+       return 0;
 }
 
 static int
-drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
+drm_intel_fake_bo_map(drm_intel_bo *bo, int write_enable)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   int ret;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       int ret;
 
-   pthread_mutex_lock(&bufmgr_fake->lock);
-   ret = drm_intel_fake_bo_map_locked(bo, write_enable);
-   pthread_mutex_unlock(&bufmgr_fake->lock);
+       pthread_mutex_lock(&bufmgr_fake->lock);
+       ret = drm_intel_fake_bo_map_locked(bo, write_enable);
+       pthread_mutex_unlock(&bufmgr_fake->lock);
 
-   return ret;
+       return ret;
 }
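
Callers reach this through the generic drm_intel_bo_map() and
drm_intel_bo_unmap() entry points, which dispatch through bufmgr->bo_map.
A minimal sketch; since mapping nests via map_count, a second map/unmap
pair inside this one would be harmless:

        if (drm_intel_bo_map(bo, 1) == 0) {     /* 1 = write_enable */
                memset(bo->virtual, 0, bo->size);
                drm_intel_bo_unmap(bo);
        }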
 
 static int
-drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
+drm_intel_fake_bo_unmap_locked(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
 
-   /* Static buffers are always mapped. */
-   if (bo_fake->is_static)
-      return 0;
+       /* Static buffers are always mapped. */
+       if (bo_fake->is_static)
+               return 0;
 
-   assert(bo_fake->map_count != 0);
-   if (--bo_fake->map_count != 0)
-      return 0;
+       assert(bo_fake->map_count != 0);
+       if (--bo_fake->map_count != 0)
+               return 0;
 
-   DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
-       bo_fake->bo.size / 1024);
+       DBG("drm_bo_unmap: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
+           bo_fake->bo.size / 1024);
 
-   bo->virtual = NULL;
+       bo->virtual = NULL;
 
-   return 0;
+       return 0;
 }
 
-static int
-drm_intel_fake_bo_unmap(drm_intel_bo *bo)
+static int drm_intel_fake_bo_unmap(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   int ret;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       int ret;
 
-   pthread_mutex_lock(&bufmgr_fake->lock);
-   ret = drm_intel_fake_bo_unmap_locked(bo);
-   pthread_mutex_unlock(&bufmgr_fake->lock);
+       pthread_mutex_lock(&bufmgr_fake->lock);
+       ret = drm_intel_fake_bo_unmap_locked(bo);
+       pthread_mutex_unlock(&bufmgr_fake->lock);
 
-   return ret;
+       return ret;
 }
 
 static void
-drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
+drm_intel_fake_kick_all_locked(drm_intel_bufmgr_fake *bufmgr_fake)
 {
-   struct block *block, *tmp;
-
-   bufmgr_fake->performed_rendering = 0;
-   /* okay for ever BO that is on the HW kick it off.
-      seriously not afraid of the POLICE right now */
-   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
-      drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
-
-      block->on_hardware = 0;
-      free_block(bufmgr_fake, block, 0);
-      bo_fake->block = NULL;
-      bo_fake->validated = 0;
-      if (!(bo_fake->flags & BM_NO_BACKING_STORE))
-         bo_fake->dirty = 1;
-   }
+       struct block *block, *tmp;
+
+       bufmgr_fake->performed_rendering = 0;
+       /* okay, for every BO that is on the HW, kick it off.
+        * seriously not afraid of the POLICE right now */
+       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->on_hardware) {
+               drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+
+               block->on_hardware = 0;
+               free_block(bufmgr_fake, block, 0);
+               bo_fake->block = NULL;
+               bo_fake->validated = 0;
+               if (!(bo_fake->flags & BM_NO_BACKING_STORE))
+                       bo_fake->dirty = 1;
+       }
 
 }
 
 static int
-drm_intel_fake_bo_validate(drm_intel_bo *bo)
+drm_intel_fake_bo_validate(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-
-   bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-
-   DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id, bo_fake->name,
-       bo_fake->bo.size / 1024);
-
-   /* Sanity check: Buffers should be unmapped before being validated.
-    * This is not so much of a problem for bufmgr_fake, but TTM refuses,
-    * and the problem is harder to debug there.
-    */
-   assert(bo_fake->map_count == 0);
-
-   if (bo_fake->is_static) {
-      /* Add it to the needs-fence list */
-      bufmgr_fake->need_fence = 1;
-      return 0;
-   }
-
-   /* Allocate the card memory */
-   if (!bo_fake->block && !evict_and_alloc_block(bo)) {
-      bufmgr_fake->fail = 1;
-      DBG("Failed to validate buf %d:%s\n", bo_fake->id, bo_fake->name);
-      return -1;
-   }
-
-   assert(bo_fake->block);
-   assert(bo_fake->block->bo == &bo_fake->bo);
-
-   bo->offset = bo_fake->block->mem->ofs;
-
-   /* Upload the buffer contents if necessary */
-   if (bo_fake->dirty) {
-      DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
-         bo_fake->name, bo->size, bo_fake->block->mem->ofs);
-
-      assert(!(bo_fake->flags &
-              (BM_NO_BACKING_STORE|BM_PINNED)));
-
-      /* Actually, should be able to just wait for a fence on the memory,
-       * which we would be tracking when we free it.  Waiting for idle is
-       * a sufficiently large hammer for now.
-       */
-      drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
-
-      /* we may never have mapped this BO so it might not have any backing
-       * store if this happens it should be rare, but 0 the card memory
-       * in any case */
-      if (bo_fake->backing_store)
-         memcpy(bo_fake->block->virtual, bo_fake->backing_store, bo->size);
-      else
-         memset(bo_fake->block->virtual, 0, bo->size);
-
-      bo_fake->dirty = 0;
-   }
-
-   bo_fake->block->fenced = 0;
-   bo_fake->block->on_hardware = 1;
-   DRMLISTDEL(bo_fake->block);
-   DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
-
-   bo_fake->validated = 1;
-   bufmgr_fake->need_fence = 1;
-
-   return 0;
+       drm_intel_bufmgr_fake *bufmgr_fake;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+
+       bufmgr_fake = (drm_intel_bufmgr_fake *) bo->bufmgr;
+
+       DBG("drm_bo_validate: (buf %d: %s, %d kb)\n", bo_fake->id,
+           bo_fake->name, bo_fake->bo.size / 1024);
+
+       /* Sanity check: Buffers should be unmapped before being validated.
+        * This is not so much of a problem for bufmgr_fake, but TTM refuses,
+        * and the problem is harder to debug there.
+        */
+       assert(bo_fake->map_count == 0);
+
+       if (bo_fake->is_static) {
+               /* Add it to the needs-fence list */
+               bufmgr_fake->need_fence = 1;
+               return 0;
+       }
+
+       /* Allocate the card memory */
+       if (!bo_fake->block && !evict_and_alloc_block(bo)) {
+               bufmgr_fake->fail = 1;
+               DBG("Failed to validate buf %d:%s\n", bo_fake->id,
+                   bo_fake->name);
+               return -1;
+       }
+
+       assert(bo_fake->block);
+       assert(bo_fake->block->bo == &bo_fake->bo);
+
+       bo->offset = bo_fake->block->mem->ofs;
+
+       /* Upload the buffer contents if necessary */
+       if (bo_fake->dirty) {
+               DBG("Upload dirty buf %d:%s, sz %d offset 0x%x\n", bo_fake->id,
+                   bo_fake->name, bo->size, bo_fake->block->mem->ofs);
+
+               assert(!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED)));
+
+               /* Actually, should be able to just wait for a fence on the
+                * memory, which we would be tracking when we free it.  Waiting
+                * for idle is a sufficiently large hammer for now.
+                */
+               drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+               /* We may never have mapped this BO, so it might not have any
+                * backing store.  That should be rare, but zero the card
+                * memory in any case. */
+               if (bo_fake->backing_store)
+                       memcpy(bo_fake->block->virtual, bo_fake->backing_store,
+                              bo->size);
+               else
+                       memset(bo_fake->block->virtual, 0, bo->size);
+
+               bo_fake->dirty = 0;
+       }
+
+       bo_fake->block->fenced = 0;
+       bo_fake->block->on_hardware = 1;
+       DRMLISTDEL(bo_fake->block);
+       DRMLISTADDTAIL(bo_fake->block, &bufmgr_fake->on_hardware);
+
+       bo_fake->validated = 1;
+       bufmgr_fake->need_fence = 1;
+
+       return 0;
 }
 
 static void
 drm_intel_fake_fence_validated(drm_intel_bufmgr *bufmgr)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
-   unsigned int cookie;
+       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+       unsigned int cookie;
 
-   cookie = _fence_emit_internal(bufmgr_fake);
-   fence_blocks(bufmgr_fake, cookie);
+       cookie = _fence_emit_internal(bufmgr_fake);
+       fence_blocks(bufmgr_fake, cookie);
 
-   DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
+       DBG("drm_fence_validated: 0x%08x cookie\n", cookie);
 }
 
 static void
 drm_intel_fake_destroy(drm_intel_bufmgr *bufmgr)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
+       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
 
-   pthread_mutex_destroy(&bufmgr_fake->lock);
-   mmDestroy(bufmgr_fake->heap);
-   free(bufmgr);
+       pthread_mutex_destroy(&bufmgr_fake->lock);
+       mmDestroy(bufmgr_fake->heap);
+       free(bufmgr);
 }
 
 static int
@@ -1176,50 +1212,55 @@ drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                          drm_intel_bo *target_bo, uint32_t target_offset,
                          uint32_t read_domains, uint32_t write_domain)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   struct fake_buffer_reloc *r;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-   drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)target_bo;
-   int i;
-
-   pthread_mutex_lock(&bufmgr_fake->lock);
-
-   assert(bo);
-   assert(target_bo);
-
-   if (bo_fake->relocs == NULL) {
-      bo_fake->relocs = malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
-   }
-
-   r = &bo_fake->relocs[bo_fake->nr_relocs++];
-
-   assert(bo_fake->nr_relocs <= MAX_RELOCS);
-
-   drm_intel_fake_bo_reference_locked(target_bo);
-
-   if (!target_fake->is_static) {
-      bo_fake->child_size += ALIGN(target_bo->size, target_fake->alignment);
-      bo_fake->child_size += target_fake->child_size;
-   }
-   r->target_buf = target_bo;
-   r->offset = offset;
-   r->last_target_offset = target_bo->offset;
-   r->delta = target_offset;
-   r->read_domains = read_domains;
-   r->write_domain = write_domain;
-
-   if (bufmgr_fake->debug) {
-      /* Check that a conflicting relocation hasn't already been emitted. */
-      for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
-        struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];
-
-        assert(r->offset != r2->offset);
-      }
-   }
-
-   pthread_mutex_unlock(&bufmgr_fake->lock);
-
-   return 0;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       struct fake_buffer_reloc *r;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+       drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *) target_bo;
+       int i;
+
+       pthread_mutex_lock(&bufmgr_fake->lock);
+
+       assert(bo);
+       assert(target_bo);
+
+       if (bo_fake->relocs == NULL) {
+               bo_fake->relocs =
+                   malloc(sizeof(struct fake_buffer_reloc) * MAX_RELOCS);
+       }
+
+       r = &bo_fake->relocs[bo_fake->nr_relocs++];
+
+       assert(bo_fake->nr_relocs <= MAX_RELOCS);
+
+       drm_intel_fake_bo_reference_locked(target_bo);
+
+       if (!target_fake->is_static) {
+               bo_fake->child_size +=
+                   ALIGN(target_bo->size, target_fake->alignment);
+               bo_fake->child_size += target_fake->child_size;
+       }
+       r->target_buf = target_bo;
+       r->offset = offset;
+       r->last_target_offset = target_bo->offset;
+       r->delta = target_offset;
+       r->read_domains = read_domains;
+       r->write_domain = write_domain;
+
+       if (bufmgr_fake->debug) {
+               /* Check that a conflicting relocation hasn't already been
+                * emitted.
+                */
+               for (i = 0; i < bo_fake->nr_relocs - 1; i++) {
+                       struct fake_buffer_reloc *r2 = &bo_fake->relocs[i];
+
+                       assert(r->offset != r2->offset);
+               }
+       }
+
+       pthread_mutex_unlock(&bufmgr_fake->lock);
+
+       return 0;
 }
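
A hypothetical caller, via the generic drm_intel_bo_emit_reloc() entry
point: pointing one dword of a batchbuffer at a vertex buffer. batch_bo,
vbo, and reloc_offset are illustrative; the domain flag is from i915_drm.h:

        /* The dword at reloc_offset will be fixed up to vbo->offset + 0. */
        drm_intel_bo_emit_reloc(batch_bo, reloc_offset,
                                vbo, 0,
                                I915_GEM_DOMAIN_VERTEX, 0);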
 
 /**
@@ -1229,175 +1270,183 @@ drm_intel_fake_emit_reloc(drm_intel_bo *bo, uint32_t offset,
 static void
 drm_intel_fake_calculate_domains(drm_intel_bo *bo)
 {
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-   int i;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+       int i;
 
-   for (i = 0; i < bo_fake->nr_relocs; i++) {
-      struct fake_buffer_reloc *r = &bo_fake->relocs[i];
-      drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
+       for (i = 0; i < bo_fake->nr_relocs; i++) {
+               struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+               drm_intel_bo_fake *target_fake =
+                   (drm_intel_bo_fake *) r->target_buf;
 
-      /* Do the same for the tree of buffers we depend on */
-      drm_intel_fake_calculate_domains(r->target_buf);
+               /* Do the same for the tree of buffers we depend on */
+               drm_intel_fake_calculate_domains(r->target_buf);
 
-      target_fake->read_domains |= r->read_domains;
-      target_fake->write_domain |= r->write_domain;
-   }
+               target_fake->read_domains |= r->read_domains;
+               target_fake->write_domain |= r->write_domain;
+       }
 }
 
-
 static int
 drm_intel_fake_reloc_and_validate_buffer(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-   int i, ret;
-
-   assert(bo_fake->map_count == 0);
-
-   for (i = 0; i < bo_fake->nr_relocs; i++) {
-      struct fake_buffer_reloc *r = &bo_fake->relocs[i];
-      drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
-      uint32_t reloc_data;
-
-      /* Validate the target buffer if that hasn't been done. */
-      if (!target_fake->validated) {
-         ret = drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
-         if (ret != 0) {
-            if (bo->virtual != NULL)
-                drm_intel_fake_bo_unmap_locked(bo);
-            return ret;
-         }
-      }
-
-      /* Calculate the value of the relocation entry. */
-      if (r->target_buf->offset != r->last_target_offset) {
-        reloc_data = r->target_buf->offset + r->delta;
-
-        if (bo->virtual == NULL)
-           drm_intel_fake_bo_map_locked(bo, 1);
-
-        *(uint32_t *)((uint8_t *)bo->virtual + r->offset) = reloc_data;
-
-        r->last_target_offset = r->target_buf->offset;
-      }
-   }
-
-   if (bo->virtual != NULL)
-      drm_intel_fake_bo_unmap_locked(bo);
-
-   if (bo_fake->write_domain != 0) {
-      if (!(bo_fake->flags & (BM_NO_BACKING_STORE|BM_PINNED))) {
-         if (bo_fake->backing_store == 0)
-            alloc_backing_store(bo);
-      }
-      bo_fake->card_dirty = 1;
-      bufmgr_fake->performed_rendering = 1;
-   }
-
-   return drm_intel_fake_bo_validate(bo);
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+       int i, ret;
+
+       assert(bo_fake->map_count == 0);
+
+       for (i = 0; i < bo_fake->nr_relocs; i++) {
+               struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+               drm_intel_bo_fake *target_fake =
+                   (drm_intel_bo_fake *) r->target_buf;
+               uint32_t reloc_data;
+
+               /* Validate the target buffer if that hasn't been done. */
+               if (!target_fake->validated) {
+                       ret =
+                           drm_intel_fake_reloc_and_validate_buffer(r->target_buf);
+                       if (ret != 0) {
+                               if (bo->virtual != NULL)
+                                       drm_intel_fake_bo_unmap_locked(bo);
+                               return ret;
+                       }
+               }
+
+               /* Calculate the value of the relocation entry. */
+               if (r->target_buf->offset != r->last_target_offset) {
+                       reloc_data = r->target_buf->offset + r->delta;
+
+                       if (bo->virtual == NULL)
+                               drm_intel_fake_bo_map_locked(bo, 1);
+
+                       *(uint32_t *) ((uint8_t *) bo->virtual + r->offset) =
+                           reloc_data;
+
+                       r->last_target_offset = r->target_buf->offset;
+               }
+       }
+
+       if (bo->virtual != NULL)
+               drm_intel_fake_bo_unmap_locked(bo);
+
+       if (bo_fake->write_domain != 0) {
+               if (!(bo_fake->flags & (BM_NO_BACKING_STORE | BM_PINNED))) {
+                       if (bo_fake->backing_store == 0)
+                               alloc_backing_store(bo);
+               }
+               bo_fake->card_dirty = 1;
+               bufmgr_fake->performed_rendering = 1;
+       }
+
+       return drm_intel_fake_bo_validate(bo);
 }
 
 static void
 drm_intel_bo_fake_post_submit(drm_intel_bo *bo)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo;
-   int i;
-
-   for (i = 0; i < bo_fake->nr_relocs; i++) {
-      struct fake_buffer_reloc *r = &bo_fake->relocs[i];
-      drm_intel_bo_fake *target_fake = (drm_intel_bo_fake *)r->target_buf;
-
-      if (target_fake->validated)
-        drm_intel_bo_fake_post_submit(r->target_buf);
-
-      DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
-         bo_fake->name, (uint32_t)bo->offset, r->offset,
-         target_fake->name, (uint32_t)r->target_buf->offset, r->delta);
-   }
-
-   assert(bo_fake->map_count == 0);
-   bo_fake->validated = 0;
-   bo_fake->read_domains = 0;
-   bo_fake->write_domain = 0;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo;
+       int i;
+
+       for (i = 0; i < bo_fake->nr_relocs; i++) {
+               struct fake_buffer_reloc *r = &bo_fake->relocs[i];
+               drm_intel_bo_fake *target_fake =
+                   (drm_intel_bo_fake *) r->target_buf;
+
+               if (target_fake->validated)
+                       drm_intel_bo_fake_post_submit(r->target_buf);
+
+               DBG("%s@0x%08x + 0x%08x -> %s@0x%08x + 0x%08x\n",
+                   bo_fake->name, (uint32_t) bo->offset, r->offset,
+                   target_fake->name, (uint32_t) r->target_buf->offset,
+                   r->delta);
+       }
+
+       assert(bo_fake->map_count == 0);
+       bo_fake->validated = 0;
+       bo_fake->read_domains = 0;
+       bo_fake->write_domain = 0;
 }
 
-
-void drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
-                                            int (*exec)(drm_intel_bo *bo,
-                                                        unsigned int used,
-                                                        void *priv),
+void
+drm_intel_bufmgr_fake_set_exec_callback(drm_intel_bufmgr *bufmgr,
+                                            int (*exec)(drm_intel_bo *bo,
+                                                        unsigned int used,
+                                                        void *priv),
                                             void *priv)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
+       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
 
-   bufmgr_fake->exec = exec;
-   bufmgr_fake->exec_priv = priv;
+       bufmgr_fake->exec = exec;
+       bufmgr_fake->exec_priv = priv;
 }
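
A hedged sketch of an exec hook as a DDX driver might install one,
bypassing the DRM_I915_BATCHBUFFER ioctl path in drm_intel_fake_bo_exec()
below; my_exec(), struct my_hw, and my_submit_batch() are illustrative:

        static int my_exec(drm_intel_bo *batch, unsigned int used, void *priv)
        {
                struct my_hw *hw = priv;

                /* batch->offset is valid here: the BO was just validated. */
                return my_submit_batch(hw, batch->offset, used);
        }

        ...
        drm_intel_bufmgr_fake_set_exec_callback(bufmgr, my_exec, &hw);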
 
 static int
 drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
-                      drm_clip_rect_t *cliprects, int num_cliprects,
-                      int DR4)
+                      drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo->bufmgr;
-   drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *)bo;
-   struct drm_i915_batchbuffer batch;
-   int ret;
-   int retry_count = 0;
-
-   pthread_mutex_lock(&bufmgr_fake->lock);
-
-   bufmgr_fake->performed_rendering = 0;
-
-   drm_intel_fake_calculate_domains(bo);
-
-   batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
-
-   /* we've ran out of RAM so blow the whole lot away and retry */
- restart:
-   ret = drm_intel_fake_reloc_and_validate_buffer(bo);
-   if (bufmgr_fake->fail == 1) {
-      if (retry_count == 0) {
-         retry_count++;
-         drm_intel_fake_kick_all_locked(bufmgr_fake);
-         bufmgr_fake->fail = 0;
-         goto restart;
-      } else /* dump out the memory here */
-         mmDumpMemInfo(bufmgr_fake->heap);
-   }
-
-   assert(ret == 0);
-
-   if (bufmgr_fake->exec != NULL) {
-      int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
-      if (ret != 0) {
-        pthread_mutex_unlock(&bufmgr_fake->lock);
-        return ret;
-      }
-   } else {
-      batch.start = bo->offset;
-      batch.used = used;
-      batch.cliprects = cliprects;
-      batch.num_cliprects = num_cliprects;
-      batch.DR1 = 0;
-      batch.DR4 = DR4;
-
-      if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER, &batch,
-                         sizeof(batch))) {
-        drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
-        pthread_mutex_unlock(&bufmgr_fake->lock);
-        return -errno;
-      }
-   }
-
-   drm_intel_fake_fence_validated(bo->bufmgr);
-
-   drm_intel_bo_fake_post_submit(bo);
-
-   pthread_mutex_unlock(&bufmgr_fake->lock);
-
-   return 0;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo->bufmgr;
+       drm_intel_bo_fake *batch_fake = (drm_intel_bo_fake *) bo;
+       struct drm_i915_batchbuffer batch;
+       int ret;
+       int retry_count = 0;
+
+       pthread_mutex_lock(&bufmgr_fake->lock);
+
+       bufmgr_fake->performed_rendering = 0;
+
+       drm_intel_fake_calculate_domains(bo);
+
+       batch_fake->read_domains = I915_GEM_DOMAIN_COMMAND;
+
+       /* we've run out of RAM, so blow the whole lot away and retry */
+restart:
+       ret = drm_intel_fake_reloc_and_validate_buffer(bo);
+       if (bufmgr_fake->fail == 1) {
+               if (retry_count == 0) {
+                       retry_count++;
+                       drm_intel_fake_kick_all_locked(bufmgr_fake);
+                       bufmgr_fake->fail = 0;
+                       goto restart;
+               } else {
+                       /* dump out the memory here */
+                       mmDumpMemInfo(bufmgr_fake->heap);
+               }
+       }
+
+       assert(ret == 0);
+
+       if (bufmgr_fake->exec != NULL) {
+               int ret = bufmgr_fake->exec(bo, used, bufmgr_fake->exec_priv);
+               if (ret != 0) {
+                       pthread_mutex_unlock(&bufmgr_fake->lock);
+                       return ret;
+               }
+       } else {
+               batch.start = bo->offset;
+               batch.used = used;
+               batch.cliprects = cliprects;
+               batch.num_cliprects = num_cliprects;
+               batch.DR1 = 0;
+               batch.DR4 = DR4;
+
+               if (drmCommandWrite(bufmgr_fake->fd, DRM_I915_BATCHBUFFER,
+                                   &batch, sizeof(batch))) {
+                       drmMsg("DRM_I915_BATCHBUFFER: %d\n", -errno);
+                       pthread_mutex_unlock(&bufmgr_fake->lock);
+                       return -errno;
+               }
+       }
+
+       drm_intel_fake_fence_validated(bo->bufmgr);
+
+       drm_intel_bo_fake_post_submit(bo);
+
+       pthread_mutex_unlock(&bufmgr_fake->lock);
+
+       return 0;
 }
 
 /**
@@ -1408,32 +1457,33 @@ drm_intel_fake_bo_exec(drm_intel_bo *bo, int used,
  * a set smaller than the aperture.
  */
 static int
-drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
+drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bo_array[0]->bufmgr;
-   unsigned int sz = 0;
-   int i;
-
-   for (i = 0; i < count; i++) {
-      drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)bo_array[i];
-
-      if (bo_fake == NULL)
-        continue;
-
-      if (!bo_fake->is_static)
-        sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
-      sz += bo_fake->child_size;
-   }
-
-   if (sz > bufmgr_fake->size) {
-      DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n",
-         sz / 1024, bufmgr_fake->size / 1024);
-      return -1;
-   }
-
-   DBG("drm_check_space: sz %dkb vs bufgr %dkb\n", sz / 1024 ,
-       bufmgr_fake->size / 1024);
-   return 0;
+       drm_intel_bufmgr_fake *bufmgr_fake =
+           (drm_intel_bufmgr_fake *) bo_array[0]->bufmgr;
+       unsigned int sz = 0;
+       int i;
+
+       for (i = 0; i < count; i++) {
+               drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) bo_array[i];
+
+               if (bo_fake == NULL)
+                       continue;
+
+               if (!bo_fake->is_static)
+                       sz += ALIGN(bo_array[i]->size, bo_fake->alignment);
+               sz += bo_fake->child_size;
+       }
+
+       if (sz > bufmgr_fake->size) {
+               DBG("check_space: overflowed bufmgr size, %dkb vs %dkb\n",
+                   sz / 1024, bufmgr_fake->size / 1024);
+               return -1;
+       }
+
+       DBG("drm_check_space: sz %dkb vs bufmgr %dkb\n", sz / 1024,
+           bufmgr_fake->size / 1024);
+       return 0;
 }
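
Callers use the generic drm_intel_bufmgr_check_aperture_space() wrapper; a
minimal, hypothetical flush heuristic built on the -1/0 return above
(flush_batch() is illustrative):

        drm_intel_bo *bos[] = { batch_bo, vbo, tex_bo };

        if (drm_intel_bufmgr_check_aperture_space(bos, 3) != 0)
                flush_batch();  /* submit now and start a new batch */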
 
 /**
@@ -1443,88 +1493,93 @@ drm_intel_fake_check_aperture_space(drm_intel_bo **bo_array, int count)
  * Used by the X Server on LeaveVT, when the card memory is no longer our
  * own.
  */
-void
-drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
+void drm_intel_bufmgr_fake_evict_all(drm_intel_bufmgr *bufmgr)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
-   struct block *block, *tmp;
-
-   pthread_mutex_lock(&bufmgr_fake->lock);
-
-   bufmgr_fake->need_fence = 1;
-   bufmgr_fake->fail = 0;
-
-   /* Wait for hardware idle.  We don't know where acceleration has been
-    * happening, so we'll need to wait anyway before letting anything get
-    * put on the card again.
-    */
-   drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
-
-   /* Check that we hadn't released the lock without having fenced the last
-    * set of buffers.
-    */
-   assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
-   assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
-
-   DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
-      drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *)block->bo;
-      /* Releases the memory, and memcpys dirty contents out if necessary. */
-      free_block(bufmgr_fake, block, 0);
-      bo_fake->block = NULL;
-   }
-
-   pthread_mutex_unlock(&bufmgr_fake->lock);
+       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
+       struct block *block, *tmp;
+
+       pthread_mutex_lock(&bufmgr_fake->lock);
+
+       bufmgr_fake->need_fence = 1;
+       bufmgr_fake->fail = 0;
+
+       /* Wait for hardware idle.  We don't know where acceleration has been
+        * happening, so we'll need to wait anyway before letting anything get
+        * put on the card again.
+        */
+       drm_intel_bufmgr_fake_wait_idle(bufmgr_fake);
+
+       /* Check that we hadn't released the lock without having fenced the last
+        * set of buffers.
+        */
+       assert(DRMLISTEMPTY(&bufmgr_fake->fenced));
+       assert(DRMLISTEMPTY(&bufmgr_fake->on_hardware));
+
+       DRMLISTFOREACHSAFE(block, tmp, &bufmgr_fake->lru) {
+               drm_intel_bo_fake *bo_fake = (drm_intel_bo_fake *) block->bo;
+               /* Releases the memory, and memcpys dirty contents out if
+                * necessary.
+                */
+               free_block(bufmgr_fake, block, 0);
+               bo_fake->block = NULL;
+       }
+
+       pthread_mutex_unlock(&bufmgr_fake->lock);
 }
+
 void drm_intel_bufmgr_fake_set_last_dispatch(drm_intel_bufmgr *bufmgr,
-                                        volatile unsigned int *last_dispatch)
+                                            volatile unsigned int
+                                            *last_dispatch)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *)bufmgr;
+       drm_intel_bufmgr_fake *bufmgr_fake = (drm_intel_bufmgr_fake *) bufmgr;
 
-   bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
+       bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
 }
 
-drm_intel_bufmgr *
-drm_intel_bufmgr_fake_init(int fd,
-                      unsigned long low_offset, void *low_virtual,
-                      unsigned long size,
-                      volatile unsigned int *last_dispatch)
+drm_intel_bufmgr *drm_intel_bufmgr_fake_init(int fd,
+                                            unsigned long low_offset,
+                                            void *low_virtual,
+                                            unsigned long size,
+                                            volatile unsigned int
+                                            *last_dispatch)
 {
-   drm_intel_bufmgr_fake *bufmgr_fake;
-
-   bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
-
-   if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
-      free(bufmgr_fake);
-      return NULL;
-   }
-
-   /* Initialize allocator */
-   DRMINITLISTHEAD(&bufmgr_fake->fenced);
-   DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
-   DRMINITLISTHEAD(&bufmgr_fake->lru);
-
-   bufmgr_fake->low_offset = low_offset;
-   bufmgr_fake->virtual = low_virtual;
-   bufmgr_fake->size = size;
-   bufmgr_fake->heap = mmInit(low_offset, size);
-
-   /* Hook in methods */
-   bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
-   bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
-   bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
-   bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
-   bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
-   bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
-   bufmgr_fake->bufmgr.bo_wait_rendering = drm_intel_fake_bo_wait_rendering;
-   bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
-   bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
-   bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
-   bufmgr_fake->bufmgr.check_aperture_space = drm_intel_fake_check_aperture_space;
-   bufmgr_fake->bufmgr.debug = 0;
-
-   bufmgr_fake->fd = fd;
-   bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
-
-   return &bufmgr_fake->bufmgr;
+       drm_intel_bufmgr_fake *bufmgr_fake;
+
+       bufmgr_fake = calloc(1, sizeof(*bufmgr_fake));
+       if (bufmgr_fake == NULL)
+               return NULL;
+
+       if (pthread_mutex_init(&bufmgr_fake->lock, NULL) != 0) {
+               free(bufmgr_fake);
+               return NULL;
+       }
+
+       /* Initialize allocator */
+       DRMINITLISTHEAD(&bufmgr_fake->fenced);
+       DRMINITLISTHEAD(&bufmgr_fake->on_hardware);
+       DRMINITLISTHEAD(&bufmgr_fake->lru);
+
+       bufmgr_fake->low_offset = low_offset;
+       bufmgr_fake->virtual = low_virtual;
+       bufmgr_fake->size = size;
+       bufmgr_fake->heap = mmInit(low_offset, size);
+
+       /* Hook in methods */
+       bufmgr_fake->bufmgr.bo_alloc = drm_intel_fake_bo_alloc;
+       bufmgr_fake->bufmgr.bo_alloc_for_render = drm_intel_fake_bo_alloc;
+       bufmgr_fake->bufmgr.bo_reference = drm_intel_fake_bo_reference;
+       bufmgr_fake->bufmgr.bo_unreference = drm_intel_fake_bo_unreference;
+       bufmgr_fake->bufmgr.bo_map = drm_intel_fake_bo_map;
+       bufmgr_fake->bufmgr.bo_unmap = drm_intel_fake_bo_unmap;
+       bufmgr_fake->bufmgr.bo_wait_rendering =
+           drm_intel_fake_bo_wait_rendering;
+       bufmgr_fake->bufmgr.bo_emit_reloc = drm_intel_fake_emit_reloc;
+       bufmgr_fake->bufmgr.destroy = drm_intel_fake_destroy;
+       bufmgr_fake->bufmgr.bo_exec = drm_intel_fake_bo_exec;
+       bufmgr_fake->bufmgr.check_aperture_space =
+           drm_intel_fake_check_aperture_space;
+       bufmgr_fake->bufmgr.debug = 0;
+
+       bufmgr_fake->fd = fd;
+       bufmgr_fake->last_dispatch = (volatile int *)last_dispatch;
+
+       return &bufmgr_fake->bufmgr;
 }
-
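
Putting the pieces together, a hypothetical bring-up of the fake manager;
fd, aperture_base, aperture_map, and hw_status_page stand in for values the
caller obtains from its own DRM and aperture setup:

        drm_intel_bufmgr *bufmgr =
                drm_intel_bufmgr_fake_init(fd, aperture_base, aperture_map,
                                           8 * 1024 * 1024, hw_status_page);
        drm_intel_bo *bo = drm_intel_bo_alloc(bufmgr, "tex", 4096, 4096);
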
index cf35c81..aa55f2d 100644 (file)
 #include "i915_drm.h"
 
 #define DBG(...) do {                                  \
-   if (bufmgr_gem->bufmgr.debug)                       \
-      fprintf(stderr, __VA_ARGS__);                    \
+       if (bufmgr_gem->bufmgr.debug)                   \
+               fprintf(stderr, __VA_ARGS__);           \
 } while (0)
 
 typedef struct _drm_intel_bo_gem drm_intel_bo_gem;
 
 struct drm_intel_gem_bo_bucket {
-   drmMMListHead head;
-   unsigned long size;
+       drmMMListHead head;
+       unsigned long size;
 };
 
 /* Only cache objects up to 64MB.  Bigger than that, and the rounding of the
@@ -78,174 +78,178 @@ struct drm_intel_gem_bo_bucket {
  */
 #define DRM_INTEL_GEM_BO_BUCKETS       14
 typedef struct _drm_intel_bufmgr_gem {
-    drm_intel_bufmgr bufmgr;
+       drm_intel_bufmgr bufmgr;
 
-    int fd;
+       int fd;
 
-    int max_relocs;
+       int max_relocs;
 
-    pthread_mutex_t lock;
+       pthread_mutex_t lock;
 
-    struct drm_i915_gem_exec_object *exec_objects;
-    drm_intel_bo **exec_bos;
-    int exec_size;
-    int exec_count;
+       struct drm_i915_gem_exec_object *exec_objects;
+       drm_intel_bo **exec_bos;
+       int exec_size;
+       int exec_count;
 
-    /** Array of lists of cached gem objects of power-of-two sizes */
-    struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
+       /** Array of lists of cached gem objects of power-of-two sizes */
+       struct drm_intel_gem_bo_bucket cache_bucket[DRM_INTEL_GEM_BO_BUCKETS];
 
-    uint64_t gtt_size;
-    int available_fences;
-    int pci_device;
-    char bo_reuse;
+       uint64_t gtt_size;
+       int available_fences;
+       int pci_device;
+       char bo_reuse;
 } drm_intel_bufmgr_gem;
 
 struct _drm_intel_bo_gem {
-    drm_intel_bo bo;
-
-    atomic_t refcount;
-    /** Boolean whether the mmap ioctl has been called for this buffer yet. */
-    uint32_t gem_handle;
-    const char *name;
-
-    /**
-     * Kenel-assigned global name for this object
-     */
-    unsigned int global_name;
-    
-    /**
-     * Index of the buffer within the validation list while preparing a
-     * batchbuffer execution.
-     */
-    int validate_index;
-
-    /**
-     * Current tiling mode
-     */
-    uint32_t tiling_mode;
-    uint32_t swizzle_mode;
-
-    time_t free_time;
-
-    /** Array passed to the DRM containing relocation information. */
-    struct drm_i915_gem_relocation_entry *relocs;
-    /** Array of bos corresponding to relocs[i].target_handle */
-    drm_intel_bo **reloc_target_bo;
-    /** Number of entries in relocs */
-    int reloc_count;
-    /** Mapped address for the buffer, saved across map/unmap cycles */
-    void *mem_virtual;
-    /** GTT virtual address for the buffer, saved across map/unmap cycles */
-    void *gtt_virtual;
-
-    /** BO cache list */
-    drmMMListHead head;
-
-    /**
-     * Boolean of whether this BO and its children have been included in
-     * the current drm_intel_bufmgr_check_aperture_space() total.
-     */
-    char included_in_check_aperture;
-
-    /**
-     * Boolean of whether this buffer has been used as a relocation
-     * target and had its size accounted for, and thus can't have any
-     * further relocations added to it.
-     */
-     char used_as_reloc_target;
-
-    /**
-     * Boolean of whether this buffer can be re-used
-     */
-    char reusable;
-
-    /**
-     * Size in bytes of this buffer and its relocation descendents.
-     *
-     * Used to avoid costly tree walking in drm_intel_bufmgr_check_aperture in
-     * the common case.
-     */
-    int reloc_tree_size;
-    /**
-     * Number of potential fence registers required by this buffer and its
-     * relocations.
-     */
-    int reloc_tree_fences;
+       drm_intel_bo bo;
+
+       atomic_t refcount;
+       uint32_t gem_handle;
+       const char *name;
+
+       /**
+        * Kernel-assigned global name for this object
+        */
+       unsigned int global_name;
+
+       /**
+        * Index of the buffer within the validation list while preparing a
+        * batchbuffer execution.
+        */
+       int validate_index;
+
+       /**
+        * Current tiling mode
+        */
+       uint32_t tiling_mode;
+       uint32_t swizzle_mode;
+
+       time_t free_time;
+
+       /** Array passed to the DRM containing relocation information. */
+       struct drm_i915_gem_relocation_entry *relocs;
+       /** Array of bos corresponding to relocs[i].target_handle */
+       drm_intel_bo **reloc_target_bo;
+       /** Number of entries in relocs */
+       int reloc_count;
+       /** Mapped address for the buffer, saved across map/unmap cycles */
+       void *mem_virtual;
+       /** GTT virtual address for the buffer, saved across map/unmap cycles */
+       void *gtt_virtual;
+
+       /** BO cache list */
+       drmMMListHead head;
+
+       /**
+        * Boolean of whether this BO and its children have been included in
+        * the current drm_intel_bufmgr_check_aperture_space() total.
+        */
+       char included_in_check_aperture;
+
+       /**
+        * Boolean of whether this buffer has been used as a relocation
+        * target and had its size accounted for, and thus can't have any
+        * further relocations added to it.
+        */
+       char used_as_reloc_target;
+
+       /**
+        * Boolean of whether this buffer can be re-used
+        */
+       char reusable;
+
+       /**
+        * Size in bytes of this buffer and its relocation descendents.
+        *
+        * Used to avoid costly tree walking in
+        * drm_intel_bufmgr_check_aperture in the common case.
+        */
+       int reloc_tree_size;
+
+       /**
+        * Number of potential fence registers required by this buffer and its
+        * relocations.
+        */
+       int reloc_tree_fences;
 };
 
 static unsigned int
-drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);
+drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count);
 
 static unsigned int
-drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);
+drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count);
 
 static int
 drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t *swizzle_mode);
 
 static int
 drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t stride);
 
-static void
-drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
+static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo);
 
-static void
-drm_intel_gem_bo_unreference(drm_intel_bo *bo);
+static void drm_intel_gem_bo_unreference(drm_intel_bo *bo);
 
-static void
-drm_intel_gem_bo_free(drm_intel_bo *bo);
+static void drm_intel_gem_bo_free(drm_intel_bo *bo);
 
 static struct drm_intel_gem_bo_bucket *
 drm_intel_gem_bo_bucket_for_size(drm_intel_bufmgr_gem *bufmgr_gem,
                                 unsigned long size)
 {
-    int i;
+       int i;
 
-    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
-       struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
-       if (bucket->size >= size) {
-           return bucket;
+       for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+               struct drm_intel_gem_bo_bucket *bucket =
+                   &bufmgr_gem->cache_bucket[i];
+               if (bucket->size >= size) {
+                       return bucket;
+               }
        }
-    }
 
-    return NULL;
+       return NULL;
 }
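
The lookup above returns the first bucket large enough for the request,
which is the tightest fit only if cache_bucket[] is kept sorted by
ascending size. A sketch of an initialization consistent with that
assumption (the real setup lives in the bufmgr init code outside this
hunk; the power-of-two sizing here is an assumption):

        for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
                /* Page size times ascending powers of two, so the
                 * linear scan above finds the tightest fit. */
                bufmgr_gem->cache_bucket[i].size = 4096 * (1 << i);
                DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
        }
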
 
-static void drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
+static void
+drm_intel_gem_dump_validation_list(drm_intel_bufmgr_gem *bufmgr_gem)
 {
-    int i, j;
-
-    for (i = 0; i < bufmgr_gem->exec_count; i++) {
-       drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
-       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-
-       if (bo_gem->relocs == NULL) {
-           DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle, bo_gem->name);
-           continue;
-       }
-
-       for (j = 0; j < bo_gem->reloc_count; j++) {
-           drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
-           drm_intel_bo_gem *target_gem = (drm_intel_bo_gem *)target_bo;
-
-           DBG("%2d: %d (%s)@0x%08llx -> %d (%s)@0x%08lx + 0x%08x\n",
-               i,
-               bo_gem->gem_handle, bo_gem->name,
-               (unsigned long long)bo_gem->relocs[j].offset,
-               target_gem->gem_handle, target_gem->name, target_bo->offset,
-               bo_gem->relocs[j].delta);
+       int i, j;
+
+       for (i = 0; i < bufmgr_gem->exec_count; i++) {
+               drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+               drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+               if (bo_gem->relocs == NULL) {
+                       DBG("%2d: %d (%s)\n", i, bo_gem->gem_handle,
+                           bo_gem->name);
+                       continue;
+               }
+
+               for (j = 0; j < bo_gem->reloc_count; j++) {
+                       drm_intel_bo *target_bo = bo_gem->reloc_target_bo[j];
+                       drm_intel_bo_gem *target_gem =
+                           (drm_intel_bo_gem *) target_bo;
+
+                       DBG("%2d: %d (%s)@0x%08llx -> "
+                           "%d (%s)@0x%08lx + 0x%08x\n",
+                           i,
+                           bo_gem->gem_handle, bo_gem->name,
+                           (unsigned long long)bo_gem->relocs[j].offset,
+                           target_gem->gem_handle,
+                           target_gem->name,
+                           target_bo->offset,
+                           bo_gem->relocs[j].delta);
+               }
        }
-    }
 }
 
 static void
 drm_intel_gem_bo_reference(drm_intel_bo *bo)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
-    assert(atomic_read(&bo_gem->refcount) > 0);
-    atomic_inc(&bo_gem->refcount);
+       assert(atomic_read(&bo_gem->refcount) > 0);
+       atomic_inc(&bo_gem->refcount);
 }
 
 /**
@@ -259,42 +263,41 @@ drm_intel_gem_bo_reference(drm_intel_bo *bo)
 static void
 drm_intel_add_validate_buffer(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    int index;
-
-    if (bo_gem->validate_index != -1)
-       return;
-
-    /* Extend the array of validation entries as necessary. */
-    if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
-       int new_size = bufmgr_gem->exec_size * 2;
-
-       if (new_size == 0)
-           new_size = 5;
-
-       bufmgr_gem->exec_objects =
-           realloc(bufmgr_gem->exec_objects,
-                   sizeof(*bufmgr_gem->exec_objects) * new_size);
-       bufmgr_gem->exec_bos =
-           realloc(bufmgr_gem->exec_bos,
-                   sizeof(*bufmgr_gem->exec_bos) * new_size);
-       bufmgr_gem->exec_size = new_size;
-    }
-
-    index = bufmgr_gem->exec_count;
-    bo_gem->validate_index = index;
-    /* Fill in array entry */
-    bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
-    bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
-    bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t)bo_gem->relocs;
-    bufmgr_gem->exec_objects[index].alignment = 0;
-    bufmgr_gem->exec_objects[index].offset = 0;
-    bufmgr_gem->exec_bos[index] = bo;
-    drm_intel_gem_bo_reference(bo);
-    bufmgr_gem->exec_count++;
-}
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int index;
+
+       if (bo_gem->validate_index != -1)
+               return;
+
+       /* Extend the array of validation entries as necessary. */
+       if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+               int new_size = bufmgr_gem->exec_size * 2;
+
+               if (new_size == 0)
+                       new_size = 5;
+
+               bufmgr_gem->exec_objects =
+                   realloc(bufmgr_gem->exec_objects,
+                           sizeof(*bufmgr_gem->exec_objects) * new_size);
+               bufmgr_gem->exec_bos =
+                   realloc(bufmgr_gem->exec_bos,
+                           sizeof(*bufmgr_gem->exec_bos) * new_size);
+               bufmgr_gem->exec_size = new_size;
+       }
 
+       index = bufmgr_gem->exec_count;
+       bo_gem->validate_index = index;
+       /* Fill in array entry */
+       bufmgr_gem->exec_objects[index].handle = bo_gem->gem_handle;
+       bufmgr_gem->exec_objects[index].relocation_count = bo_gem->reloc_count;
+       bufmgr_gem->exec_objects[index].relocs_ptr = (uintptr_t) bo_gem->relocs;
+       bufmgr_gem->exec_objects[index].alignment = 0;
+       bufmgr_gem->exec_objects[index].offset = 0;
+       bufmgr_gem->exec_bos[index] = bo;
+       drm_intel_gem_bo_reference(bo);
+       bufmgr_gem->exec_count++;
+}
 
 #define RELOC_BUF_SIZE(x) ((I915_RELOC_HEADER + x * I915_RELOC0_STRIDE) * \
        sizeof(uint32_t))
@@ -302,50 +305,48 @@ drm_intel_add_validate_buffer(drm_intel_bo *bo)
 static int
 drm_intel_setup_reloc_list(drm_intel_bo *bo)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    unsigned int max_relocs = bufmgr_gem->max_relocs;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       unsigned int max_relocs = bufmgr_gem->max_relocs;
 
-    if (bo->size / 4 < max_relocs)
-           max_relocs = bo->size / 4;
+       if (bo->size / 4 < max_relocs)
+               max_relocs = bo->size / 4;
 
-    bo_gem->relocs = malloc(max_relocs *
-                           sizeof(struct drm_i915_gem_relocation_entry));
-    bo_gem->reloc_target_bo = malloc(max_relocs *
-                                    sizeof(drm_intel_bo *));
+       bo_gem->relocs = malloc(max_relocs *
+                               sizeof(struct drm_i915_gem_relocation_entry));
+       bo_gem->reloc_target_bo = malloc(max_relocs * sizeof(drm_intel_bo *));
 
-    return 0;
+       return 0;
 }
 
 static int
 drm_intel_gem_bo_busy(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_busy busy;
-    int ret;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_busy busy;
+       int ret;
 
-    memset(&busy, 0, sizeof(busy));
-    busy.handle = bo_gem->gem_handle;
+       memset(&busy, 0, sizeof(busy));
+       busy.handle = bo_gem->gem_handle;
 
-    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
 
-    return (ret == 0 && busy.busy);
+       return (ret == 0 && busy.busy);
 }
 
 static int
 drm_intel_gem_bo_madvise(drm_intel_bufmgr_gem *bufmgr_gem,
-                        drm_intel_bo_gem *bo_gem,
-                        int state)
+                        drm_intel_bo_gem *bo_gem, int state)
 {
-    struct drm_i915_gem_madvise madv;
+       struct drm_i915_gem_madvise madv;
 
-    madv.handle = bo_gem->gem_handle;
-    madv.madv = state;
-    madv.retained = 1;
-    ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+       madv.handle = bo_gem->gem_handle;
+       madv.madv = state;
+       madv.retained = 1;
+       ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
 
-    return madv.retained;
+       return madv.retained;
 }
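
madv.retained reports whether the kernel still holds the buffer's backing
pages, so this one call serves both directions: offering pages back with
I915_MADV_DONTNEED when a BO enters the cache, and revalidating them with
I915_MADV_WILLNEED on reuse. A sketch of the reuse side (bo_reclaim() is
a hypothetical helper mirroring the allocator's retry logic below):

        /* Returns 1 if the cached BO's pages survived, 0 if the kernel
         * purged them and the BO had to be freed instead. */
        static int bo_reclaim(drm_intel_bufmgr_gem *bufmgr_gem,
                              drm_intel_bo_gem *bo_gem)
        {
                if (drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem,
                                             I915_MADV_WILLNEED))
                        return 1;
                drm_intel_gem_bo_free(&bo_gem->bo);
                return 0;
        }
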
 
 /* drop the oldest entries that have been purged by the kernel */
@@ -353,131 +354,146 @@ static void
 drm_intel_gem_bo_cache_purge_bucket(drm_intel_bufmgr_gem *bufmgr_gem,
                                    struct drm_intel_gem_bo_bucket *bucket)
 {
-    while (!DRMLISTEMPTY(&bucket->head)) {
-       drm_intel_bo_gem *bo_gem;
+       while (!DRMLISTEMPTY(&bucket->head)) {
+               drm_intel_bo_gem *bo_gem;
 
-       bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
-       if (drm_intel_gem_bo_madvise (bufmgr_gem, bo_gem, I915_MADV_DONTNEED))
-           break;
+               bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+                                     bucket->head.next, head);
+               if (drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem,
+                                            I915_MADV_DONTNEED))
+                       break;
 
-       DRMLISTDEL(&bo_gem->head);
-       drm_intel_gem_bo_free(&bo_gem->bo);
-    }
+               DRMLISTDEL(&bo_gem->head);
+               drm_intel_gem_bo_free(&bo_gem->bo);
+       }
 }
 
 static drm_intel_bo *
-drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr, const char *name,
-                               unsigned long size, unsigned int alignment,
+drm_intel_gem_bo_alloc_internal(drm_intel_bufmgr *bufmgr,
+                               const char *name,
+                               unsigned long size,
+                               unsigned int alignment,
                                int for_render)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-    drm_intel_bo_gem *bo_gem;
-    unsigned int page_size = getpagesize();
-    int ret;
-    struct drm_intel_gem_bo_bucket *bucket;
-    int alloc_from_cache;
-    unsigned long bo_size;
-
-    /* Round the allocated size up to a power of two number of pages. */
-    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
-
-    /* If we don't have caching at this size, don't actually round the
-     * allocation up.
-     */
-    if (bucket == NULL) {
-       bo_size = size;
-       if (bo_size < page_size)
-           bo_size = page_size;
-    } else {
-       bo_size = bucket->size;
-    }
-
-    pthread_mutex_lock(&bufmgr_gem->lock);
-    /* Get a buffer out of the cache if available */
-retry:
-    alloc_from_cache = 0;
-    if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
-       if (for_render) {
-           /* Allocate new render-target BOs from the tail (MRU)
-            * of the list, as it will likely be hot in the GPU cache
-            * and in the aperture for us.
-            */
-           bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.prev, head);
-           DRMLISTDEL(&bo_gem->head);
-           alloc_from_cache = 1;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+       drm_intel_bo_gem *bo_gem;
+       unsigned int page_size = getpagesize();
+       int ret;
+       struct drm_intel_gem_bo_bucket *bucket;
+       int alloc_from_cache;
+       unsigned long bo_size;
+
+       /* Round the allocated size up to a power of two number of pages. */
+       bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, size);
+
+       /* If we don't have caching at this size, don't actually round the
+        * allocation up.
+        */
+       if (bucket == NULL) {
+               bo_size = size;
+               if (bo_size < page_size)
+                       bo_size = page_size;
        } else {
-           /* For non-render-target BOs (where we're probably going to map it
-            * first thing in order to fill it with data), check if the
-            * last BO in the cache is unbusy, and only reuse in that case.
-            * Otherwise, allocating a new buffer is probably faster than
-            * waiting for the GPU to finish.
-            */
-           bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
-           if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
-               alloc_from_cache = 1;
-               DRMLISTDEL(&bo_gem->head);
-           }
+               bo_size = bucket->size;
        }
 
-       if (alloc_from_cache) {
-           if(!drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_WILLNEED)) {
-               drm_intel_gem_bo_free(&bo_gem->bo);
-               drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem, bucket);
-               goto retry;
-           }
+       pthread_mutex_lock(&bufmgr_gem->lock);
+       /* Get a buffer out of the cache if available */
+retry:
+       alloc_from_cache = 0;
+       if (bucket != NULL && !DRMLISTEMPTY(&bucket->head)) {
+               if (for_render) {
+                       /* Allocate new render-target BOs from the tail (MRU)
+                        * of the list, as it will likely be hot in the GPU
+                        * cache and in the aperture for us.
+                        */
+                       bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+                                             bucket->head.prev, head);
+                       DRMLISTDEL(&bo_gem->head);
+                       alloc_from_cache = 1;
+               } else {
+                       /* For non-render-target BOs (where we're probably
+                        * going to map it first thing in order to fill it
+                        * with data), check if the last BO in the cache is
+                        * unbusy, and only reuse in that case. Otherwise,
+                        * allocating a new buffer is probably faster than
+                        * waiting for the GPU to finish.
+                        */
+                       bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+                                             bucket->head.next, head);
+                       if (!drm_intel_gem_bo_busy(&bo_gem->bo)) {
+                               alloc_from_cache = 1;
+                               DRMLISTDEL(&bo_gem->head);
+                       }
+               }
+
+               if (alloc_from_cache) {
+                       if (!drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem,
+                                                     I915_MADV_WILLNEED)) {
+                               drm_intel_gem_bo_free(&bo_gem->bo);
+                               drm_intel_gem_bo_cache_purge_bucket(bufmgr_gem,
+                                                                   bucket);
+                               goto retry;
+                       }
+               }
        }
-    }
-    pthread_mutex_unlock(&bufmgr_gem->lock);
+       pthread_mutex_unlock(&bufmgr_gem->lock);
 
-    if (!alloc_from_cache) {
-       struct drm_i915_gem_create create;
+       if (!alloc_from_cache) {
+               struct drm_i915_gem_create create;
+
+               bo_gem = calloc(1, sizeof(*bo_gem));
+               if (!bo_gem)
+                       return NULL;
+
+               bo_gem->bo.size = bo_size;
+               memset(&create, 0, sizeof(create));
+               create.size = bo_size;
+
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+               bo_gem->gem_handle = create.handle;
+               bo_gem->bo.handle = bo_gem->gem_handle;
+               if (ret != 0) {
+                       free(bo_gem);
+                       return NULL;
+               }
+               bo_gem->bo.bufmgr = bufmgr;
+       }
 
-       bo_gem = calloc(1, sizeof(*bo_gem));
-       if (!bo_gem)
-           return NULL;
+       bo_gem->name = name;
+       atomic_set(&bo_gem->refcount, 1);
+       bo_gem->validate_index = -1;
+       bo_gem->reloc_tree_size = bo_gem->bo.size;
+       bo_gem->reloc_tree_fences = 0;
+       bo_gem->used_as_reloc_target = 0;
+       bo_gem->tiling_mode = I915_TILING_NONE;
+       bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+       bo_gem->reusable = 1;
 
-       bo_gem->bo.size = bo_size;
-       memset(&create, 0, sizeof(create));
-       create.size = bo_size;
+       DBG("bo_create: buf %d (%s) %ldb\n",
+           bo_gem->gem_handle, bo_gem->name, size);
 
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_CREATE, &create);
-       bo_gem->gem_handle = create.handle;
-       bo_gem->bo.handle = bo_gem->gem_handle;
-       if (ret != 0) {
-           free(bo_gem);
-           return NULL;
-       }
-       bo_gem->bo.bufmgr = bufmgr;
-    }
-
-    bo_gem->name = name;
-    atomic_set(&bo_gem->refcount, 1);
-    bo_gem->validate_index = -1;
-    bo_gem->reloc_tree_size = bo_gem->bo.size;
-    bo_gem->reloc_tree_fences = 0;
-    bo_gem->used_as_reloc_target = 0;
-    bo_gem->tiling_mode = I915_TILING_NONE;
-    bo_gem->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
-    bo_gem->reusable = 1;
-
-    DBG("bo_create: buf %d (%s) %ldb\n",
-       bo_gem->gem_handle, bo_gem->name, size);
-
-    return &bo_gem->bo;
+       return &bo_gem->bo;
 }
 
 static drm_intel_bo *
-drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr, const char *name,
-                                 unsigned long size, unsigned int alignment)
+drm_intel_gem_bo_alloc_for_render(drm_intel_bufmgr *bufmgr,
+                                 const char *name,
+                                 unsigned long size,
+                                 unsigned int alignment)
 {
-    return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment, 1);
+       return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment,
+                                              1);
 }
 
 static drm_intel_bo *
-drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
-                      unsigned long size, unsigned int alignment)
+drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr,
+                      const char *name,
+                      unsigned long size,
+                      unsigned int alignment)
 {
-    return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment, 0);
+       return drm_intel_gem_bo_alloc_internal(bufmgr, name, size, alignment,
+                                              0);
 }
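
A minimal usage sketch of the two allocation entry points via the public
wrappers, assuming a bufmgr already created with
drm_intel_bufmgr_gem_init() (not part of this hunk):

        drm_intel_bo *batch = drm_intel_bo_alloc(bufmgr, "batch",
                                                 4096, 4096);
        drm_intel_bo *target = drm_intel_bo_alloc_for_render(bufmgr,
                                                             "target",
                                                             1024 * 1024,
                                                             4096);

        /* ... build and submit work ... */

        drm_intel_bo_unreference(target);
        drm_intel_bo_unreference(batch);
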
 
 /**
@@ -487,432 +503,435 @@ drm_intel_gem_bo_alloc(drm_intel_bufmgr *bufmgr, const char *name,
  * to another.
  */
 drm_intel_bo *
-drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr, const char *name,
+drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
+                                 const char *name,
                                  unsigned int handle)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-    drm_intel_bo_gem *bo_gem;
-    int ret;
-    struct drm_gem_open open_arg;
-    struct drm_i915_gem_get_tiling get_tiling;
-
-    bo_gem = calloc(1, sizeof(*bo_gem));
-    if (!bo_gem)
-       return NULL;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+       drm_intel_bo_gem *bo_gem;
+       int ret;
+       struct drm_gem_open open_arg;
+       struct drm_i915_gem_get_tiling get_tiling;
 
-    memset(&open_arg, 0, sizeof(open_arg));
-    open_arg.name = handle;
-    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
-    if (ret != 0) {
-       fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
-              name, handle, strerror(errno));
-       free(bo_gem);
-       return NULL;
-    }
-    bo_gem->bo.size = open_arg.size;
-    bo_gem->bo.offset = 0;
-    bo_gem->bo.virtual = NULL;
-    bo_gem->bo.bufmgr = bufmgr;
-    bo_gem->name = name;
-    atomic_set (&bo_gem->refcount, 1);
-    bo_gem->validate_index = -1;
-    bo_gem->gem_handle = open_arg.handle;
-    bo_gem->global_name = handle;
-    bo_gem->reusable = 0;
-
-    memset(&get_tiling, 0, sizeof(get_tiling));
-    get_tiling.handle = bo_gem->gem_handle;
-    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
-    if (ret != 0) {
-       drm_intel_gem_bo_unreference(&bo_gem->bo);
-       return NULL;
-    }
-    bo_gem->tiling_mode = get_tiling.tiling_mode;
-    bo_gem->swizzle_mode = get_tiling.swizzle_mode;
-    if (bo_gem->tiling_mode == I915_TILING_NONE)
-       bo_gem->reloc_tree_fences = 0;
-    else
-       bo_gem->reloc_tree_fences = 1;
+       bo_gem = calloc(1, sizeof(*bo_gem));
+       if (!bo_gem)
+               return NULL;
+
+       memset(&open_arg, 0, sizeof(open_arg));
+       open_arg.name = handle;
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_OPEN, &open_arg);
+       if (ret != 0) {
+               fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
+                       name, handle, strerror(errno));
+               free(bo_gem);
+               return NULL;
+       }
+       bo_gem->bo.size = open_arg.size;
+       bo_gem->bo.offset = 0;
+       bo_gem->bo.virtual = NULL;
+       bo_gem->bo.bufmgr = bufmgr;
+       bo_gem->name = name;
+       atomic_set(&bo_gem->refcount, 1);
+       bo_gem->validate_index = -1;
+       bo_gem->gem_handle = open_arg.handle;
+       bo_gem->global_name = handle;
+       bo_gem->reusable = 0;
 
-    DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+       memset(&get_tiling, 0, sizeof(get_tiling));
+       get_tiling.handle = bo_gem->gem_handle;
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
+       if (ret != 0) {
+               drm_intel_gem_bo_unreference(&bo_gem->bo);
+               return NULL;
+       }
+       bo_gem->tiling_mode = get_tiling.tiling_mode;
+       bo_gem->swizzle_mode = get_tiling.swizzle_mode;
+       if (bo_gem->tiling_mode == I915_TILING_NONE)
+               bo_gem->reloc_tree_fences = 0;
+       else
+               bo_gem->reloc_tree_fences = 1;
 
-    return &bo_gem->bo;
+       DBG("bo_create_from_handle: %d (%s)\n", handle, bo_gem->name);
+
+       return &bo_gem->bo;
 }
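
This is the import half of cross-process sharing: one side publishes a
global (flink) name, the other opens the object by that name. A sketch,
assuming the export side uses drm_intel_bo_flink() from the same library:

        /* Exporting process */
        uint32_t name;
        drm_intel_bo_flink(bo, &name);  /* publish a global name */

        /* Importing process */
        drm_intel_bo *shared =
            drm_intel_bo_gem_create_from_name(bufmgr, "shared", name);
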
 
 static void
 drm_intel_gem_bo_free(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_gem_close close;
-    int ret;
-
-    if (bo_gem->mem_virtual)
-       munmap (bo_gem->mem_virtual, bo_gem->bo.size);
-    if (bo_gem->gtt_virtual)
-       munmap (bo_gem->gtt_virtual, bo_gem->bo.size);
-
-    free(bo_gem->reloc_target_bo);
-    free(bo_gem->relocs);
-
-    /* Close this object */
-    memset(&close, 0, sizeof(close));
-    close.handle = bo_gem->gem_handle;
-    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
-    if (ret != 0) {
-       fprintf(stderr,
-               "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
-               bo_gem->gem_handle, bo_gem->name, strerror(errno));
-    }
-    free(bo);
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_gem_close close;
+       int ret;
+
+       if (bo_gem->mem_virtual)
+               munmap(bo_gem->mem_virtual, bo_gem->bo.size);
+       if (bo_gem->gtt_virtual)
+               munmap(bo_gem->gtt_virtual, bo_gem->bo.size);
+
+       free(bo_gem->reloc_target_bo);
+       free(bo_gem->relocs);
+
+       /* Close this object */
+       memset(&close, 0, sizeof(close));
+       close.handle = bo_gem->gem_handle;
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
+       if (ret != 0) {
+               fprintf(stderr,
+                       "DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
+                       bo_gem->gem_handle, bo_gem->name, strerror(errno));
+       }
+       free(bo);
 }
 
 /** Frees all cached buffers significantly older than @time. */
 static void
 drm_intel_gem_cleanup_bo_cache(drm_intel_bufmgr_gem *bufmgr_gem, time_t time)
 {
-    int i;
+       int i;
 
-    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
-       struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
+       for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+               struct drm_intel_gem_bo_bucket *bucket =
+                   &bufmgr_gem->cache_bucket[i];
 
-       while (!DRMLISTEMPTY(&bucket->head)) {
-           drm_intel_bo_gem *bo_gem;
+               while (!DRMLISTEMPTY(&bucket->head)) {
+                       drm_intel_bo_gem *bo_gem;
 
-           bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
-           if (time - bo_gem->free_time <= 1)
-               break;
+                       bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+                                             bucket->head.next, head);
+                       if (time - bo_gem->free_time <= 1)
+                               break;
 
-           DRMLISTDEL(&bo_gem->head);
+                       DRMLISTDEL(&bo_gem->head);
 
-           drm_intel_gem_bo_free(&bo_gem->bo);
+                       drm_intel_gem_bo_free(&bo_gem->bo);
+               }
        }
-    }
 }
 
-static void
-drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
+static void drm_intel_gem_bo_unreference_final(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_intel_gem_bo_bucket *bucket;
-    uint32_t tiling_mode;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_intel_gem_bo_bucket *bucket;
+       uint32_t tiling_mode;
+
+       if (bo_gem->relocs != NULL) {
+               int i;
+
+               /* Unreference all the target buffers */
+               for (i = 0; i < bo_gem->reloc_count; i++) {
+                       drm_intel_bo *target = bo_gem->reloc_target_bo[i];
+
+                       drm_intel_gem_bo_unreference_locked(target);
+               }
+       }
 
-    if (bo_gem->relocs != NULL) {
-       int i;
+       DBG("bo_unreference final: %d (%s)\n",
+           bo_gem->gem_handle, bo_gem->name);
 
-       /* Unreference all the target buffers */
-       for (i = 0; i < bo_gem->reloc_count; i++)
-           drm_intel_gem_bo_unreference_locked(bo_gem->reloc_target_bo[i]);
-    }
+       bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
+       /* Put the buffer into our internal cache for reuse if we can. */
+       tiling_mode = I915_TILING_NONE;
+       if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
+           drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0) {
+               struct timespec time;
 
-    DBG("bo_unreference final: %d (%s)\n",
-       bo_gem->gem_handle, bo_gem->name);
+               clock_gettime(CLOCK_MONOTONIC, &time);
+               bo_gem->free_time = time.tv_sec;
 
-    bucket = drm_intel_gem_bo_bucket_for_size(bufmgr_gem, bo->size);
-    /* Put the buffer into our internal cache for reuse if we can. */
-    tiling_mode = I915_TILING_NONE;
-    if (bufmgr_gem->bo_reuse && bo_gem->reusable && bucket != NULL &&
-       drm_intel_gem_bo_set_tiling(bo, &tiling_mode, 0) == 0)
-    {
-       struct timespec time;
+               bo_gem->name = NULL;
+               bo_gem->validate_index = -1;
+               bo_gem->reloc_count = 0;
 
-       clock_gettime(CLOCK_MONOTONIC, &time);
-       bo_gem->free_time = time.tv_sec;
+               DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
 
-       bo_gem->name = NULL;
-       bo_gem->validate_index = -1;
-       bo_gem->reloc_count = 0;
+               drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem,
+                                        I915_MADV_DONTNEED);
+               drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
+       } else {
+               drm_intel_gem_bo_free(bo);
+       }
+}
 
-       DRMLISTADDTAIL(&bo_gem->head, &bucket->head);
+static void drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
+{
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
-       drm_intel_gem_bo_madvise(bufmgr_gem, bo_gem, I915_MADV_DONTNEED);
-       drm_intel_gem_cleanup_bo_cache(bufmgr_gem, time.tv_sec);
-    } else {
-       drm_intel_gem_bo_free(bo);
-    }
+       assert(atomic_read(&bo_gem->refcount) > 0);
+       if (atomic_dec_and_test(&bo_gem->refcount))
+               drm_intel_gem_bo_unreference_final(bo);
 }
 
-static void
-drm_intel_gem_bo_unreference_locked(drm_intel_bo *bo)
+static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-
-    assert(atomic_read(&bo_gem->refcount) > 0);
-    if (atomic_dec_and_test (&bo_gem->refcount))
-       drm_intel_gem_bo_unreference_final(bo);
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+       assert(atomic_read(&bo_gem->refcount) > 0);
+       if (atomic_dec_and_test(&bo_gem->refcount)) {
+               drm_intel_bufmgr_gem *bufmgr_gem =
+                   (drm_intel_bufmgr_gem *) bo->bufmgr;
+               pthread_mutex_lock(&bufmgr_gem->lock);
+               drm_intel_gem_bo_unreference_final(bo);
+               pthread_mutex_unlock(&bufmgr_gem->lock);
+       }
 }
 
-static void
-drm_intel_gem_bo_unreference(drm_intel_bo *bo)
+static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_set_domain set_domain;
+       int ret;
 
-    assert(atomic_read(&bo_gem->refcount) > 0);
-    if (atomic_dec_and_test (&bo_gem->refcount)) {
-       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
        pthread_mutex_lock(&bufmgr_gem->lock);
-       drm_intel_gem_bo_unreference_final(bo);
-       pthread_mutex_unlock(&bufmgr_gem->lock);
-    }
-}
 
-static int
-drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
-{
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_set_domain set_domain;
-    int ret;
-
-    pthread_mutex_lock(&bufmgr_gem->lock);
-
-    /* Allow recursive mapping. Mesa may recursively map buffers with
-     * nested display loops.
-     */
-    if (!bo_gem->mem_virtual) {
-       struct drm_i915_gem_mmap mmap_arg;
-
-       DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
-
-       memset(&mmap_arg, 0, sizeof(mmap_arg));
-       mmap_arg.handle = bo_gem->gem_handle;
-       mmap_arg.offset = 0;
-       mmap_arg.size = bo->size;
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+       /* Allow recursive mapping. Mesa may recursively map buffers with
+        * nested display loops.
+        */
+       if (!bo_gem->mem_virtual) {
+               struct drm_i915_gem_mmap mmap_arg;
+
+               DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
+
+               memset(&mmap_arg, 0, sizeof(mmap_arg));
+               mmap_arg.handle = bo_gem->gem_handle;
+               mmap_arg.offset = 0;
+               mmap_arg.size = bo->size;
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+               if (ret != 0) {
+                       fprintf(stderr,
+                               "%s:%d: Error mapping buffer %d (%s): %s .\n",
+                               __FILE__, __LINE__, bo_gem->gem_handle,
+                               bo_gem->name, strerror(errno));
+                       pthread_mutex_unlock(&bufmgr_gem->lock);
+                       return ret;
+               }
+               bo_gem->mem_virtual = (void *)(uintptr_t) mmap_arg.addr_ptr;
+       }
+       DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+           bo_gem->mem_virtual);
+       bo->virtual = bo_gem->mem_virtual;
+
+       set_domain.handle = bo_gem->gem_handle;
+       set_domain.read_domains = I915_GEM_DOMAIN_CPU;
+       if (write_enable)
+               set_domain.write_domain = I915_GEM_DOMAIN_CPU;
+       else
+               set_domain.write_domain = 0;
+       do {
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                           &set_domain);
+       } while (ret == -1 && errno == EINTR);
        if (ret != 0) {
-           fprintf(stderr, "%s:%d: Error mapping buffer %d (%s): %s .\n",
-                   __FILE__, __LINE__,
-                   bo_gem->gem_handle, bo_gem->name, strerror(errno));
-           pthread_mutex_unlock(&bufmgr_gem->lock);
-           return ret;
+               fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
+                       __FILE__, __LINE__, bo_gem->gem_handle,
+                       strerror(errno));
+               pthread_mutex_unlock(&bufmgr_gem->lock);
+               return ret;
        }
-       bo_gem->mem_virtual = (void *)(uintptr_t)mmap_arg.addr_ptr;
-    }
-    DBG("bo_map: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
-       bo_gem->mem_virtual);
-    bo->virtual = bo_gem->mem_virtual;
-
-    set_domain.handle = bo_gem->gem_handle;
-    set_domain.read_domains = I915_GEM_DOMAIN_CPU;
-    if (write_enable)
-       set_domain.write_domain = I915_GEM_DOMAIN_CPU;
-    else
-       set_domain.write_domain = 0;
-    do {
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
-                   &set_domain);
-    } while (ret == -1 && errno == EINTR);
-    if (ret != 0) {
-       fprintf (stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
-                __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
-       pthread_mutex_unlock(&bufmgr_gem->lock);
-       return ret;
-    }
 
-    pthread_mutex_unlock(&bufmgr_gem->lock);
+       pthread_mutex_unlock(&bufmgr_gem->lock);
 
-    return 0;
+       return 0;
 }
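
Mapping both installs (or reuses) the CPU mmap and moves the object to
the CPU domain, so callers may dirty the pages directly; note the EINTR
retry loop around the set-domain ioctl, a pattern every blocking ioctl in
this file repeats. A typical write path (data and size are assumed):

        if (drm_intel_bo_map(bo, 1) == 0) {
                memcpy(bo->virtual, data, size);
                drm_intel_bo_unmap(bo);
        }
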
 
-int
-drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
+int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_set_domain set_domain;
-    int ret;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_set_domain set_domain;
+       int ret;
 
-    pthread_mutex_lock(&bufmgr_gem->lock);
-
-    /* Get a mapping of the buffer if we haven't before. */
-    if (bo_gem->gtt_virtual == NULL) {
-       struct drm_i915_gem_mmap_gtt mmap_arg;
-
-       DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
-
-       memset(&mmap_arg, 0, sizeof(mmap_arg));
-       mmap_arg.handle = bo_gem->gem_handle;
-
-       /* Get the fake offset back... */
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
-       if (ret != 0) {
-           fprintf(stderr,
-                   "%s:%d: Error preparing buffer map %d (%s): %s .\n",
-                   __FILE__, __LINE__,
-                   bo_gem->gem_handle, bo_gem->name,
-                   strerror(errno));
-           pthread_mutex_unlock(&bufmgr_gem->lock);
-           return ret;
-       }
+       pthread_mutex_lock(&bufmgr_gem->lock);
 
-       /* and mmap it */
-       bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
-                                  MAP_SHARED, bufmgr_gem->fd,
-                                  mmap_arg.offset);
-       if (bo_gem->gtt_virtual == MAP_FAILED) {
-           fprintf(stderr,
-                   "%s:%d: Error mapping buffer %d (%s): %s .\n",
-                   __FILE__, __LINE__,
-                   bo_gem->gem_handle, bo_gem->name,
-                   strerror(errno));
-           pthread_mutex_unlock(&bufmgr_gem->lock);
-           return errno;
+       /* Get a mapping of the buffer if we haven't before. */
+       if (bo_gem->gtt_virtual == NULL) {
+               struct drm_i915_gem_mmap_gtt mmap_arg;
+
+               DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
+                   bo_gem->name);
+
+               memset(&mmap_arg, 0, sizeof(mmap_arg));
+               mmap_arg.handle = bo_gem->gem_handle;
+
+               /* Get the fake offset back... */
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_MMAP_GTT,
+                           &mmap_arg);
+               if (ret != 0) {
+                       fprintf(stderr,
+                               "%s:%d: Error preparing buffer map %d (%s): %s .\n",
+                               __FILE__, __LINE__,
+                               bo_gem->gem_handle, bo_gem->name,
+                               strerror(errno));
+                       pthread_mutex_unlock(&bufmgr_gem->lock);
+                       return ret;
+               }
+
+               /* and mmap it */
+               bo_gem->gtt_virtual = mmap(0, bo->size, PROT_READ | PROT_WRITE,
+                                          MAP_SHARED, bufmgr_gem->fd,
+                                          mmap_arg.offset);
+               if (bo_gem->gtt_virtual == MAP_FAILED) {
+                       fprintf(stderr,
+                               "%s:%d: Error mapping buffer %d (%s): %s .\n",
+                               __FILE__, __LINE__,
+                               bo_gem->gem_handle, bo_gem->name,
+                               strerror(errno));
+                       pthread_mutex_unlock(&bufmgr_gem->lock);
+                       return errno;
+               }
        }
-    }
 
-    bo->virtual = bo_gem->gtt_virtual;
+       bo->virtual = bo_gem->gtt_virtual;
 
-    DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
-       bo_gem->gtt_virtual);
+       DBG("bo_map_gtt: %d (%s) -> %p\n", bo_gem->gem_handle, bo_gem->name,
+           bo_gem->gtt_virtual);
 
-    /* Now move it to the GTT domain so that the CPU caches are flushed */
-    set_domain.handle = bo_gem->gem_handle;
-    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-    set_domain.write_domain = I915_GEM_DOMAIN_GTT;
-    do {
-           ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
-                       &set_domain);
-    } while (ret == -1 && errno == EINTR);
+       /* Now move it to the GTT domain so that the CPU caches are flushed */
+       set_domain.handle = bo_gem->gem_handle;
+       set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+       set_domain.write_domain = I915_GEM_DOMAIN_GTT;
+       do {
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                           &set_domain);
+       } while (ret == -1 && errno == EINTR);
 
-    if (ret != 0) {
-           fprintf (stderr, "%s:%d: Error setting domain %d: %s\n",
-                    __FILE__, __LINE__, bo_gem->gem_handle, strerror (errno));
-    }
+       if (ret != 0) {
+               fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
+                       __FILE__, __LINE__, bo_gem->gem_handle,
+                       strerror(errno));
+       }
 
-    pthread_mutex_unlock(&bufmgr_gem->lock);
+       pthread_mutex_unlock(&bufmgr_gem->lock);
 
-    return 0;
+       return 0;
 }
 
-int
-drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
+int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    int ret = 0;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int ret = 0;
 
-    if (bo == NULL)
-       return 0;
+       if (bo == NULL)
+               return 0;
 
-    assert(bo_gem->gtt_virtual != NULL);
+       assert(bo_gem->gtt_virtual != NULL);
 
-    pthread_mutex_lock(&bufmgr_gem->lock);
-    bo->virtual = NULL;
-    pthread_mutex_unlock(&bufmgr_gem->lock);
+       pthread_mutex_lock(&bufmgr_gem->lock);
+       bo->virtual = NULL;
+       pthread_mutex_unlock(&bufmgr_gem->lock);
 
-    return ret;
+       return ret;
 }
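
The GTT path maps through the aperture, so stores honor the buffer's
fences and tiling, which makes it the right choice for tiled or scanout
surfaces. A sketch mirroring the CPU path above (pixels and size are
assumed):

        if (drm_intel_gem_bo_map_gtt(bo) == 0) {
                memcpy(bo->virtual, pixels, size);
                drm_intel_gem_bo_unmap_gtt(bo);
        }
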
 
-static int
-drm_intel_gem_bo_unmap(drm_intel_bo *bo)
+static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_sw_finish sw_finish;
-    int ret;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_sw_finish sw_finish;
+       int ret;
 
-    if (bo == NULL)
-       return 0;
+       if (bo == NULL)
+               return 0;
 
-    assert(bo_gem->mem_virtual != NULL);
+       assert(bo_gem->mem_virtual != NULL);
 
-    pthread_mutex_lock(&bufmgr_gem->lock);
+       pthread_mutex_lock(&bufmgr_gem->lock);
 
-    /* Cause a flush to happen if the buffer's pinned for scanout, so the
-     * results show up in a timely manner.
-     */
-    sw_finish.handle = bo_gem->gem_handle;
-    do {
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
-                   &sw_finish);
-    } while (ret == -1 && errno == EINTR);
+       /* Cause a flush to happen if the buffer's pinned for scanout, so the
+        * results show up in a timely manner.
+        */
+       sw_finish.handle = bo_gem->gem_handle;
+       do {
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SW_FINISH,
+                           &sw_finish);
+       } while (ret == -1 && errno == EINTR);
 
-    bo->virtual = NULL;
-    pthread_mutex_unlock(&bufmgr_gem->lock);
-    return 0;
+       bo->virtual = NULL;
+       pthread_mutex_unlock(&bufmgr_gem->lock);
+       return 0;
 }
 
 static int
-drm_intel_gem_bo_subdata (drm_intel_bo *bo, unsigned long offset,
-                         unsigned long size, const void *data)
+drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
+                        unsigned long size, const void *data)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_pwrite pwrite;
-    int ret;
-
-    memset (&pwrite, 0, sizeof (pwrite));
-    pwrite.handle = bo_gem->gem_handle;
-    pwrite.offset = offset;
-    pwrite.size = size;
-    pwrite.data_ptr = (uint64_t) (uintptr_t) data;
-    do {
-       ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
-    } while (ret == -1 && errno == EINTR);
-    if (ret != 0) {
-       fprintf (stderr, "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
-                __FILE__, __LINE__,
-                bo_gem->gem_handle, (int) offset, (int) size,
-                strerror (errno));
-    }
-    return 0;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_pwrite pwrite;
+       int ret;
+
+       memset(&pwrite, 0, sizeof(pwrite));
+       pwrite.handle = bo_gem->gem_handle;
+       pwrite.offset = offset;
+       pwrite.size = size;
+       pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+       do {
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+       } while (ret == -1 && errno == EINTR);
+       if (ret != 0) {
+               fprintf(stderr,
+                       "%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
+                       __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
+                       (int)size, strerror(errno));
+       }
+       return 0;
 }
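
Because pwrite copies through the kernel, a caller can upload data
without ever mapping the buffer. A sketch via the public wrapper:

        uint32_t magic = 0xdeadbeef;    /* example payload */

        drm_intel_bo_subdata(bo, 0, sizeof(magic), &magic);
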
 
 static int
-drm_intel_gem_get_pipe_from_crtc_id (drm_intel_bufmgr *bufmgr, int crtc_id)
+drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
-    struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
-    int ret;
-
-    get_pipe_from_crtc_id.crtc_id = crtc_id;
-    ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
-                &get_pipe_from_crtc_id);
-    if (ret != 0) {
-       /* We return -1 here to signal that we don't
-        * know which pipe is associated with this crtc.
-        * This lets the caller know that this information
-        * isn't available; using the wrong pipe for
-        * vblank waiting can cause the chipset to lock up
-        */
-       return -1;
-    }
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+       struct drm_i915_get_pipe_from_crtc_id get_pipe_from_crtc_id;
+       int ret;
 
-    return get_pipe_from_crtc_id.pipe;
+       get_pipe_from_crtc_id.crtc_id = crtc_id;
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
+                   &get_pipe_from_crtc_id);
+       if (ret != 0) {
+               /* We return -1 here to signal that we don't
+                * know which pipe is associated with this crtc.
+                * This lets the caller know that this information
+                * isn't available; using the wrong pipe for
+                * vblank waiting can cause the chipset to lock up
+                */
+               return -1;
+       }
+
+       return get_pipe_from_crtc_id.pipe;
 }
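
The returned pipe feeds vblank waits; -1 tells the caller to skip the
wait rather than risk the wrong pipe. A sketch of the consumer side,
assuming libdrm's drmWaitVBlank() interface (fd is the DRM device fd):

        int pipe = drm_intel_get_pipe_from_crtc_id(bufmgr, crtc_id);

        if (pipe >= 0) {
                drmVBlank vbl;

                memset(&vbl, 0, sizeof(vbl));
                vbl.request.type = DRM_VBLANK_RELATIVE |
                    (pipe > 0 ? DRM_VBLANK_SECONDARY : 0);
                vbl.request.sequence = 1;
                drmWaitVBlank(fd, &vbl);
        }
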
 
 static int
-drm_intel_gem_bo_get_subdata (drm_intel_bo *bo, unsigned long offset,
-                             unsigned long size, void *data)
+drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
+                            unsigned long size, void *data)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_pread pread;
-    int ret;
-
-    memset (&pread, 0, sizeof (pread));
-    pread.handle = bo_gem->gem_handle;
-    pread.offset = offset;
-    pread.size = size;
-    pread.data_ptr = (uint64_t) (uintptr_t) data;
-    do {
-       ret = ioctl (bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
-    } while (ret == -1 && errno == EINTR);
-    if (ret != 0) {
-       fprintf (stderr, "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
-                __FILE__, __LINE__,
-                bo_gem->gem_handle, (int) offset, (int) size,
-                strerror (errno));
-    }
-    return 0;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_pread pread;
+       int ret;
+
+       memset(&pread, 0, sizeof(pread));
+       pread.handle = bo_gem->gem_handle;
+       pread.offset = offset;
+       pread.size = size;
+       pread.data_ptr = (uint64_t) (uintptr_t) data;
+       do {
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+       } while (ret == -1 && errno == EINTR);
+       if (ret != 0) {
+               fprintf(stderr,
+                       "%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
+                       __FILE__, __LINE__, bo_gem->gem_handle, (int)offset,
+                       (int)size, strerror(errno));
+       }
+       return 0;
 }
 
 /** Waits for all GPU rendering to the object to have completed. */
 static void
 drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
 {
-    drm_intel_gem_bo_start_gtt_access(bo, 0);
+       drm_intel_gem_bo_start_gtt_access(bo, 0);
 }
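
Waiting is implemented as a read-only domain move: requesting
I915_GEM_DOMAIN_GTT with no write domain makes the kernel block until
pending GPU writes to the object retire, without invalidating anything.
An explicit CPU-side sync is therefore just:

        drm_intel_gem_bo_start_gtt_access(bo, 0);       /* blocks until idle */
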
 
 /**
@@ -925,50 +944,54 @@ drm_intel_gem_bo_wait_rendering(drm_intel_bo *bo)
 void
 drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_set_domain set_domain;
-    int ret;
-
-    set_domain.handle = bo_gem->gem_handle;
-    set_domain.read_domains = I915_GEM_DOMAIN_GTT;
-    set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
-    do {
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
-    } while (ret == -1 && errno == EINTR);
-    if (ret != 0) {
-       fprintf (stderr, "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
-                __FILE__, __LINE__,
-                bo_gem->gem_handle, set_domain.read_domains, set_domain.write_domain,
-                strerror (errno));
-    }
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_set_domain set_domain;
+       int ret;
+
+       set_domain.handle = bo_gem->gem_handle;
+       set_domain.read_domains = I915_GEM_DOMAIN_GTT;
+       set_domain.write_domain = write_enable ? I915_GEM_DOMAIN_GTT : 0;
+       do {
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_DOMAIN,
+                           &set_domain);
+       } while (ret == -1 && errno == EINTR);
+       if (ret != 0) {
+               fprintf(stderr,
+                       "%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
+                       __FILE__, __LINE__, bo_gem->gem_handle,
+                       set_domain.read_domains, set_domain.write_domain,
+                       strerror(errno));
+       }
 }
 
 static void
 drm_intel_bufmgr_gem_destroy(drm_intel_bufmgr *bufmgr)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
-    int i;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
+       int i;
 
-    free(bufmgr_gem->exec_objects);
-    free(bufmgr_gem->exec_bos);
+       free(bufmgr_gem->exec_objects);
+       free(bufmgr_gem->exec_bos);
 
-    pthread_mutex_destroy(&bufmgr_gem->lock);
+       pthread_mutex_destroy(&bufmgr_gem->lock);
 
-    /* Free any cached buffer objects we were going to reuse */
-    for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
-       struct drm_intel_gem_bo_bucket *bucket = &bufmgr_gem->cache_bucket[i];
-       drm_intel_bo_gem *bo_gem;
+       /* Free any cached buffer objects we were going to reuse */
+       for (i = 0; i < DRM_INTEL_GEM_BO_BUCKETS; i++) {
+               struct drm_intel_gem_bo_bucket *bucket =
+                   &bufmgr_gem->cache_bucket[i];
+               drm_intel_bo_gem *bo_gem;
 
-       while (!DRMLISTEMPTY(&bucket->head)) {
-           bo_gem = DRMLISTENTRY(drm_intel_bo_gem, bucket->head.next, head);
-           DRMLISTDEL(&bo_gem->head);
+               while (!DRMLISTEMPTY(&bucket->head)) {
+                       bo_gem = DRMLISTENTRY(drm_intel_bo_gem,
+                                             bucket->head.next, head);
+                       DRMLISTDEL(&bo_gem->head);
 
-           drm_intel_gem_bo_free(&bo_gem->bo);
+                       drm_intel_gem_bo_free(&bo_gem->bo);
+               }
        }
-    }
 
-    free(bufmgr);
+       free(bufmgr);
 }
 
 /**
@@ -985,49 +1008,49 @@ drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
                            drm_intel_bo *target_bo, uint32_t target_offset,
                            uint32_t read_domains, uint32_t write_domain)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *)target_bo;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
 
-    pthread_mutex_lock(&bufmgr_gem->lock);
+       pthread_mutex_lock(&bufmgr_gem->lock);
 
-    /* Create a new relocation list if needed */
-    if (bo_gem->relocs == NULL)
-       drm_intel_setup_reloc_list(bo);
+       /* Create a new relocation list if needed */
+       if (bo_gem->relocs == NULL)
+               drm_intel_setup_reloc_list(bo);
 
-    /* Check overflow */
-    assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
+       /* Check overflow */
+       assert(bo_gem->reloc_count < bufmgr_gem->max_relocs);
 
-    /* Check args */
-    assert (offset <= bo->size - 4);
-    assert ((write_domain & (write_domain-1)) == 0);
+       /* Check args */
+       assert(offset <= bo->size - 4);
+       assert((write_domain & (write_domain - 1)) == 0);
 
-    /* Make sure that we're not adding a reloc to something whose size has
-     * already been accounted for.
-     */
-    assert(!bo_gem->used_as_reloc_target);
-    bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
-    bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
+       /* Make sure that we're not adding a reloc to something whose size has
+        * already been accounted for.
+        */
+       assert(!bo_gem->used_as_reloc_target);
+       bo_gem->reloc_tree_size += target_bo_gem->reloc_tree_size;
+       bo_gem->reloc_tree_fences += target_bo_gem->reloc_tree_fences;
 
-    /* Flag the target to disallow further relocations in it. */
-    target_bo_gem->used_as_reloc_target = 1;
+       /* Flag the target to disallow further relocations in it. */
+       target_bo_gem->used_as_reloc_target = 1;
 
-    bo_gem->relocs[bo_gem->reloc_count].offset = offset;
-    bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
-    bo_gem->relocs[bo_gem->reloc_count].target_handle =
-       target_bo_gem->gem_handle;
-    bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
-    bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
-    bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
+       bo_gem->relocs[bo_gem->reloc_count].offset = offset;
+       bo_gem->relocs[bo_gem->reloc_count].delta = target_offset;
+       bo_gem->relocs[bo_gem->reloc_count].target_handle =
+           target_bo_gem->gem_handle;
+       bo_gem->relocs[bo_gem->reloc_count].read_domains = read_domains;
+       bo_gem->relocs[bo_gem->reloc_count].write_domain = write_domain;
+       bo_gem->relocs[bo_gem->reloc_count].presumed_offset = target_bo->offset;
 
-    bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
-    drm_intel_gem_bo_reference(target_bo);
+       bo_gem->reloc_target_bo[bo_gem->reloc_count] = target_bo;
+       drm_intel_gem_bo_reference(target_bo);
 
-    bo_gem->reloc_count++;
+       bo_gem->reloc_count++;
 
-    pthread_mutex_unlock(&bufmgr_gem->lock);
+       pthread_mutex_unlock(&bufmgr_gem->lock);
 
-    return 0;
+       return 0;
 }
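
A relocation records "patch the dword at `offset' in this BO with the
target's final address plus `target_offset'", which is what lets the
kernel move buffers between submissions. A sketch of emitting one while
building a batch, via the public wrapper (batch, reloc_offset and target
are assumed):

        /* Make the batch dword at reloc_offset point at `target'. */
        drm_intel_bo_emit_reloc(batch, reloc_offset,
                                target, 0,
                                I915_GEM_DOMAIN_RENDER,
                                I915_GEM_DOMAIN_RENDER);
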
 
 /**
@@ -1038,211 +1061,216 @@ drm_intel_gem_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
 static void
 drm_intel_gem_bo_process_reloc(drm_intel_bo *bo)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    int i;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int i;
 
-    if (bo_gem->relocs == NULL)
-       return;
+       if (bo_gem->relocs == NULL)
+               return;
 
-    for (i = 0; i < bo_gem->reloc_count; i++) {
-       drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];
+       for (i = 0; i < bo_gem->reloc_count; i++) {
+               drm_intel_bo *target_bo = bo_gem->reloc_target_bo[i];
 
-       /* Continue walking the tree depth-first. */
-       drm_intel_gem_bo_process_reloc(target_bo);
+               /* Continue walking the tree depth-first. */
+               drm_intel_gem_bo_process_reloc(target_bo);
 
-       /* Add the target to the validate list */
-       drm_intel_add_validate_buffer(target_bo);
-    }
+               /* Add the target to the validate list */
+               drm_intel_add_validate_buffer(target_bo);
+       }
 }
 
 static void
-drm_intel_update_buffer_offsets (drm_intel_bufmgr_gem *bufmgr_gem)
+drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
 {
-    int i;
-
-    for (i = 0; i < bufmgr_gem->exec_count; i++) {
-       drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
-       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-
-       /* Update the buffer offset */
-       if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
-           DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
-               bo_gem->gem_handle, bo_gem->name, bo->offset,
-               (unsigned long long)bufmgr_gem->exec_objects[i].offset);
-           bo->offset = bufmgr_gem->exec_objects[i].offset;
+       int i;
+
+       for (i = 0; i < bufmgr_gem->exec_count; i++) {
+               drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+               drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+               /* Update the buffer offset */
+               if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+                       DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
+                           bo_gem->gem_handle, bo_gem->name, bo->offset,
+                           (unsigned long long)
+                           bufmgr_gem->exec_objects[i].offset);
+                       bo->offset = bufmgr_gem->exec_objects[i].offset;
+               }
        }
-    }
 }
 
 static int
 drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
-                     drm_clip_rect_t *cliprects, int num_cliprects,
-                     int DR4)
+                     drm_clip_rect_t *cliprects, int num_cliprects, int DR4)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    struct drm_i915_gem_execbuffer execbuf;
-    int ret, i;
-
-    pthread_mutex_lock(&bufmgr_gem->lock);
-    /* Update indices and set up the validate list. */
-    drm_intel_gem_bo_process_reloc(bo);
-
-    /* Add the batch buffer to the validation list.  There are no relocations
-     * pointing to it.
-     */
-    drm_intel_add_validate_buffer(bo);
-
-    execbuf.buffers_ptr = (uintptr_t)bufmgr_gem->exec_objects;
-    execbuf.buffer_count = bufmgr_gem->exec_count;
-    execbuf.batch_start_offset = 0;
-    execbuf.batch_len = used;
-    execbuf.cliprects_ptr = (uintptr_t)cliprects;
-    execbuf.num_cliprects = num_cliprects;
-    execbuf.DR1 = 0;
-    execbuf.DR4 = DR4;
-
-    do {
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER, &execbuf);
-    } while (ret != 0 && errno == EAGAIN);
-
-    if (ret != 0 && errno == ENOMEM) {
-       fprintf(stderr, "Execbuffer fails to pin. Estimate: %u. Actual: %u. Available: %u\n",
-               drm_intel_gem_estimate_batch_space(bufmgr_gem->exec_bos,
-                                                  bufmgr_gem->exec_count),
-               drm_intel_gem_compute_batch_space(bufmgr_gem->exec_bos,
-                                                 bufmgr_gem->exec_count),
-               (unsigned int) bufmgr_gem->gtt_size);
-    }
-    drm_intel_update_buffer_offsets (bufmgr_gem);
-
-    if (bufmgr_gem->bufmgr.debug)
-       drm_intel_gem_dump_validation_list(bufmgr_gem);
-
-    for (i = 0; i < bufmgr_gem->exec_count; i++) {
-       drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
-       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-
-       /* Disconnect the buffer from the validate list */
-       bo_gem->validate_index = -1;
-       drm_intel_gem_bo_unreference_locked(bo);
-       bufmgr_gem->exec_bos[i] = NULL;
-    }
-    bufmgr_gem->exec_count = 0;
-    pthread_mutex_unlock(&bufmgr_gem->lock);
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       struct drm_i915_gem_execbuffer execbuf;
+       int ret, i;
 
-    return 0;
+       pthread_mutex_lock(&bufmgr_gem->lock);
+       /* Update indices and set up the validate list. */
+       drm_intel_gem_bo_process_reloc(bo);
+
+       /* Add the batch buffer to the validation list.  There are no
+        * relocations pointing to it.
+        */
+       drm_intel_add_validate_buffer(bo);
+
+       execbuf.buffers_ptr = (uintptr_t) bufmgr_gem->exec_objects;
+       execbuf.buffer_count = bufmgr_gem->exec_count;
+       execbuf.batch_start_offset = 0;
+       execbuf.batch_len = used;
+       execbuf.cliprects_ptr = (uintptr_t) cliprects;
+       execbuf.num_cliprects = num_cliprects;
+       execbuf.DR1 = 0;
+       execbuf.DR4 = DR4;
+
+       do {
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER,
+                           &execbuf);
+       } while (ret != 0 && errno == EAGAIN);
+
+       if (ret != 0 && errno == ENOMEM) {
+               fprintf(stderr,
+                       "Execbuffer fails to pin. "
+                       "Estimate: %u. Actual: %u. Available: %u\n",
+                       drm_intel_gem_estimate_batch_space(
+                               bufmgr_gem->exec_bos, bufmgr_gem->exec_count),
+                       drm_intel_gem_compute_batch_space(
+                               bufmgr_gem->exec_bos, bufmgr_gem->exec_count),
+                       (unsigned int)bufmgr_gem->gtt_size);
+       }
+       drm_intel_update_buffer_offsets(bufmgr_gem);
+
+       if (bufmgr_gem->bufmgr.debug)
+               drm_intel_gem_dump_validation_list(bufmgr_gem);
+
+       for (i = 0; i < bufmgr_gem->exec_count; i++) {
+               drm_intel_bo *bo = bufmgr_gem->exec_bos[i];
+               drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+
+               /* Disconnect the buffer from the validate list */
+               bo_gem->validate_index = -1;
+               drm_intel_gem_bo_unreference_locked(bo);
+               bufmgr_gem->exec_bos[i] = NULL;
+       }
+       bufmgr_gem->exec_count = 0;
+       pthread_mutex_unlock(&bufmgr_gem->lock);
+
+       return 0;
 }
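
The loop above retries DRM_IOCTL_I915_GEM_EXECBUFFER on EAGAIN and then re-reads where the kernel actually placed each buffer. From the caller's side, submission collapses to a single call once all relocations are recorded; a hedged sketch using the public wrappers, with a hypothetical batch_bo holding used_bytes of commands:

        /* Submit the finished batch with no cliprects. */
        drm_intel_bo_unmap(batch_bo);
        ret = drm_intel_bo_exec(batch_bo, used_bytes, NULL, 0, 0);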
 
 static int
 drm_intel_gem_bo_pin(drm_intel_bo *bo, uint32_t alignment)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_pin pin;
-    int ret;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_pin pin;
+       int ret;
 
-    memset(&pin, 0, sizeof(pin));
-    pin.handle = bo_gem->gem_handle;
-    pin.alignment = alignment;
+       memset(&pin, 0, sizeof(pin));
+       pin.handle = bo_gem->gem_handle;
+       pin.alignment = alignment;
 
-    do {
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
-    } while (ret == -1 && errno == EINTR);
+       do {
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_PIN, &pin);
+       } while (ret == -1 && errno == EINTR);
 
-    if (ret != 0)
-       return -errno;
+       if (ret != 0)
+               return -errno;
 
-    bo->offset = pin.offset;
-    return 0;
+       bo->offset = pin.offset;
+       return 0;
 }
 
 static int
 drm_intel_gem_bo_unpin(drm_intel_bo *bo)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_unpin unpin;
-    int ret;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_unpin unpin;
+       int ret;
 
-    memset(&unpin, 0, sizeof(unpin));
-    unpin.handle = bo_gem->gem_handle;
+       memset(&unpin, 0, sizeof(unpin));
+       unpin.handle = bo_gem->gem_handle;
 
-    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
-    if (ret != 0)
-       return -errno;
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_UNPIN, &unpin);
+       if (ret != 0)
+               return -errno;
 
-    return 0;
+       return 0;
 }
 
 static int
-drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+drm_intel_gem_bo_set_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
                            uint32_t stride)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_i915_gem_set_tiling set_tiling;
-    int ret;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_i915_gem_set_tiling set_tiling;
+       int ret;
 
-    if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
-       return 0;
+       if (bo_gem->global_name == 0 && *tiling_mode == bo_gem->tiling_mode)
+               return 0;
 
-    /* If we're going from non-tiling to tiling, bump fence count */
-    if (bo_gem->tiling_mode == I915_TILING_NONE)
-       bo_gem->reloc_tree_fences++;
+       /* If we're going from non-tiling to tiling, bump fence count */
+       if (bo_gem->tiling_mode == I915_TILING_NONE)
+               bo_gem->reloc_tree_fences++;
 
-    memset(&set_tiling, 0, sizeof(set_tiling));
-    set_tiling.handle = bo_gem->gem_handle;
-    set_tiling.tiling_mode = *tiling_mode;
-    set_tiling.stride = stride;
+       memset(&set_tiling, 0, sizeof(set_tiling));
+       set_tiling.handle = bo_gem->gem_handle;
+       set_tiling.tiling_mode = *tiling_mode;
+       set_tiling.stride = stride;
 
-    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
-    if (ret != 0) {
-       *tiling_mode = bo_gem->tiling_mode;
-       return -errno;
-    }
-    bo_gem->tiling_mode = set_tiling.tiling_mode;
-    bo_gem->swizzle_mode = set_tiling.swizzle_mode;
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
+       if (ret != 0) {
+               *tiling_mode = bo_gem->tiling_mode;
+               return -errno;
+       }
+       bo_gem->tiling_mode = set_tiling.tiling_mode;
+       bo_gem->swizzle_mode = set_tiling.swizzle_mode;
 
-    /* If we're going from tiling to non-tiling, drop fence count */
-    if (bo_gem->tiling_mode == I915_TILING_NONE)
-       bo_gem->reloc_tree_fences--;
+       /* If we're going from tiling to non-tiling, drop fence count */
+       if (bo_gem->tiling_mode == I915_TILING_NONE)
+               bo_gem->reloc_tree_fences--;
 
-    *tiling_mode = bo_gem->tiling_mode;
-    return 0;
+       *tiling_mode = bo_gem->tiling_mode;
+       return 0;
 }
 
 static int
-drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
-                           uint32_t *swizzle_mode)
+drm_intel_gem_bo_get_tiling(drm_intel_bo *bo, uint32_t *tiling_mode,
+                           uint32_t *swizzle_mode)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
-    *tiling_mode = bo_gem->tiling_mode;
-    *swizzle_mode = bo_gem->swizzle_mode;
-    return 0;
+       *tiling_mode = bo_gem->tiling_mode;
+       *swizzle_mode = bo_gem->swizzle_mode;
+       return 0;
 }
 
 static int
-drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
+drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo->bufmgr;
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    struct drm_gem_flink flink;
-    int ret;
-
-    if (!bo_gem->global_name) {
-       memset(&flink, 0, sizeof(flink));
-       flink.handle = bo_gem->gem_handle;
-    
-       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
-       if (ret != 0)
-           return -errno;
-       bo_gem->global_name = flink.name;
-       bo_gem->reusable = 0;
-    }
-    
-    *name = bo_gem->global_name;
-    return 0;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bo->bufmgr;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       struct drm_gem_flink flink;
+       int ret;
+
+       if (!bo_gem->global_name) {
+               memset(&flink, 0, sizeof(flink));
+               flink.handle = bo_gem->gem_handle;
+
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_FLINK, &flink);
+               if (ret != 0)
+                       return -errno;
+               bo_gem->global_name = flink.name;
+               bo_gem->reusable = 0;
+       }
+
+       *name = bo_gem->global_name;
+       return 0;
 }
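
Two caller-visible contracts in the functions above: set_tiling writes the mode the kernel actually chose back through *tiling_mode, so the result must be re-checked rather than assumed, and flink both publishes a global name and clears `reusable' so a shared buffer never lands back in the cache. A small sketch (the 4096 stride is illustrative only):

        uint32_t tiling = I915_TILING_X, name;

        drm_intel_bo_set_tiling(bo, &tiling, 4096);
        if (tiling != I915_TILING_X) {
                /* kernel refused or downgraded the mode; adjust layout */
        }
        if (drm_intel_bo_flink(bo, &name) == 0) {
                /* hand `name' to another process to open the same BO */
        }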
 
 /**
@@ -1255,9 +1283,9 @@ drm_intel_gem_bo_flink(drm_intel_bo *bo, uint32_t *name)
 void
 drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bufmgr;
+       drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *) bufmgr;
 
-    bufmgr_gem->bo_reuse = 1;
+       bufmgr_gem->bo_reuse = 1;
 }
 
 /**
@@ -1267,20 +1295,22 @@ drm_intel_bufmgr_gem_enable_reuse(drm_intel_bufmgr *bufmgr)
 static int
 drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    int i;
-    int total = 0;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int i;
+       int total = 0;
 
-    if (bo == NULL || bo_gem->included_in_check_aperture)
-       return 0;
+       if (bo == NULL || bo_gem->included_in_check_aperture)
+               return 0;
 
-    total += bo->size;
-    bo_gem->included_in_check_aperture = 1;
+       total += bo->size;
+       bo_gem->included_in_check_aperture = 1;
 
-    for (i = 0; i < bo_gem->reloc_count; i++)
-       total += drm_intel_gem_bo_get_aperture_space(bo_gem->reloc_target_bo[i]);
+       for (i = 0; i < bo_gem->reloc_count; i++)
+               total += drm_intel_gem_bo_get_aperture_space(
+                               bo_gem->reloc_target_bo[i]);
 
-    return total;
+       return total;
 }
 
 /**
@@ -1292,20 +1322,20 @@ drm_intel_gem_bo_get_aperture_space(drm_intel_bo *bo)
  * This function over-counts if the same buffer is used multiple times.
  */
 static unsigned int
-drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
+drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
 {
-    int i;
-    unsigned int total = 0;
+       int i;
+       unsigned int total = 0;
 
-    for (i = 0; i < count; i++) {
-       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
+       for (i = 0; i < count; i++) {
+               drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
 
-       if (bo_gem == NULL)
-           continue;
+               if (bo_gem == NULL)
+                       continue;
 
-       total += bo_gem->reloc_tree_fences;
-    }
-    return total;
+               total += bo_gem->reloc_tree_fences;
+       }
+       return total;
 }
 
 /**
@@ -1315,16 +1345,17 @@ drm_intel_gem_total_fences(drm_intel_bo **bo_array, int count)
 static void
 drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    int i;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int i;
 
-    if (bo == NULL || !bo_gem->included_in_check_aperture)
-       return;
+       if (bo == NULL || !bo_gem->included_in_check_aperture)
+               return;
 
-    bo_gem->included_in_check_aperture = 0;
+       bo_gem->included_in_check_aperture = 0;
 
-    for (i = 0; i < bo_gem->reloc_count; i++)
-       drm_intel_gem_bo_clear_aperture_space_flag(bo_gem->reloc_target_bo[i]);
+       for (i = 0; i < bo_gem->reloc_count; i++)
+               drm_intel_gem_bo_clear_aperture_space_flag(
+                               bo_gem->reloc_target_bo[i]);
 }
 
 /**
@@ -1334,15 +1365,15 @@ drm_intel_gem_bo_clear_aperture_space_flag(drm_intel_bo *bo)
 static unsigned int
 drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
 {
-    int i;
-    unsigned int total = 0;
-
-    for (i = 0; i < count; i++) {
-       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
-       if (bo_gem != NULL)
-               total += bo_gem->reloc_tree_size;
-    }
-    return total;
+       int i;
+       unsigned int total = 0;
+
+       for (i = 0; i < count; i++) {
+               drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
+               if (bo_gem != NULL)
+                       total += bo_gem->reloc_tree_size;
+       }
+       return total;
 }
 
 /**
@@ -1353,28 +1384,30 @@ drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
 static unsigned int
 drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
 {
-    int i;
-    unsigned int total = 0;
-
-    for (i = 0; i < count; i++) {
-       total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
-       /* For the first buffer object in the array, we get an accurate count
-        * back for its reloc_tree size (since nothing had been flagged as
-        * being counted yet).  We can save that value out as a more
-        * conservative reloc_tree_size that avoids double-counting target
-        * buffers.  Since the first buffer happens to usually be the batch
-        * buffer in our callers, this can pull us back from doing the tree
-        * walk on every new batch emit.
-        */
-       if (i == 0) {
-           drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo_array[i];
-           bo_gem->reloc_tree_size = total;
+       int i;
+       unsigned int total = 0;
+
+       for (i = 0; i < count; i++) {
+               total += drm_intel_gem_bo_get_aperture_space(bo_array[i]);
+               /* For the first buffer object in the array, we get an
+                * accurate count back for its reloc_tree size (since nothing
+                * had been flagged as being counted yet).  We can save that
+                * value out as a more conservative reloc_tree_size that
+                * avoids double-counting target buffers.  Since the first
+                * buffer happens to usually be the batch buffer in our
+                * callers, this can pull us back from doing the tree
+                * walk on every new batch emit.
+                */
+               if (i == 0) {
+                       drm_intel_bo_gem *bo_gem =
+                           (drm_intel_bo_gem *) bo_array[i];
+                       bo_gem->reloc_tree_size = total;
+               }
        }
-    }
 
-    for (i = 0; i < count; i++)
-       drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
-    return total;
+       for (i = 0; i < count; i++)
+               drm_intel_gem_bo_clear_aperture_space_flag(bo_array[i]);
+       return total;
 }
 
 /**
@@ -1396,32 +1429,34 @@ drm_intel_gem_compute_batch_space(drm_intel_bo **bo_array, int count)
 static int
 drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem = (drm_intel_bufmgr_gem *)bo_array[0]->bufmgr;
-    unsigned int total = 0;
-    unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
-    int total_fences;
-
-    /* Check for fence reg constraints if necessary */
-    if (bufmgr_gem->available_fences) {
-       total_fences = drm_intel_gem_total_fences(bo_array, count);
-       if (total_fences > bufmgr_gem->available_fences)
-           return -1;
-    }
-
-    total = drm_intel_gem_estimate_batch_space(bo_array, count);
-
-    if (total > threshold)
-       total = drm_intel_gem_compute_batch_space(bo_array, count);
-
-    if (total > threshold) {
-       DBG("check_space: overflowed available aperture, %dkb vs %dkb\n",
-           total / 1024, (int)bufmgr_gem->gtt_size / 1024);
-       return -1;
-    } else {
-       DBG("drm_check_space: total %dkb vs bufgr %dkb\n", total / 1024 ,
-           (int)bufmgr_gem->gtt_size / 1024);
-       return 0;
-    }
+       drm_intel_bufmgr_gem *bufmgr_gem =
+           (drm_intel_bufmgr_gem *) bo_array[0]->bufmgr;
+       unsigned int total = 0;
+       unsigned int threshold = bufmgr_gem->gtt_size * 3 / 4;
+       int total_fences;
+
+       /* Check for fence reg constraints if necessary */
+       if (bufmgr_gem->available_fences) {
+               total_fences = drm_intel_gem_total_fences(bo_array, count);
+               if (total_fences > bufmgr_gem->available_fences)
+                       return -1;
+       }
+
+       total = drm_intel_gem_estimate_batch_space(bo_array, count);
+
+       if (total > threshold)
+               total = drm_intel_gem_compute_batch_space(bo_array, count);
+
+       if (total > threshold) {
+               DBG("check_space: overflowed available aperture, "
+                   "%dkb vs %dkb\n",
+                   total / 1024, (int)bufmgr_gem->gtt_size / 1024);
+               return -1;
+       } else {
+               DBG("drm_check_space: total %dkb vs bufmgr %dkb\n", total / 1024,
+                   (int)bufmgr_gem->gtt_size / 1024);
+               return 0;
+       }
 }
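
The sizing above is two-pass: the cheap estimate uses each buffer's cached reloc_tree_size and may double-count shared targets, and only when it crosses the 3/4-of-GTT threshold does the exact, deduplicating walk run. The usual caller pattern, sketched with a hypothetical flush_batch() driver hook and the public wrapper:

        if (drm_intel_bufmgr_check_aperture_space(bo_array, count) != 0) {
                flush_batch();  /* submit what has accumulated so far */
                /* then re-emit the state against the fresh, empty batch */
        }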
 
 /*
@@ -1431,10 +1466,10 @@ drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
 static int
 drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
 
-    bo_gem->reusable = 0;
-    return 0;
+       bo_gem->reusable = 0;
+       return 0;
 }
 
 /**
@@ -1444,20 +1479,21 @@ drm_intel_gem_bo_disable_reuse(drm_intel_bo *bo)
 static int
 drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
 {
-    drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
-    int i;
+       drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
+       int i;
 
-    if (bo == NULL || target_bo == NULL)
-       return 0;
+       if (bo == NULL || target_bo == NULL)
+               return 0;
 
-    for (i = 0; i < bo_gem->reloc_count; i++) {
-       if (bo_gem->reloc_target_bo[i] == target_bo)
-           return 1;
-       if (drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i], target_bo))
-           return 1;
-    }
+       for (i = 0; i < bo_gem->reloc_count; i++) {
+               if (bo_gem->reloc_target_bo[i] == target_bo)
+                       return 1;
+               if (drm_intel_gem_bo_references(bo_gem->reloc_target_bo[i],
+                                               target_bo))
+                       return 1;
+       }
 
-    return 0;
+       return 0;
 }
 
 /**
@@ -1469,90 +1505,95 @@ drm_intel_gem_bo_references(drm_intel_bo *bo, drm_intel_bo *target_bo)
 drm_intel_bufmgr *
 drm_intel_bufmgr_gem_init(int fd, int batch_size)
 {
-    drm_intel_bufmgr_gem *bufmgr_gem;
-    struct drm_i915_gem_get_aperture aperture;
-    drm_i915_getparam_t gp;
-    int ret, i;
-    unsigned long size;
-
-    bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
-    bufmgr_gem->fd = fd;
-
-    if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
-      free(bufmgr_gem);
-      return NULL;
-   }
-
-    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
-
-    if (ret == 0)
-       bufmgr_gem->gtt_size = aperture.aper_available_size;
-    else {
-       fprintf(stderr, "DRM_IOCTL_I915_GEM_APERTURE failed: %s\n",
-               strerror(errno));
-       bufmgr_gem->gtt_size = 128 * 1024 * 1024;
-       fprintf(stderr, "Assuming %dkB available aperture size.\n"
-               "May lead to reduced performance or incorrect rendering.\n",
-               (int)bufmgr_gem->gtt_size / 1024);
-    }
-
-    gp.param = I915_PARAM_CHIPSET_ID;
-    gp.value = &bufmgr_gem->pci_device;
-    ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
-    if (ret) {
-       fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
-       fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
-    }
-
-    if (!IS_I965G(bufmgr_gem)) {
-       gp.param = I915_PARAM_NUM_FENCES_AVAIL;
-       gp.value = &bufmgr_gem->available_fences;
+       drm_intel_bufmgr_gem *bufmgr_gem;
+       struct drm_i915_gem_get_aperture aperture;
+       drm_i915_getparam_t gp;
+       int ret, i;
+       unsigned long size;
+
+       bufmgr_gem = calloc(1, sizeof(*bufmgr_gem));
+       bufmgr_gem->fd = fd;
+
+       if (pthread_mutex_init(&bufmgr_gem->lock, NULL) != 0) {
+               free(bufmgr_gem);
+               return NULL;
+       }
+
+       ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+
+       if (ret == 0)
+               bufmgr_gem->gtt_size = aperture.aper_available_size;
+       else {
+               fprintf(stderr, "DRM_IOCTL_I915_GEM_GET_APERTURE failed: %s\n",
+                       strerror(errno));
+               bufmgr_gem->gtt_size = 128 * 1024 * 1024;
+               fprintf(stderr, "Assuming %dkB available aperture size.\n"
+                       "May lead to reduced performance or incorrect "
+                       "rendering.\n",
+                       (int)bufmgr_gem->gtt_size / 1024);
+       }
+
+       gp.param = I915_PARAM_CHIPSET_ID;
+       gp.value = &bufmgr_gem->pci_device;
        ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
        if (ret) {
-           fprintf(stderr, "get fences failed: %d [%d]\n", ret, errno);
-           fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
-           bufmgr_gem->available_fences = 0;
+               fprintf(stderr, "get chip id failed: %d [%d]\n", ret, errno);
+               fprintf(stderr, "param: %d, val: %d\n", gp.param, *gp.value);
        }
-    }
-
-    /* Let's go with one relocation per every 2 dwords (but round down a bit
-     * since a power of two will mean an extra page allocation for the reloc
-     * buffer).
-     *
-     * Every 4 was too few for the blender benchmark.
-     */
-    bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
-
-    bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
-    bufmgr_gem->bufmgr.bo_alloc_for_render = drm_intel_gem_bo_alloc_for_render;
-    bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
-    bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
-    bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
-    bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
-    bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
-    bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
-    bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
-    bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
-    bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
-    bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
-    bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
-    bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
-    bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
-    bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
-    bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
-    bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
-    bufmgr_gem->bufmgr.debug = 0;
-    bufmgr_gem->bufmgr.check_aperture_space = drm_intel_gem_check_aperture_space;
-    bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
-    bufmgr_gem->bufmgr.get_pipe_from_crtc_id = drm_intel_gem_get_pipe_from_crtc_id;
-    bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
-
-    /* Initialize the linked lists for BO reuse cache. */
-    for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
-       DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
-       bufmgr_gem->cache_bucket[i].size = size;
-    }
-
-    return &bufmgr_gem->bufmgr;
-}
 
+       if (!IS_I965G(bufmgr_gem)) {
+               gp.param = I915_PARAM_NUM_FENCES_AVAIL;
+               gp.value = &bufmgr_gem->available_fences;
+               ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GETPARAM, &gp);
+               if (ret) {
+                       fprintf(stderr, "get fences failed: %d [%d]\n", ret,
+                               errno);
+                       fprintf(stderr, "param: %d, val: %d\n", gp.param,
+                               *gp.value);
+                       bufmgr_gem->available_fences = 0;
+               }
+       }
+
+       /* Let's go with one relocation for every 2 dwords (but round down a bit
+        * since a power of two will mean an extra page allocation for the reloc
+        * buffer).
+        *
+        * Every 4 was too few for the blender benchmark.
+        */
+       bufmgr_gem->max_relocs = batch_size / sizeof(uint32_t) / 2 - 2;
+
+       bufmgr_gem->bufmgr.bo_alloc = drm_intel_gem_bo_alloc;
+       bufmgr_gem->bufmgr.bo_alloc_for_render =
+           drm_intel_gem_bo_alloc_for_render;
+       bufmgr_gem->bufmgr.bo_reference = drm_intel_gem_bo_reference;
+       bufmgr_gem->bufmgr.bo_unreference = drm_intel_gem_bo_unreference;
+       bufmgr_gem->bufmgr.bo_map = drm_intel_gem_bo_map;
+       bufmgr_gem->bufmgr.bo_unmap = drm_intel_gem_bo_unmap;
+       bufmgr_gem->bufmgr.bo_subdata = drm_intel_gem_bo_subdata;
+       bufmgr_gem->bufmgr.bo_get_subdata = drm_intel_gem_bo_get_subdata;
+       bufmgr_gem->bufmgr.bo_wait_rendering = drm_intel_gem_bo_wait_rendering;
+       bufmgr_gem->bufmgr.bo_emit_reloc = drm_intel_gem_bo_emit_reloc;
+       bufmgr_gem->bufmgr.bo_pin = drm_intel_gem_bo_pin;
+       bufmgr_gem->bufmgr.bo_unpin = drm_intel_gem_bo_unpin;
+       bufmgr_gem->bufmgr.bo_get_tiling = drm_intel_gem_bo_get_tiling;
+       bufmgr_gem->bufmgr.bo_set_tiling = drm_intel_gem_bo_set_tiling;
+       bufmgr_gem->bufmgr.bo_flink = drm_intel_gem_bo_flink;
+       bufmgr_gem->bufmgr.bo_exec = drm_intel_gem_bo_exec;
+       bufmgr_gem->bufmgr.bo_busy = drm_intel_gem_bo_busy;
+       bufmgr_gem->bufmgr.destroy = drm_intel_bufmgr_gem_destroy;
+       bufmgr_gem->bufmgr.debug = 0;
+       bufmgr_gem->bufmgr.check_aperture_space =
+           drm_intel_gem_check_aperture_space;
+       bufmgr_gem->bufmgr.bo_disable_reuse = drm_intel_gem_bo_disable_reuse;
+       bufmgr_gem->bufmgr.get_pipe_from_crtc_id =
+           drm_intel_gem_get_pipe_from_crtc_id;
+       bufmgr_gem->bufmgr.bo_references = drm_intel_gem_bo_references;
+
+       /* Initialize the linked lists for BO reuse cache. */
+       for (i = 0, size = 4096; i < DRM_INTEL_GEM_BO_BUCKETS; i++, size *= 2) {
+               DRMINITLISTHEAD(&bufmgr_gem->cache_bucket[i].head);
+               bufmgr_gem->cache_bucket[i].size = size;
+       }
+
+       return &bufmgr_gem->bufmgr;
+}
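
On the max_relocs sizing above: for a typical 16384-byte batch it works out to 16384 / 4 / 2 - 2 = 2046 relocation slots. Bring-up from a driver is then just init plus, optionally, enabling the reuse cache; a sketch assuming an already-open DRM fd:

        drm_intel_bufmgr *bufmgr;

        bufmgr = drm_intel_bufmgr_gem_init(fd, 16384);
        if (bufmgr == NULL)
                return -1;      /* initialization failed */
        drm_intel_bufmgr_gem_enable_reuse(bufmgr);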
index 454d457..b7cae6f 100644 (file)
  * Contains public methods followed by private storage for the buffer manager.
  */
 struct _drm_intel_bufmgr {
-   /**
-    * Allocate a buffer object.
-    *
-    * Buffer objects are not necessarily initially mapped into CPU virtual
-    * address space or graphics device aperture.  They must be mapped using
-    * bo_map() to be used by the CPU, and validated for use using bo_validate()
-    * to be used from the graphics device.
-    */
-   drm_intel_bo *(*bo_alloc)(drm_intel_bufmgr *bufmgr, const char *name,
-                            unsigned long size, unsigned int alignment);
-
-   /**
-    * Allocate a buffer object, hinting that it will be used as a render target.
-    *
-    * This is otherwise the same as bo_alloc.
-    */
-   drm_intel_bo *(*bo_alloc_for_render)(drm_intel_bufmgr *bufmgr,
-                                       const char *name,
-                                       unsigned long size,
-                                       unsigned int alignment);
-
-   /** Takes a reference on a buffer object */
-   void (*bo_reference)(drm_intel_bo *bo);
-
-   /**
-    * Releases a reference on a buffer object, freeing the data if
-    * rerefences remain.
-    */
-   void (*bo_unreference)(drm_intel_bo *bo);
-
-   /**
-    * Maps the buffer into userspace.
-    *
-    * This function will block waiting for any existing execution on the
-    * buffer to complete, first.  The resulting mapping is available at
-    * buf->virtual.
-    */
-   int (*bo_map)(drm_intel_bo *bo, int write_enable);
-
-   /** Reduces the refcount on the userspace mapping of the buffer object. */
-   int (*bo_unmap)(drm_intel_bo *bo);
-
-   /**
-    * Write data into an object.
-    *
-    * This is an optional function, if missing,
-    * drm_intel_bo will map/memcpy/unmap.
-    */
-   int (*bo_subdata)(drm_intel_bo *bo, unsigned long offset,
-                    unsigned long size, const void *data);
-
-   /**
-    * Read data from an object
-    *
-    * This is an optional function, if missing,
-    * drm_intel_bo will map/memcpy/unmap.
-    */
-   int (*bo_get_subdata)(drm_intel_bo *bo, unsigned long offset,
-                        unsigned long size, void *data);
-
-   /**
-    * Waits for rendering to an object by the GPU to have completed.
-    *
-    * This is not required for any access to the BO by bo_map, bo_subdata, etc.
-    * It is merely a way for the driver to implement glFinish.
-    */
-   void (*bo_wait_rendering)(drm_intel_bo *bo);
-
-   /**
-    * Tears down the buffer manager instance.
-    */
-   void (*destroy)(drm_intel_bufmgr *bufmgr);
-
-    /**
-     * Add relocation entry in reloc_buf, which will be updated with the
-     * target buffer's real offset on on command submission.
-     *
-     * Relocations remain in place for the lifetime of the buffer object.
-     *
-     * \param bo Buffer to write the relocation into.
-     * \param offset Byte offset within reloc_bo of the pointer to target_bo.
-     * \param target_bo Buffer whose offset should be written into the
-     *                  relocation entry.
-     * \param target_offset Constant value to be added to target_bo's offset in
-     *                     relocation entry.
-     * \param read_domains GEM read domains which the buffer will be read into
-     *       by the command that this relocation is part of.
-     * \param write_domains GEM read domains which the buffer will be dirtied
-     *       in by the command that this relocation is part of.
-     */
-    int (*bo_emit_reloc)(drm_intel_bo *bo, uint32_t offset,
-                        drm_intel_bo *target_bo, uint32_t target_offset,
-                        uint32_t read_domains, uint32_t write_domain);
-
-    /** Executes the command buffer pointed to by bo. */
-    int (*bo_exec)(drm_intel_bo *bo, int used,
-                  drm_clip_rect_t *cliprects, int num_cliprects,
-                  int DR4);
-
-    /**
-     * Pin a buffer to the aperture and fix the offset until unpinned
-     *
-     * \param buf Buffer to pin
-     * \param alignment Required alignment for aperture, in bytes
-     */
-    int (*bo_pin)(drm_intel_bo *bo, uint32_t alignment);
-    /**
-     * Unpin a buffer from the aperture, allowing it to be removed
-     *
-     * \param buf Buffer to unpin
-     */
-    int (*bo_unpin)(drm_intel_bo *bo);
-    /**
-     * Ask that the buffer be placed in tiling mode
-     *
-     * \param buf Buffer to set tiling mode for
-     * \param tiling_mode desired, and returned tiling mode
-     */
-    int (*bo_set_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode,
-                        uint32_t stride);
-    /**
-     * Get the current tiling (and resulting swizzling) mode for the bo.
-     *
-     * \param buf Buffer to get tiling mode for
-     * \param tiling_mode returned tiling mode
-     * \param swizzle_mode returned swizzling mode
-     */
-    int (*bo_get_tiling)(drm_intel_bo *bo, uint32_t *tiling_mode,
-                        uint32_t *swizzle_mode);
-    /**
-     * Create a visible name for a buffer which can be used by other apps
-     *
-     * \param buf Buffer to create a name for
-     * \param name Returned name
-     */
-    int (*bo_flink)(drm_intel_bo *bo, uint32_t *name);
-
-    /**
-     * Returns 1 if mapping the buffer for write could cause the process
-     * to block, due to the object being active in the GPU.
-     */
-    int (*bo_busy)(drm_intel_bo *bo);
-
-    int (*check_aperture_space)(drm_intel_bo **bo_array, int count);
-
-    /**
-     * Disable buffer reuse for buffers which will be shared in some way,
-     * as with scanout buffers. When the buffer reference count goes to zero,
-     * it will be freed and not placed in the reuse list.
-     *
-     * \param bo Buffer to disable reuse for
-     */
-    int (*bo_disable_reuse)(drm_intel_bo *bo);
-
-    /**
-     *
-     * Return the pipe associated with a crtc_id so that vblank
-     * synchronization can use the correct data in the request.
-     * This is only supported for KMS and gem at this point, when
-     * unsupported, this function returns -1 and leaves the decision
-     * of what to do in that case to the caller
-     *
-     * \param bufmgr the associated buffer manager
-     * \param crtc_id the crtc identifier
-     */
-    int (*get_pipe_from_crtc_id)(drm_intel_bufmgr *bufmgr, int crtc_id);
-
-   /** Returns true if target_bo is in the relocation tree rooted at bo. */
-    int (*bo_references)(drm_intel_bo *bo, drm_intel_bo *target_bo);
-
-    int debug; /**< Enables verbose debugging printouts */
+       /**
+        * Allocate a buffer object.
+        *
+        * Buffer objects are not necessarily initially mapped into CPU virtual
+        * address space or graphics device aperture.  They must be mapped
+        * using bo_map() to be used by the CPU, and validated for use using
+        * bo_validate() to be used from the graphics device.
+        */
+       drm_intel_bo *(*bo_alloc) (drm_intel_bufmgr *bufmgr, const char *name,
+                                  unsigned long size, unsigned int alignment);
+
+       /**
+        * Allocate a buffer object, hinting that it will be used as a
+        * render target.
+        *
+        * This is otherwise the same as bo_alloc.
+        */
+       drm_intel_bo *(*bo_alloc_for_render) (drm_intel_bufmgr *bufmgr,
+                                             const char *name,
+                                             unsigned long size,
+                                             unsigned int alignment);
+
+       /** Takes a reference on a buffer object */
+       void (*bo_reference) (drm_intel_bo *bo);
+
+       /**
+        * Releases a reference on a buffer object, freeing the data if
+        * no references remain.
+        */
+       void (*bo_unreference) (drm_intel_bo *bo);
+
+       /**
+        * Maps the buffer into userspace.
+        *
+        * This function will block waiting for any existing execution on the
+        * buffer to complete, first.  The resulting mapping is available at
+        * buffer to complete first.  The resulting mapping is available at
+        */
+       int (*bo_map) (drm_intel_bo *bo, int write_enable);
+
+       /**
+        * Reduces the refcount on the userspace mapping of the buffer
+        * object.
+        */
+       int (*bo_unmap) (drm_intel_bo *bo);
+
+       /**
+        * Write data into an object.
+        *
+        * This is an optional function; if missing,
+        * drm_intel_bo will map/memcpy/unmap.
+        */
+       int (*bo_subdata) (drm_intel_bo *bo, unsigned long offset,
+                          unsigned long size, const void *data);
+
+       /**
+        * Read data from an object.
+        *
+        * This is an optional function; if missing,
+        * drm_intel_bo will map/memcpy/unmap.
+        */
+       int (*bo_get_subdata) (drm_intel_bo *bo, unsigned long offset,
+                              unsigned long size, void *data);
+
+       /**
+        * Waits for rendering to an object by the GPU to have completed.
+        *
+        * This is not required for any access to the BO by bo_map,
+        * bo_subdata, etc.  It is merely a way for the driver to implement
+        * glFinish.
+        */
+       void (*bo_wait_rendering) (drm_intel_bo *bo);
+
+       /**
+        * Tears down the buffer manager instance.
+        */
+       void (*destroy) (drm_intel_bufmgr *bufmgr);
+
+       /**
+        * Add a relocation entry in reloc_buf, which will be updated with the
+        * target buffer's real offset on command submission.
+        *
+        * Relocations remain in place for the lifetime of the buffer object.
+        *
+        * \param bo Buffer to write the relocation into.
+        * \param offset Byte offset within reloc_bo of the pointer to
+        *                      target_bo.
+        * \param target_bo Buffer whose offset should be written into the
+        *                  relocation entry.
+        * \param target_offset Constant value to be added to target_bo's
+        *                      offset in relocation entry.
+        * \param read_domains GEM read domains which the buffer will be
+        *                      read into by the command that this relocation
+        *                      is part of.
+        * \param write_domain GEM write domain which the buffer will be
+        *                      dirtied in by the command that this
+        *                      relocation is part of.
+        */
+       int (*bo_emit_reloc) (drm_intel_bo *bo, uint32_t offset,
+                             drm_intel_bo *target_bo, uint32_t target_offset,
+                             uint32_t read_domains, uint32_t write_domain);
+
+       /** Executes the command buffer pointed to by bo. */
+       int (*bo_exec) (drm_intel_bo *bo, int used,
+                       drm_clip_rect_t *cliprects, int num_cliprects,
+                       int DR4);
+
+       /**
+        * Pin a buffer to the aperture and fix the offset until unpinned
+        *
+        * \param buf Buffer to pin
+        * \param alignment Required alignment for aperture, in bytes
+        */
+       int (*bo_pin) (drm_intel_bo *bo, uint32_t alignment);
+
+       /**
+        * Unpin a buffer from the aperture, allowing it to be removed
+        *
+        * \param buf Buffer to unpin
+        */
+       int (*bo_unpin) (drm_intel_bo *bo);
+
+       /**
+        * Ask that the buffer be placed in tiling mode
+        *
+        * \param buf Buffer to set tiling mode for
+        * \param tiling_mode desired and returned tiling mode
+        */
+       int (*bo_set_tiling) (drm_intel_bo *bo, uint32_t *tiling_mode,
+                             uint32_t stride);
+
+       /**
+        * Get the current tiling (and resulting swizzling) mode for the bo.
+        *
+        * \param buf Buffer to get tiling mode for
+        * \param tiling_mode returned tiling mode
+        * \param swizzle_mode returned swizzling mode
+        */
+       int (*bo_get_tiling) (drm_intel_bo *bo, uint32_t *tiling_mode,
+                             uint32_t *swizzle_mode);
+
+       /**
+        * Create a visible name for a buffer which can be used by other apps
+        *
+        * \param buf Buffer to create a name for
+        * \param name Returned name
+        */
+       int (*bo_flink) (drm_intel_bo *bo, uint32_t *name);
+
+       /**
+        * Returns 1 if mapping the buffer for write could cause the process
+        * to block, due to the object being active in the GPU.
+        */
+       int (*bo_busy) (drm_intel_bo *bo);
+
+       int (*check_aperture_space) (drm_intel_bo **bo_array, int count);
+
+       /**
+        * Disable buffer reuse for buffers which will be shared in some way,
+        * as with scanout buffers. When the buffer reference count goes to
+        * zero, it will be freed and not placed in the reuse list.
+        *
+        * \param bo Buffer to disable reuse for
+        */
+       int (*bo_disable_reuse) (drm_intel_bo *bo);
+
+       /**
+        * Return the pipe associated with a crtc_id so that vblank
+        * synchronization can use the correct data in the request.
+        * This is only supported for KMS and gem at this point; when
+        * unsupported, this function returns -1 and leaves the decision
+        * of what to do in that case to the caller.
+        *
+        * \param bufmgr the associated buffer manager
+        * \param crtc_id the crtc identifier
+        */
+       int (*get_pipe_from_crtc_id) (drm_intel_bufmgr *bufmgr, int crtc_id);
+
+       /** Returns true if target_bo is in the relocation tree rooted at bo. */
+       int (*bo_references) (drm_intel_bo *bo, drm_intel_bo *target_bo);
+
+       /** Enables verbose debugging printouts */
+       int debug;
 };
 
 #endif /* INTEL_BUFMGR_PRIV_H */
-
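
Because every entry point above is a function pointer, the front-end wrappers dispatch without knowing which backend is installed. The get_pipe_from_crtc_id contract is worth spelling out, since -1 deliberately pushes the policy to the caller; a hedged caller sketch:

        int pipe = -1;

        if (bufmgr->get_pipe_from_crtc_id)
                pipe = bufmgr->get_pipe_from_crtc_id(bufmgr, crtc_id);
        if (pipe < 0)
                pipe = 0;       /* unsupported here; pick a sane default */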
index 9814640..1069745 100644 (file)
 #include "xf86drm.h"
 #include "mm.h"
 
-void
-mmDumpMemInfo(const struct mem_block *heap)
+void mmDumpMemInfo(const struct mem_block *heap)
 {
-   drmMsg("Memory heap %p:\n", (void *)heap);
-   if (heap == 0) {
-      drmMsg("  heap == 0\n");
-   } else {
-      const struct mem_block *p;
-
-      for(p = heap->next; p != heap; p = p->next) {
-        drmMsg("  Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
-               p->free ? 'F':'.',
-               p->reserved ? 'R':'.');
-      }
-
-      drmMsg("\nFree list:\n");
-
-      for(p = heap->next_free; p != heap; p = p->next_free) {
-        drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n",p->ofs,p->size,
-               p->free ? 'F':'.',
-               p->reserved ? 'R':'.');
-      }
-
-   }
-   drmMsg("End of memory blocks\n");
+       drmMsg("Memory heap %p:\n", (void *)heap);
+       if (heap == 0) {
+               drmMsg("  heap == 0\n");
+       } else {
+               const struct mem_block *p;
+
+               for (p = heap->next; p != heap; p = p->next) {
+                       drmMsg("  Offset:%08x, Size:%08x, %c%c\n", p->ofs,
+                              p->size, p->free ? 'F' : '.',
+                              p->reserved ? 'R' : '.');
+               }
+
+               drmMsg("\nFree list:\n");
+
+               for (p = heap->next_free; p != heap; p = p->next_free) {
+                       drmMsg(" FREE Offset:%08x, Size:%08x, %c%c\n", p->ofs,
+                              p->size, p->free ? 'F' : '.',
+                              p->reserved ? 'R' : '.');
+               }
+       }
+       drmMsg("End of memory blocks\n");
 }
 
-struct mem_block *
-mmInit(int ofs, int size)
+struct mem_block *mmInit(int ofs, int size)
 {
-   struct mem_block *heap, *block;
-  
-   if (size <= 0) 
-      return NULL;
-
-   heap = (struct mem_block *) calloc(1, sizeof(struct mem_block));
-   if (!heap) 
-      return NULL;
-   
-   block = (struct mem_block *) calloc(1, sizeof(struct mem_block));
-   if (!block) {
-      free(heap);
-      return NULL;
-   }
-
-   heap->next = block;
-   heap->prev = block;
-   heap->next_free = block;
-   heap->prev_free = block;
-
-   block->heap = heap;
-   block->next = heap;
-   block->prev = heap;
-   block->next_free = heap;
-   block->prev_free = heap;
-
-   block->ofs = ofs;
-   block->size = size;
-   block->free = 1;
-
-   return heap;
-}
+       struct mem_block *heap, *block;
 
+       if (size <= 0)
+               return NULL;
 
-static struct mem_block *
-SliceBlock(struct mem_block *p, 
-           int startofs, int size, 
-           int reserved, int alignment)
-{
-   struct mem_block *newblock;
-
-   /* break left  [p, newblock, p->next], then p = newblock */
-   if (startofs > p->ofs) {
-      newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
-      if (!newblock)
-        return NULL;
-      newblock->ofs = startofs;
-      newblock->size = p->size - (startofs - p->ofs);
-      newblock->free = 1;
-      newblock->heap = p->heap;
-
-      newblock->next = p->next;
-      newblock->prev = p;
-      p->next->prev = newblock;
-      p->next = newblock;
-
-      newblock->next_free = p->next_free;
-      newblock->prev_free = p;
-      p->next_free->prev_free = newblock;
-      p->next_free = newblock;
-
-      p->size -= newblock->size;
-      p = newblock;
-   }
-
-   /* break right, also [p, newblock, p->next] */
-   if (size < p->size) {
-      newblock = (struct mem_block*) calloc(1, sizeof(struct mem_block));
-      if (!newblock)
-        return NULL;
-      newblock->ofs = startofs + size;
-      newblock->size = p->size - size;
-      newblock->free = 1;
-      newblock->heap = p->heap;
-
-      newblock->next = p->next;
-      newblock->prev = p;
-      p->next->prev = newblock;
-      p->next = newblock;
-
-      newblock->next_free = p->next_free;
-      newblock->prev_free = p;
-      p->next_free->prev_free = newblock;
-      p->next_free = newblock;
-        
-      p->size = size;
-   }
-
-   /* p = middle block */
-   p->free = 0;
-
-   /* Remove p from the free list: 
-    */
-   p->next_free->prev_free = p->prev_free;
-   p->prev_free->next_free = p->next_free;
-
-   p->next_free = 0;
-   p->prev_free = 0;
-
-   p->reserved = reserved;
-   return p;
+       heap = (struct mem_block *)calloc(1, sizeof(struct mem_block));
+       if (!heap)
+               return NULL;
+
+       block = (struct mem_block *)calloc(1, sizeof(struct mem_block));
+       if (!block) {
+               free(heap);
+               return NULL;
+       }
+
+       heap->next = block;
+       heap->prev = block;
+       heap->next_free = block;
+       heap->prev_free = block;
+
+       block->heap = heap;
+       block->next = heap;
+       block->prev = heap;
+       block->next_free = heap;
+       block->prev_free = heap;
+
+       block->ofs = ofs;
+       block->size = size;
+       block->free = 1;
+
+       return heap;
 }
 
+static struct mem_block *SliceBlock(struct mem_block *p,
+                                   int startofs, int size,
+                                   int reserved, int alignment)
+{
+       struct mem_block *newblock;
+
+       /* break left  [p, newblock, p->next], then p = newblock */
+       if (startofs > p->ofs) {
+               newblock =
+                   (struct mem_block *)calloc(1, sizeof(struct mem_block));
+               if (!newblock)
+                       return NULL;
+               newblock->ofs = startofs;
+               newblock->size = p->size - (startofs - p->ofs);
+               newblock->free = 1;
+               newblock->heap = p->heap;
+
+               newblock->next = p->next;
+               newblock->prev = p;
+               p->next->prev = newblock;
+               p->next = newblock;
+
+               newblock->next_free = p->next_free;
+               newblock->prev_free = p;
+               p->next_free->prev_free = newblock;
+               p->next_free = newblock;
+
+               p->size -= newblock->size;
+               p = newblock;
+       }
+
+       /* break right, also [p, newblock, p->next] */
+       if (size < p->size) {
+               newblock =
+                   (struct mem_block *)calloc(1, sizeof(struct mem_block));
+               if (!newblock)
+                       return NULL;
+               newblock->ofs = startofs + size;
+               newblock->size = p->size - size;
+               newblock->free = 1;
+               newblock->heap = p->heap;
+
+               newblock->next = p->next;
+               newblock->prev = p;
+               p->next->prev = newblock;
+               p->next = newblock;
+
+               newblock->next_free = p->next_free;
+               newblock->prev_free = p;
+               p->next_free->prev_free = newblock;
+               p->next_free = newblock;
+
+               p->size = size;
+       }
+
+       /* p = middle block */
+       p->free = 0;
+
+       /* Remove p from the free list */
+       p->next_free->prev_free = p->prev_free;
+       p->prev_free->next_free = p->next_free;
+
+       p->next_free = 0;
+       p->prev_free = 0;
+
+       p->reserved = reserved;
+       return p;
+}
 
-struct mem_block *
-mmAllocMem(struct mem_block *heap, int size, int align2, int startSearch)
+struct mem_block *mmAllocMem(struct mem_block *heap, int size, int align2,
+                            int startSearch)
 {
-   struct mem_block *p;
-   const int mask = (1 << align2)-1;
-   int startofs = 0;
-   int endofs;
+       struct mem_block *p;
+       const int mask = (1 << align2) - 1;
+       int startofs = 0;
+       int endofs;
 
-   if (!heap || align2 < 0 || size <= 0)
-      return NULL;
+       if (!heap || align2 < 0 || size <= 0)
+               return NULL;
 
-   for (p = heap->next_free; p != heap; p = p->next_free) {
-      assert(p->free);
+       for (p = heap->next_free; p != heap; p = p->next_free) {
+               assert(p->free);
 
-      startofs = (p->ofs + mask) & ~mask;
-      if ( startofs < startSearch ) {
-        startofs = startSearch;
-      }
-      endofs = startofs+size;
-      if (endofs <= (p->ofs+p->size))
-        break;
-   }
+               startofs = (p->ofs + mask) & ~mask;
+               if (startofs < startSearch) {
+                       startofs = startSearch;
+               }
+               endofs = startofs + size;
+               if (endofs <= (p->ofs + p->size))
+                       break;
+       }
 
-   if (p == heap) 
-      return NULL;
+       if (p == heap)
+               return NULL;
 
-   assert(p->free);
-   p = SliceBlock(p,startofs,size,0,mask+1);
+       assert(p->free);
+       p = SliceBlock(p, startofs, size, 0, mask + 1);
 
-   return p;
+       return p;
 }
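
To make the alignment step above concrete: align2 is a log2, so for align2 = 12 the mask is (1 << 12) - 1 = 0xfff, and a candidate offset of 0x1234 rounds up via (0x1234 + 0xfff) & ~0xfff to 0x2000, the next 4096-byte boundary.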
 
-
-struct mem_block *
-mmFindBlock(struct mem_block *heap, int start)
+struct mem_block *mmFindBlock(struct mem_block *heap, int start)
 {
-   struct mem_block *p;
+       struct mem_block *p;
 
-   for (p = heap->next; p != heap; p = p->next) {
-      if (p->ofs == start) 
-        return p;
-   }
+       for (p = heap->next; p != heap; p = p->next) {
+               if (p->ofs == start)
+                       return p;
+       }
 
-   return NULL;
+       return NULL;
 }
 
-
-static int
-Join2Blocks(struct mem_block *p)
+static int Join2Blocks(struct mem_block *p)
 {
-   /* XXX there should be some assertions here */
+       /* XXX there should be some assertions here */
+
+       /* NOTE: heap->free == 0 */
 
-   /* NOTE: heap->free == 0 */
+       if (p->free && p->next->free) {
+               struct mem_block *q = p->next;
 
-   if (p->free && p->next->free) {
-      struct mem_block *q = p->next;
+               assert(p->ofs + p->size == q->ofs);
+               p->size += q->size;
 
-      assert(p->ofs + p->size == q->ofs);
-      p->size += q->size;
+               p->next = q->next;
+               q->next->prev = p;
 
-      p->next = q->next;
-      q->next->prev = p;
+               q->next_free->prev_free = q->prev_free;
+               q->prev_free->next_free = q->next_free;
 
-      q->next_free->prev_free = q->prev_free; 
-      q->prev_free->next_free = q->next_free;
-     
-      free(q);
-      return 1;
-   }
-   return 0;
+               free(q);
+               return 1;
+       }
+       return 0;
 }
 
-int
-mmFreeMem(struct mem_block *b)
+int mmFreeMem(struct mem_block *b)
 {
-   if (!b)
-      return 0;
-
-   if (b->free) {
-      drmMsg("block already free\n");
-      return -1;
-   }
-   if (b->reserved) {
-      drmMsg("block is reserved\n");
-      return -1;
-   }
-
-   b->free = 1;
-   b->next_free = b->heap->next_free;
-   b->prev_free = b->heap;
-   b->next_free->prev_free = b;
-   b->prev_free->next_free = b;
-
-   Join2Blocks(b);
-   if (b->prev != b->heap)
-      Join2Blocks(b->prev);
-
-   return 0;
+       if (!b)
+               return 0;
+
+       if (b->free) {
+               drmMsg("block already free\n");
+               return -1;
+       }
+       if (b->reserved) {
+               drmMsg("block is reserved\n");
+               return -1;
+       }
+
+       b->free = 1;
+       b->next_free = b->heap->next_free;
+       b->prev_free = b->heap;
+       b->next_free->prev_free = b;
+       b->prev_free->next_free = b;
+
+       Join2Blocks(b);
+       if (b->prev != b->heap)
+               Join2Blocks(b->prev);
+
+       return 0;
 }
 
-
-void
-mmDestroy(struct mem_block *heap)
+void mmDestroy(struct mem_block *heap)
 {
-   struct mem_block *p;
+       struct mem_block *p;
 
-   if (!heap)
-      return;
+       if (!heap)
+               return;
 
-   for (p = heap->next; p != heap; ) {
-      struct mem_block *next = p->next;
-      free(p);
-      p = next;
-   }
+       for (p = heap->next; p != heap;) {
+               struct mem_block *next = p->next;
+               free(p);
+               p = next;
+       }
 
-   free(heap);
+       free(heap);
 }
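
Taken together, the heap API above is init/alloc/free/destroy; a minimal lifecycle sketch carving one 4096-aligned block out of a 1 MB range:

        /* Hypothetical usage; offsets are in whatever units the caller uses. */
        struct mem_block *heap = mmInit(0, 1024 * 1024);
        struct mem_block *blk;

        blk = mmAllocMem(heap, 8192, 12, 0);    /* 8 KB at 4 KB alignment */
        if (blk)
                mmFreeMem(blk);         /* back onto the free list, coalesced */
        mmDestroy(heap);                /* frees every block and the heap */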
index 49e3eec..8a5235b 100644 (file)
  * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  */
 
-
 /**
  * Memory manager code.  Primarily used by device drivers to manage texture
  * heaps, etc.
  */
 
-
 #ifndef MM_H
 #define MM_H
 
 struct mem_block {
-   struct mem_block *next, *prev;
-   struct mem_block *next_free, *prev_free;
-   struct mem_block *heap;
-   int ofs,size;
-   unsigned int free:1;
-   unsigned int reserved:1;
+       struct mem_block *next, *prev;
+       struct mem_block *next_free, *prev_free;
+       struct mem_block *heap;
+       int ofs, size;
+       unsigned int free:1;
+       unsigned int reserved:1;
 };
 
 /* Rename the variables in the drm copy of this code so that it doesn't
@@ -67,7 +65,7 @@ extern struct mem_block *mmInit(int ofs, int size);
  * return: pointer to the allocated block, 0 if error
  */
 extern struct mem_block *mmAllocMem(struct mem_block *heap, int size,
-                                      int align2, int startSearch);
+                                   int align2, int startSearch);
 
 /**
  * Free block starts at offset