OSDN Git Service

radeon: fix fence race condition hopefully
[android-x86/external-libdrm.git] / linux-core / drm_bo.c
index 43e4abb..9cf23f2 100644 (file)
  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
  * when there is a chance that it can be zero before or after the operation.
  *
- * dev->struct_mutex also protects all lists and list heads. Hash tables and hash
- * heads.
+ * dev->struct_mutex also protects all lists and list heads,
+ * hash tables and hash heads.
  *
  * bo->mutex protects the buffer object itself excluding the usage field.
- * bo->mutex does also protect the buffer list heads, so to manipulate those, we need
- * both the bo->mutex and the dev->struct_mutex.
+ * bo->mutex does also protect the buffer list heads, so to manipulate those,
+ * we need both the bo->mutex and the dev->struct_mutex.
  *
- * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal is a bit
- * complicated. When dev->struct_mutex is released to grab bo->mutex, the list
- * traversal will, in general, need to be restarted.
+ * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
+ * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
+ * the list traversal will, in general, need to be restarted.
  *
  */
 
-static void drm_bo_destroy_locked(struct drm_buffer_object * bo);
-static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo);
-static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo);
-static void drm_bo_unmap_virtual(struct drm_buffer_object * bo);
+static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
+static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
 
 static inline uint64_t drm_bo_type_flags(unsigned type)
 {
@@ -63,7 +62,7 @@ static inline uint64_t drm_bo_type_flags(unsigned type)
  * bo locked. dev->struct_mutex locked.
  */
 
-void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
+void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
 {
        struct drm_mem_type_manager *man;
 
@@ -74,13 +73,13 @@ void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo)
        list_add_tail(&bo->pinned_lru, &man->pinned);
 }
 
-void drm_bo_add_to_lru(struct drm_buffer_object * bo)
+void drm_bo_add_to_lru(struct drm_buffer_object *bo)
 {
        struct drm_mem_type_manager *man;
 
        DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
 
-       if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
+       if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
            || bo->mem.mem_type != bo->pinned_mem_type) {
                man = &bo->dev->bm.man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
@@ -89,7 +88,7 @@ void drm_bo_add_to_lru(struct drm_buffer_object * bo)
        }
 }
 
-static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
+static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
 {
 #ifdef DRM_ODD_MM_COMPAT
        int ret;
@@ -112,7 +111,7 @@ static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci)
        return 0;
 }
 
-static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
+static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
 {
 #ifdef DRM_ODD_MM_COMPAT
        int ret;
@@ -133,31 +132,36 @@ static void drm_bo_vm_post_move(struct drm_buffer_object * bo)
  * Call bo->mutex locked.
  */
 
-static int drm_bo_add_ttm(struct drm_buffer_object * bo)
+int drm_bo_add_ttm(struct drm_buffer_object *bo)
 {
        struct drm_device *dev = bo->dev;
        int ret = 0;
+       uint32_t page_flags = 0;
 
        DRM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;
 
+       if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
+               page_flags |= DRM_TTM_PAGE_WRITE;
+
        switch (bo->type) {
-       case drm_bo_type_dc:
+       case drm_bo_type_device:
        case drm_bo_type_kernel:
-               bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+               bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
+                                        page_flags, dev->bm.dummy_read_page);
                if (!bo->ttm)
                        ret = -ENOMEM;
                break;
        case drm_bo_type_user:
-               bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
+               bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
+                                        page_flags | DRM_TTM_PAGE_USER,
+                                        dev->bm.dummy_read_page);
                if (!bo->ttm)
                        ret = -ENOMEM;
 
                ret = drm_ttm_set_user(bo->ttm, current,
-                                      bo->mem.mask & DRM_BO_FLAG_WRITE,
                                       bo->buffer_start,
-                                      bo->num_pages,
-                                      dev->bm.dummy_read_page);
+                                      bo->num_pages);
                if (ret)
                        return ret;
 
@@ -170,9 +174,10 @@ static int drm_bo_add_ttm(struct drm_buffer_object * bo)
 
        return ret;
 }
+EXPORT_SYMBOL(drm_bo_add_ttm);
 
-static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
-                                 struct drm_bo_mem_reg * mem,
+static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
+                                 struct drm_bo_mem_reg *mem,
                                  int evict, int no_wait)
 {
        struct drm_device *dev = bo->dev;
@@ -199,40 +204,39 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
                        goto out_err;
 
                if (mem->mem_type != DRM_BO_MEM_LOCAL) {
-                       ret = drm_bind_ttm(bo->ttm, mem);
+                       ret = drm_ttm_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }
-       }
-
-       if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) {
-
-               struct drm_bo_mem_reg *old_mem = &bo->mem;
-               uint64_t save_flags = old_mem->flags;
-               uint64_t save_mask = old_mem->mask;
 
-               *old_mem = *mem;
-               mem->mm_node = NULL;
-               old_mem->mask = save_mask;
-               DRM_FLAG_MASKED(save_flags, mem->flags, DRM_BO_MASK_MEMTYPE);
-
-       } else if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
-                  !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
+               if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
+                       
+                       struct drm_bo_mem_reg *old_mem = &bo->mem;
+                       uint64_t save_flags = old_mem->flags;
+                       uint64_t save_proposed_flags = old_mem->proposed_flags;
+                       
+                       *old_mem = *mem;
+                       mem->mm_node = NULL;
+                       old_mem->proposed_flags = save_proposed_flags;
+                       DRM_FLAG_MASKED(save_flags, mem->flags,
+                                       DRM_BO_MASK_MEMTYPE);
+                       goto moved;
+               }
+               
+       }
 
+       if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
+           !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))                
                ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
-
-       } else if (dev->driver->bo_driver->move) {
+       else if (dev->driver->bo_driver->move) 
                ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
-
-       } else {
-
+       else
                ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
 
-       }
-
        if (ret)
                goto out_err;
 
+moved:
        if (old_is_pci || new_is_pci)
                drm_bo_vm_post_move(bo);
 
@@ -255,14 +259,14 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 
        return 0;
 
-      out_err:
+out_err:
        if (old_is_pci || new_is_pci)
                drm_bo_vm_post_move(bo);
 
        new_man = &bm->man[bo->mem.mem_type];
        if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
                drm_ttm_unbind(bo->ttm);
-               drm_destroy_ttm(bo->ttm);
+               drm_ttm_destroy(bo->ttm);
                bo->ttm = NULL;
        }
 
@@ -271,37 +275,87 @@ static int drm_bo_handle_move_mem(struct drm_buffer_object * bo,
 
 /*
  * Call bo->mutex locked.
- * Wait until the buffer is idle.
+ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
  */
 
-int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
-               int no_wait)
+static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
 {
-       int ret;
+       struct drm_fence_object *fence = bo->fence;
 
-       DRM_ASSERT_LOCKED(&bo->mutex);
+       if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+               return -EBUSY;
 
-       if (bo->fence) {
-               if (drm_fence_object_signaled(bo->fence, bo->fence_type, 0)) {
+       if (fence) {
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
+                       drm_fence_usage_deref_unlocked(&bo->fence);
+                       return 0;
+               }
+               drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
-               if (no_wait) {
+               return -EBUSY;
+       }
+       return 0;
+}
+
+static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
+{
+       int ret;
+
+       mutex_lock(&bo->mutex);
+       ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
+       mutex_unlock(&bo->mutex);
+       return ret;
+}
+
+
+/*
+ * Call bo->mutex locked.
+ * Wait until the buffer is idle.
+ */
+
+int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
+               int no_wait, int check_unfenced)
+{
+       int ret;
+
+       DRM_ASSERT_LOCKED(&bo->mutex);
+       while(unlikely(drm_bo_busy(bo, check_unfenced))) {
+               if (no_wait)
                        return -EBUSY;
+
+               if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
+                       mutex_unlock(&bo->mutex);
+                       wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
+                       mutex_lock(&bo->mutex);
+                       bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
+               }
+
+               if (bo->fence) {
+                       struct drm_fence_object *fence;
+                       uint32_t fence_type = bo->fence_type;
+
+                       drm_fence_reference_unlocked(&fence, bo->fence);
+                       mutex_unlock(&bo->mutex);
+
+                       ret = drm_fence_object_wait(fence, lazy, !interruptible,
+                                                   fence_type);
+
+                       drm_fence_usage_deref_unlocked(&fence);
+                       mutex_lock(&bo->mutex);
+                       bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
+                       if (ret)
+                               return ret;
                }
-               ret =
-                   drm_fence_object_wait(bo->fence, lazy, ignore_signals,
-                                         bo->fence_type);
-               if (ret)
-                       return ret;
 
-               drm_fence_usage_deref_unlocked(&bo->fence);
        }
        return 0;
 }
 EXPORT_SYMBOL(drm_bo_wait);
 
-static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
+static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
 {
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
@@ -311,7 +365,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
                        unsigned long _end = jiffies + 3 * DRM_HZ;
                        int ret;
                        do {
-                               ret = drm_bo_wait(bo, 0, 1, 0);
+                               ret = drm_bo_wait(bo, 0, 0, 0, 0);
                                if (ret && allow_errors)
                                        return ret;
 
@@ -336,7 +390,7 @@ static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors)
  * fence object and removing from lru lists and memory managers.
  */
 
-static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
+static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
 {
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
@@ -350,7 +404,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
        DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
 
        if (bo->fence && drm_fence_object_signaled(bo->fence,
-                                                  bo->fence_type, 0))
+                                                  bo->fence_type))
                drm_fence_usage_deref_unlocked(&bo->fence);
 
        if (bo->fence && remove_all)
@@ -358,9 +412,8 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
 
        mutex_lock(&dev->struct_mutex);
 
-       if (!atomic_dec_and_test(&bo->usage)) {
+       if (!atomic_dec_and_test(&bo->usage))
                goto out;
-       }
 
        if (!bo->fence) {
                list_del_init(&bo->lru);
@@ -388,7 +441,7 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
                                      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
        }
 
-      out:
+out:
        mutex_unlock(&bo->mutex);
        return;
 }
@@ -398,13 +451,14 @@ static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all)
  * to the buffer object. Then destroy it.
  */
 
-static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
+static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
 {
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
 
        DRM_ASSERT_LOCKED(&dev->struct_mutex);
 
+       DRM_DEBUG("freeing %p\n", bo);
        if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
            list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
            list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
@@ -421,7 +475,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
 
                if (bo->ttm) {
                        drm_ttm_unbind(bo->ttm);
-                       drm_destroy_ttm(bo->ttm);
+                       drm_ttm_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
 
@@ -446,7 +500,7 @@ static void drm_bo_destroy_locked(struct drm_buffer_object * bo)
  * Call dev->struct_mutex locked.
  */
 
-static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
+static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
 {
        struct drm_buffer_manager *bm = &dev->bm;
 
@@ -457,6 +511,7 @@ static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
                entry = list_entry(list, struct drm_buffer_object, ddestroy);
 
                nentry = NULL;
+               DRM_DEBUG("bo is %p, %d\n", entry, entry->num_pages);
                if (next != &bm->ddestroy) {
                        nentry = list_entry(next, struct drm_buffer_object,
                                            ddestroy);
@@ -465,9 +520,8 @@ static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all)
 
                drm_bo_cleanup_refs(entry, remove_all);
 
-               if (nentry) {
+               if (nentry)
                        atomic_dec(&nentry->usage);
-               }
        }
 }
 
@@ -501,32 +555,19 @@ static void drm_bo_delayed_workqueue(struct work_struct *work)
        mutex_unlock(&dev->struct_mutex);
 }
 
-void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo)
+void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
 {
-        struct drm_buffer_object *tmp_bo = *bo;
+       struct drm_buffer_object *tmp_bo = *bo;
        bo = NULL;
 
        DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
 
-       if (atomic_dec_and_test(&tmp_bo->usage)) {
+       if (atomic_dec_and_test(&tmp_bo->usage))
                drm_bo_destroy_locked(tmp_bo);
-       }
 }
 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
 
-static void drm_bo_base_deref_locked(struct drm_file * file_priv,
-                                    struct drm_user_object * uo)
-{
-       struct drm_buffer_object *bo =
-           drm_user_object_entry(uo, struct drm_buffer_object, base);
-
-       DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
-
-       drm_bo_takedown_vm_locked(bo);
-       drm_bo_usage_deref_locked(&bo);
-}
-
-void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo)
+void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
 {
        struct drm_buffer_object *tmp_bo = *bo;
        struct drm_device *dev = tmp_bo->dev;
@@ -558,7 +599,7 @@ void drm_putback_buffer_objects(struct drm_device *dev)
 
                list_del_init(&entry->lru);
                DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
-               DRM_WAKEUP(&entry->event_queue);
+               wake_up_all(&entry->event_queue);
 
                /*
                 * FIXME: Might want to put back on head of list
@@ -573,7 +614,6 @@ void drm_putback_buffer_objects(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_putback_buffer_objects);
 
-
 /*
  * Note. The caller has to register (if applicable)
  * and deregister fence object usage.
@@ -582,8 +622,8 @@ EXPORT_SYMBOL(drm_putback_buffer_objects);
 int drm_fence_buffer_objects(struct drm_device *dev,
                             struct list_head *list,
                             uint32_t fence_flags,
-                            struct drm_fence_object * fence,
-                            struct drm_fence_object ** used_fence)
+                            struct drm_fence_object *fence,
+                            struct drm_fence_object **used_fence)
 {
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_buffer_object *entry;
@@ -659,7 +699,7 @@ int drm_fence_buffer_objects(struct drm_device *dev,
                        entry->fence_type = entry->new_fence_type;
                        DRM_FLAG_MASKED(entry->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
-                       DRM_WAKEUP(&entry->event_queue);
+                       wake_up_all(&entry->event_queue);
                        drm_bo_add_to_lru(entry);
                }
                mutex_unlock(&entry->mutex);
@@ -667,7 +707,7 @@ int drm_fence_buffer_objects(struct drm_device *dev,
                l = list->next;
        }
        DRM_DEBUG("Fenced %d buffers\n", count);
-      out:
+out:
        mutex_unlock(&dev->struct_mutex);
        *used_fence = fence;
        return ret;
@@ -678,7 +718,7 @@ EXPORT_SYMBOL(drm_fence_buffer_objects);
  * bo->mutex locked
  */
 
-static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
+static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
                        int no_wait)
 {
        int ret = 0;
@@ -686,27 +726,36 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
        struct drm_bo_mem_reg evict_mem;
 
        /*
-        * Someone might have modified the buffer before we took the buffer mutex.
+        * Someone might have modified the buffer before we took the
+        * buffer mutex.
         */
 
-       if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
-               goto out;
-       if (bo->mem.mem_type != mem_type)
-               goto out;
-
-       ret = drm_bo_wait(bo, 0, 0, no_wait);
+       do {
+               bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
+
+               if (unlikely(bo->mem.flags &
+                            (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
+                       goto out_unlock;
+               if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+                       goto out_unlock;
+               if (unlikely(bo->mem.mem_type != mem_type))
+                       goto out_unlock;
+               ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
+               if (ret)
+                       goto out_unlock;
 
-       if (ret && ret != -EAGAIN) {
-               DRM_ERROR("Failed to expire fence before "
-                         "buffer eviction.\n");
-               goto out;
-       }
+       } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
 
        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
 
        evict_mem = bo->mem;
-       evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
+       evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
+
+       mutex_lock(&dev->struct_mutex);
+       list_del_init(&bo->lru);
+       mutex_unlock(&dev->struct_mutex);
+
        ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
 
        if (ret) {
@@ -724,20 +773,21 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
                goto out;
        }
 
+       DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
+                       _DRM_BO_FLAG_EVICTED);
+
+out:
        mutex_lock(&dev->struct_mutex);
        if (evict_mem.mm_node) {
                if (evict_mem.mm_node != bo->pinned_node)
                        drm_mm_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }
-       list_del(&bo->lru);
        drm_bo_add_to_lru(bo);
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
+out_unlock:
        mutex_unlock(&dev->struct_mutex);
 
-       DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
-                       _DRM_BO_FLAG_EVICTED);
-
-      out:
        return ret;
 }
 
@@ -745,8 +795,8 @@ static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type,
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
  */
-static int drm_bo_mem_force_space(struct drm_device * dev,
-                                 struct drm_bo_mem_reg * mem,
+static int drm_bo_mem_force_space(struct drm_device *dev,
+                                 struct drm_bo_mem_reg *mem,
                                  uint32_t mem_type, int no_wait)
 {
        struct drm_mm_node *node;
@@ -772,8 +822,6 @@ static int drm_bo_mem_force_space(struct drm_device * dev,
                atomic_inc(&entry->usage);
                mutex_unlock(&dev->struct_mutex);
                mutex_lock(&entry->mutex);
-               BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
-
                ret = drm_bo_evict(entry, mem_type, no_wait);
                mutex_unlock(&entry->mutex);
                drm_bo_usage_deref_unlocked(&entry);
@@ -788,16 +836,21 @@ static int drm_bo_mem_force_space(struct drm_device * dev,
        }
 
        node = drm_mm_get_block(node, num_pages, mem->page_alignment);
+       if (unlikely(!node)) {
+               mutex_unlock(&dev->struct_mutex);
+               return -ENOMEM;
+       }
+
        mutex_unlock(&dev->struct_mutex);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
 }
 
-static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
+static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
                                int disallow_fixed,
                                uint32_t mem_type,
-                               uint64_t mask, uint32_t * res_mask)
+                               uint64_t mask, uint32_t *res_mask)
 {
        uint64_t cur_flags = drm_bo_type_flags(mem_type);
        uint64_t flag_diff;
@@ -830,7 +883,7 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
 
        if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
            ((mask & DRM_BO_FLAG_MAPPABLE) ||
-            (mask & DRM_BO_FLAG_FORCE_MAPPABLE)) )
+            (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
                return 0;
 
        *res_mask = cur_flags;
@@ -845,8 +898,8 @@ static int drm_bo_mt_compatible(struct drm_mem_type_manager * man,
  * drm_bo_mem_force_space is attempted in priority order to evict and find
  * space.
  */
-int drm_bo_mem_space(struct drm_buffer_object * bo,
-                    struct drm_bo_mem_reg * mem, int no_wait)
+int drm_bo_mem_space(struct drm_buffer_object *bo,
+                    struct drm_bo_mem_reg *mem, int no_wait)
 {
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
@@ -870,7 +923,7 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
 
                type_ok = drm_bo_mt_compatible(man,
                                               bo->type == drm_bo_type_user,
-                                              mem_type, mem->mask,
+                                              mem_type, mem->proposed_flags,
                                               &cur_flags);
 
                if (!type_ok)
@@ -922,13 +975,13 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
                if (!drm_bo_mt_compatible(man,
                                          bo->type == drm_bo_type_user,
                                          mem_type,
-                                         mem->mask,
+                                         mem->proposed_flags,
                                          &cur_flags))
                        continue;
 
                ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
 
-               if (ret == 0) {
+               if (ret == 0 && mem->mm_node) {
                        mem->flags = cur_flags;
                        return 0;
                }
@@ -940,121 +993,94 @@ int drm_bo_mem_space(struct drm_buffer_object * bo,
        ret = (has_eagain) ? -EAGAIN : -ENOMEM;
        return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_mem_space);
 
-static int drm_bo_new_mask(struct drm_buffer_object * bo,
-                          uint64_t new_flags, uint64_t used_mask)
+/*
+ * drm_bo_propose_flags:
+ *
+ * @bo: the buffer object getting new flags
+ *
+ * @new_flags: the new set of proposed flag bits
+ *
+ * @new_mask: the mask of bits changed in new_flags
+ *
+ * Modify the proposed_flag bits in @bo
+ */
+static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
+                                        uint64_t new_flags, uint64_t new_mask)
 {
-       uint32_t new_props;
+       uint32_t new_access;
 
+       /* Copy unchanging bits from existing proposed_flags */
+       DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
+        
        if (bo->type == drm_bo_type_user &&
-           ((used_mask & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
+           ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
             (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
                DRM_ERROR("User buffers require cache-coherent memory.\n");
                return -EINVAL;
        }
 
-       if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
-               DRM_ERROR
-                   ("DRM_BO_FLAG_NO_EVICT is only available to priviliged "
-                    "processes.\n");
+       if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
+               DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
                return -EPERM;
        }
 
+       if (likely(new_mask & DRM_BO_MASK_MEM) &&
+           (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
+           !DRM_SUSER(DRM_CURPROC)) {
+               if (likely(bo->mem.flags & new_flags & new_mask &
+                          DRM_BO_MASK_MEM))
+                       new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
+                               (bo->mem.flags & DRM_BO_MASK_MEM);
+               else {
+                       DRM_ERROR("Incompatible memory type specification "
+                                 "for NO_EVICT buffer.\n");
+                       return -EPERM;
+               }
+       }
+
        if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
-               DRM_ERROR
-                       ("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
+               DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
                return -EPERM;
        }
 
-       new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
-                                DRM_BO_FLAG_READ);
+       new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
+                                 DRM_BO_FLAG_READ);
 
-       if (!new_props) {
+       if (new_access == 0) {
                DRM_ERROR("Invalid buffer object rwx properties\n");
                return -EINVAL;
        }
 
-       bo->mem.mask = new_flags;
+       bo->mem.proposed_flags = new_flags;
        return 0;
 }
 
 /*
- * Call dev->struct_mutex locked.
- */
-
-struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
-                                             uint32_t handle, int check_owner)
-{
-       struct drm_user_object *uo;
-       struct drm_buffer_object *bo;
-
-       uo = drm_lookup_user_object(file_priv, handle);
-
-       if (!uo || (uo->type != drm_buffer_type)) {
-               DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
-               return NULL;
-       }
-
-       if (check_owner && file_priv != uo->owner) {
-               if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
-                       return NULL;
-       }
-
-       bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
-       atomic_inc(&bo->usage);
-       return bo;
-}
-EXPORT_SYMBOL(drm_lookup_buffer_object);
-
-/*
  * Call bo->mutex locked.
- * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
+ * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
  * Doesn't do any fence flushing as opposed to the drm_bo_busy function.
  */
 
-static int drm_bo_quick_busy(struct drm_buffer_object * bo)
+int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
 {
        struct drm_fence_object *fence = bo->fence;
 
-       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-       if (fence) {
-               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
-                       drm_fence_usage_deref_unlocked(&bo->fence);
-                       return 0;
-               }
-               return 1;
-       }
-       return 0;
-}
-
-/*
- * Call bo->mutex locked.
- * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
- */
-
-static int drm_bo_busy(struct drm_buffer_object * bo)
-{
-       struct drm_fence_object *fence = bo->fence;
+       if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
+               return -EBUSY;
 
-       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
        if (fence) {
-               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
-                       drm_fence_usage_deref_unlocked(&bo->fence);
-                       return 0;
-               }
-               drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
-               if (drm_fence_object_signaled(fence, bo->fence_type, 0)) {
+               if (drm_fence_object_signaled(fence, bo->fence_type)) {
                        drm_fence_usage_deref_unlocked(&bo->fence);
                        return 0;
                }
-               return 1;
+               return -EBUSY;
        }
        return 0;
 }
 
-static int drm_bo_evict_cached(struct drm_buffer_object * bo)
+int drm_bo_evict_cached(struct drm_buffer_object *bo)
 {
        int ret = 0;
 
@@ -1064,373 +1090,154 @@ static int drm_bo_evict_cached(struct drm_buffer_object * bo)
        return ret;
 }
 
+EXPORT_SYMBOL(drm_bo_evict_cached);
 /*
  * Wait until a buffer is unmapped.
  */
 
-static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait)
+static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
 {
        int ret = 0;
 
-       if ((atomic_read(&bo->mapped) >= 0) && no_wait)
-               return -EBUSY;
-
-       DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-                   atomic_read(&bo->mapped) == -1);
-
-       if (ret == -EINTR)
-               ret = -EAGAIN;
-
-       return ret;
-}
-
-static int drm_bo_check_unfenced(struct drm_buffer_object * bo)
-{
-       int ret;
-
-       mutex_lock(&bo->mutex);
-       ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-       mutex_unlock(&bo->mutex);
-       return ret;
-}
-
-/*
- * Wait until a buffer, scheduled to be fenced moves off the unfenced list.
- * Until then, we cannot really do anything with it except delete it.
- */
-
-static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait,
-                               int eagain_if_wait)
-{
-       int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-
-       if (ret && no_wait)
-               return -EBUSY;
-       else if (!ret)
+       if (likely(atomic_read(&bo->mapped)) == 0)
                return 0;
 
-       ret = 0;
-       mutex_unlock(&bo->mutex);
-       DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
-                   !drm_bo_check_unfenced(bo));
-       mutex_lock(&bo->mutex);
-       if (ret == -EINTR)
-               return -EAGAIN;
-       ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
-       if (ret) {
-               DRM_ERROR("Timeout waiting for buffer to become fenced\n");
+       if (unlikely(no_wait))
                return -EBUSY;
-       }
-       if (eagain_if_wait)
-               return -EAGAIN;
-
-       return 0;
-}
-
-/*
- * Fill in the ioctl reply argument with buffer info.
- * Bo locked.
- */
-
-static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo,
-                               struct drm_bo_info_rep *rep)
-{
-       if (!rep)
-               return;
 
-       rep->handle = bo->base.hash.key;
-       rep->flags = bo->mem.flags;
-       rep->size = bo->num_pages * PAGE_SIZE;
-       rep->offset = bo->offset;
-
-       if (bo->type == drm_bo_type_dc)
-               rep->arg_handle = bo->map_list.user_token;
-       else
-               rep->arg_handle = 0;
+       do {
+               mutex_unlock(&bo->mutex);
+               ret = wait_event_interruptible(bo->event_queue,
+                                              atomic_read(&bo->mapped) == 0);
+               mutex_lock(&bo->mutex);
+               bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
 
-       rep->mask = bo->mem.mask;
-       rep->buffer_start = bo->buffer_start;
-       rep->fence_flags = bo->fence_type;
-       rep->rep_flags = 0;
-       rep->page_alignment = bo->mem.page_alignment;
+               if (ret == -ERESTARTSYS)
+                       ret = -EAGAIN;
+       } while((ret == 0) && atomic_read(&bo->mapped) > 0);
 
-       if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
-               DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
-                               DRM_BO_REP_BUSY);
-       }
+       return ret;
 }
 
 /*
- * Wait for buffer idle and register that we've mapped the buffer.
- * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
- * so that if the client dies, the mapping is automatically
- * unregistered.
+ * bo->mutex locked.
+ * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
  */
 
-static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
-                                uint32_t map_flags, unsigned hint,
-                                struct drm_bo_info_rep *rep)
+int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
+                      int no_wait, int move_unfenced)
 {
-       struct drm_buffer_object *bo;
-       struct drm_device *dev = file_priv->head->dev;
+       struct drm_device *dev = bo->dev;
+       struct drm_buffer_manager *bm = &dev->bm;
        int ret = 0;
-       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
+       struct drm_bo_mem_reg mem;
 
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       mutex_unlock(&dev->struct_mutex);
+       BUG_ON(bo->fence != NULL);
 
-       if (!bo)
-               return -EINVAL;
+       mem.num_pages = bo->num_pages;
+       mem.size = mem.num_pages << PAGE_SHIFT;
+       mem.proposed_flags = new_mem_flags;
+       mem.page_alignment = bo->mem.page_alignment;
 
-       mutex_lock(&bo->mutex);
-       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-       if (ret)
-               goto out;
+       mutex_lock(&bm->evict_mutex);
+       mutex_lock(&dev->struct_mutex);
+       list_del_init(&bo->lru);
+       mutex_unlock(&dev->struct_mutex);
 
        /*
-        * If this returns true, we are currently unmapped.
-        * We need to do this test, because unmapping can
-        * be done without the bo->mutex held.
+        * Determine where to move the buffer.
         */
+       ret = drm_bo_mem_space(bo, &mem, no_wait);
+       if (ret)
+               goto out_unlock;
 
-       while (1) {
-               if (atomic_inc_and_test(&bo->mapped)) {
-                       if (no_wait && drm_bo_busy(bo)) {
-                               atomic_dec(&bo->mapped);
-                               ret = -EBUSY;
-                               goto out;
-                       }
-                       ret = drm_bo_wait(bo, 0, 0, no_wait);
-                       if (ret) {
-                               atomic_dec(&bo->mapped);
-                               goto out;
-                       }
-
-                       if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
-                               drm_bo_evict_cached(bo);
-
-                       break;
-               } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
-
-                       /*
-                        * We are already mapped with different flags.
-                        * need to wait for unmap.
-                        */
-
-                       ret = drm_bo_wait_unmapped(bo, no_wait);
-                       if (ret)
-                               goto out;
+       ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
 
-                       continue;
+out_unlock:
+       mutex_lock(&dev->struct_mutex);
+       if (ret || !move_unfenced) {
+               if (mem.mm_node) {
+                       if (mem.mm_node != bo->pinned_node)
+                               drm_mm_put_block(mem.mm_node);
+                       mem.mm_node = NULL;
                }
-               break;
+               drm_bo_add_to_lru(bo);
+               if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
+                       wake_up_all(&bo->event_queue);
+                       DRM_FLAG_MASKED(bo->priv_flags, 0,
+                                       _DRM_BO_FLAG_UNFENCED);
+               }
+       } else {
+               list_add_tail(&bo->lru, &bm->unfenced);
+               DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
+                               _DRM_BO_FLAG_UNFENCED);
        }
+       /* clear the clean flags */
+       bo->mem.flags &= ~DRM_BO_FLAG_CLEAN;
+       bo->mem.proposed_flags &= ~DRM_BO_FLAG_CLEAN;
 
-       mutex_lock(&dev->struct_mutex);
-       ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
        mutex_unlock(&dev->struct_mutex);
-       if (ret) {
-               if (atomic_add_negative(-1, &bo->mapped))
-                       DRM_WAKEUP(&bo->event_queue);
-
-       } else
-               drm_bo_fill_rep_arg(bo, rep);
-      out:
-       mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(&bo);
+       mutex_unlock(&bm->evict_mutex);
        return ret;
 }
 
-static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
+static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
 {
-       struct drm_device *dev = file_priv->head->dev;
-       struct drm_buffer_object *bo;
-       struct drm_ref_object *ro;
-       int ret = 0;
-
-       mutex_lock(&dev->struct_mutex);
+       uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
 
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       if (!bo) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
-       if (!ro) {
-               ret = -EINVAL;
-               goto out;
-       }
-
-       drm_remove_ref_object(file_priv, ro);
-       drm_bo_usage_deref_locked(&bo);
-      out:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
-/*
- * Call struct-sem locked.
- */
-
-static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
-                                        struct drm_user_object * uo,
-                                        enum drm_ref_type action)
-{
-       struct drm_buffer_object *bo =
-           drm_user_object_entry(uo, struct drm_buffer_object, base);
-
-       /*
-        * We DON'T want to take the bo->lock here, because we want to
-        * hold it when we wait for unmapped buffer.
-        */
-
-       BUG_ON(action != _DRM_REF_TYPE1);
-
-       if (atomic_add_negative(-1, &bo->mapped))
-               DRM_WAKEUP(&bo->event_queue);
-}
-
-/*
- * bo->mutex locked.
- * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
- */
-
-int drm_bo_move_buffer(struct drm_buffer_object * bo, uint64_t new_mem_flags,
-                      int no_wait, int move_unfenced)
-{
-       struct drm_device *dev = bo->dev;
-       struct drm_buffer_manager *bm = &dev->bm;
-       int ret = 0;
-       struct drm_bo_mem_reg mem;
-       /*
-        * Flush outstanding fences.
-        */
-
-       drm_bo_busy(bo);
-
-       /*
-        * Wait for outstanding fences.
-        */
-
-       ret = drm_bo_wait(bo, 0, 0, no_wait);
-       if (ret)
-               return ret;
-
-       mem.num_pages = bo->num_pages;
-       mem.size = mem.num_pages << PAGE_SHIFT;
-       mem.mask = new_mem_flags;
-       mem.page_alignment = bo->mem.page_alignment;
-
-       mutex_lock(&bm->evict_mutex);
-       mutex_lock(&dev->struct_mutex);
-       list_del_init(&bo->lru);
-       mutex_unlock(&dev->struct_mutex);
-
-       /*
-        * Determine where to move the buffer.
-        */
-       ret = drm_bo_mem_space(bo, &mem, no_wait);
-       if (ret)
-               goto out_unlock;
-
-       ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
-
- out_unlock:
-       if (ret || !move_unfenced) {
-               mutex_lock(&dev->struct_mutex);
-               if (mem.mm_node) {
-                       if (mem.mm_node != bo->pinned_node)
-                               drm_mm_put_block(mem.mm_node);
-                       mem.mm_node = NULL;
-               }
-               mutex_unlock(&dev->struct_mutex);
-       }
-
-       mutex_unlock(&bm->evict_mutex);
-       return ret;
-}
-
-static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem)
-{
-       uint32_t flag_diff = (mem->mask ^ mem->flags);
-
-       if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
+       if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
                return 0;
        if ((flag_diff & DRM_BO_FLAG_CACHED) &&
-           (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
-            (mem->mask & DRM_BO_FLAG_FORCE_CACHING))) {
-         return 0;
-       }
+           (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
+            (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
+               return 0;
+
        if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
-           ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
-            (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
+           ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
+            (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
                return 0;
        return 1;
 }
 
-/*
- * bo locked.
+/**
+ * drm_buffer_object_validate:
+ *
+ * @bo: the buffer object to modify
+ *
+ * @fence_class: the new fence class covering this buffer
+ *
+ * @move_unfenced: a boolean indicating whether switching the
+ * memory space of this buffer should cause the buffer to
+ * be placed on the unfenced list.
+ *
+ * @no_wait: whether this function should return -EBUSY instead
+ * of waiting.
+ *
+ * Change buffer access parameters. This can involve moving
+ * the buffer to the correct memory type, pinning the buffer
+ * or changing the class/type of fence covering this buffer
+ *
+ * Must be called with bo locked.
  */
 
-static int drm_buffer_object_validate(struct drm_buffer_object * bo,
+static int drm_buffer_object_validate(struct drm_buffer_object *bo,
                                      uint32_t fence_class,
-                                     int move_unfenced, int no_wait)
+                                     int move_unfenced, int no_wait,
+                                     int move_buffer)
 {
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       uint32_t ftype;
        int ret;
 
-       DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
-                 (unsigned long long) bo->mem.mask,
-                 (unsigned long long) bo->mem.flags);
-
-       ret = driver->fence_type(bo, &fence_class, &ftype);
-
-       if (ret) {
-               DRM_ERROR("Driver did not support given buffer permissions\n");
-               return ret;
-       }
-
-       /*
-        * We're switching command submission mechanism,
-        * or cannot simply rely on the hardware serializing for us.
-        *
-        * Wait for buffer idle.
-        */
-
-       if ((fence_class != bo->fence_class) ||
-           ((ftype ^ bo->fence_type) & bo->fence_type)) {
-
-               ret = drm_bo_wait(bo, 0, 0, no_wait);
-
-               if (ret)
-                       return ret;
-
-       }
-
-       bo->new_fence_class = fence_class;
-       bo->new_fence_type = ftype;
-
-       ret = drm_bo_wait_unmapped(bo, no_wait);
-       if (ret) {
-               DRM_ERROR("Timed out waiting for buffer unmap.\n");
-               return ret;
-       }
-
-       /*
-        * Check whether we need to move buffer.
-        */
-
-       if (!drm_bo_mem_compat(&bo->mem)) {
-               ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
+       if (move_buffer) {
+               ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
                                         move_unfenced);
                if (ret) {
                        if (ret != -EAGAIN)
                                DRM_ERROR("Failed moving buffer.\n");
+                       if (ret == -ENOMEM)
+                               DRM_ERROR("Out of aperture space or "
+                                         "DRM memory quota.\n");
                        return ret;
                }
        }
@@ -1439,7 +1246,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
         * Pinned buffers.
         */
 
-       if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
+       if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
                bo->pinned_mem_type = bo->mem.mem_type;
                mutex_lock(&dev->struct_mutex);
                list_del_init(&bo->pinned_lru);
@@ -1475,7 +1282,13 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
                if (ret)
                        return ret;
        }
-       DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
+       /*
+        * Validation has succeeded, move the access and other
+        * non-mapping-related flag bits from the proposed flags to
+        * the active flags
+        */
+
+       DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
 
        /*
         * Finally, adjust lru to be sure.
@@ -1490,7 +1303,7 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
        } else {
                drm_bo_add_to_lru(bo);
                if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
-                       DRM_WAKEUP(&bo->event_queue);
+                       wake_up_all(&bo->event_queue);
                        DRM_FLAG_MASKED(bo->priv_flags, 0,
                                        _DRM_BO_FLAG_UNFENCED);
                }
@@ -1500,427 +1313,236 @@ static int drm_buffer_object_validate(struct drm_buffer_object * bo,
        return 0;
 }
 
-int drm_bo_do_validate(struct drm_buffer_object *bo,
-                      uint64_t flags, uint64_t mask, uint32_t hint,
-                      uint32_t fence_class,
-                      int no_wait,
-                      struct drm_bo_info_rep *rep)
+/*
+ * This function is called with bo->mutex locked, but may release it
+ * temporarily to wait for events.
+ */
+
+static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
+                                      uint64_t flags,
+                                      uint64_t mask,
+                                      uint32_t hint,
+                                      uint32_t fence_class,
+                                      int no_wait,
+                                      int *move_buffer)
 {
+       struct drm_device *dev = bo->dev;
+       struct drm_bo_driver *driver = dev->driver->bo_driver;
+       uint32_t ftype;
+
        int ret;
 
-       mutex_lock(&bo->mutex);
-       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
 
+       ret = drm_bo_modify_proposed_flags (bo, flags, mask);
        if (ret)
-               goto out;
+               return ret;
 
+       DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
+                 (unsigned long long) bo->mem.proposed_flags,
+                 (unsigned long long) bo->mem.flags);
 
-       DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
-       ret = drm_bo_new_mask(bo, flags, mask);
+       ret = drm_bo_wait_unmapped(bo, no_wait);
        if (ret)
-               goto out;
-
-       ret = drm_buffer_object_validate(bo,
-                                        fence_class,
-                                        !(hint & DRM_BO_HINT_DONT_FENCE),
-                                        no_wait);
-out:
-       if (rep)
-               drm_bo_fill_rep_arg(bo, rep);
-
-       mutex_unlock(&bo->mutex);
-       return ret;
-}
-EXPORT_SYMBOL(drm_bo_do_validate);
-
-
-int drm_bo_handle_validate(struct drm_file * file_priv, uint32_t handle,
-                          uint32_t fence_class,
-                          uint64_t flags, uint64_t mask, 
-                          uint32_t hint,
-                          int use_old_fence_class,
-                          struct drm_bo_info_rep * rep,
-                          struct drm_buffer_object **bo_rep)
-{
-       struct drm_device *dev = file_priv->head->dev;
-       struct drm_buffer_object *bo;
-       int ret;
-       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       mutex_unlock(&dev->struct_mutex);
+               return ret;
 
-       if (!bo) 
-               return -EINVAL;
+       ret = driver->fence_type(bo, &fence_class, &ftype);
 
-       if (use_old_fence_class)
-               fence_class = bo->fence_class;
+       if (ret) {
+               DRM_ERROR("Driver did not support given buffer permissions.\n");
+               return ret;
+       }
 
        /*
-        * Only allow creator to change shared buffer mask.
+        * We're switching command submission mechanism,
+        * or cannot simply rely on the hardware serializing for us.
+        * Insert a driver-dependant barrier or wait for buffer idle.
         */
 
-       if (bo->base.owner != file_priv) 
-               mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
+       if ((fence_class != bo->fence_class) ||
+           ((ftype ^ bo->fence_type) & bo->fence_type)) {
 
+               ret = -EINVAL;
+               if (driver->command_stream_barrier) {
+                       ret = driver->command_stream_barrier(bo,
+                                                            fence_class,
+                                                            ftype,
+                                                            no_wait);
+               }
+               if (ret && ret != -EAGAIN) 
+                       ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
                
-       ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
-                                no_wait, rep);
-
-       if (!ret && bo_rep)
-               *bo_rep = bo;
-       else
-               drm_bo_usage_deref_unlocked(&bo);
-
-       return ret;
-}
-EXPORT_SYMBOL(drm_bo_handle_validate);
-
-static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
-                             struct drm_bo_info_rep *rep)
-{
-       struct drm_device *dev = file_priv->head->dev;
-       struct drm_buffer_object *bo;
-
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-
-       if (!bo) {
-               return -EINVAL;
-       }
-       mutex_lock(&bo->mutex);
-       if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
-               (void)drm_bo_busy(bo);
-       drm_bo_fill_rep_arg(bo, rep);
-       mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(&bo);
-       return 0;
-}
-
-static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
-                             uint32_t hint,
-                             struct drm_bo_info_rep *rep)
-{
-       struct drm_device *dev = file_priv->head->dev;
-       struct drm_buffer_object *bo;
-       int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-       bo = drm_lookup_buffer_object(file_priv, handle, 1);
-       mutex_unlock(&dev->struct_mutex);
-
-       if (!bo) {
-               return -EINVAL;
-       }
-
-       mutex_lock(&bo->mutex);
-       ret = drm_bo_wait_unfenced(bo, no_wait, 0);
-       if (ret)
-               goto out;
-       ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
-       if (ret)
-               goto out;
-
-       drm_bo_fill_rep_arg(bo, rep);
-
-      out:
-       mutex_unlock(&bo->mutex);
-       drm_bo_usage_deref_unlocked(&bo);
-       return ret;
-}
-
-int drm_buffer_object_create(struct drm_device *dev,
-                            unsigned long size,
-                            enum drm_bo_type type,
-                            uint64_t mask,
-                            uint32_t hint,
-                            uint32_t page_alignment,
-                            unsigned long buffer_start,
-                            struct drm_buffer_object ** buf_obj)
-{
-       struct drm_buffer_manager *bm = &dev->bm;
-       struct drm_buffer_object *bo;
-       int ret = 0;
-       unsigned long num_pages;
-
-       size += buffer_start & ~PAGE_MASK;
-       num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       if (num_pages == 0) {
-               DRM_ERROR("Illegal buffer object size.\n");
-               return -EINVAL;
-       }
-
-       bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
-
-       if (!bo)
-               return -ENOMEM;
-
-       mutex_init(&bo->mutex);
-       mutex_lock(&bo->mutex);
-
-       atomic_set(&bo->usage, 1);
-       atomic_set(&bo->mapped, -1);
-       DRM_INIT_WAITQUEUE(&bo->event_queue);
-       INIT_LIST_HEAD(&bo->lru);
-       INIT_LIST_HEAD(&bo->pinned_lru);
-       INIT_LIST_HEAD(&bo->ddestroy);
-#ifdef DRM_ODD_MM_COMPAT
-       INIT_LIST_HEAD(&bo->p_mm_list);
-       INIT_LIST_HEAD(&bo->vma_list);
-#endif
-       bo->dev = dev;
-       bo->type = type;
-       bo->num_pages = num_pages;
-       bo->mem.mem_type = DRM_BO_MEM_LOCAL;
-       bo->mem.num_pages = bo->num_pages;
-       bo->mem.mm_node = NULL;
-       bo->mem.page_alignment = page_alignment;
-       bo->buffer_start = buffer_start & PAGE_MASK;
-       bo->priv_flags = 0;
-       bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
-               DRM_BO_FLAG_MAPPABLE;
-       bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
-               DRM_BO_FLAG_MAPPABLE;
-       atomic_inc(&bm->count);
-       ret = drm_bo_new_mask(bo, mask, mask);
-       if (ret)
-               goto out_err;
-
-       if (bo->type == drm_bo_type_dc) {
-               mutex_lock(&dev->struct_mutex);
-               ret = drm_bo_setup_vm_locked(bo);
-               mutex_unlock(&dev->struct_mutex);
                if (ret)
-                       goto out_err;
-       }
-
-       ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
-       if (ret)
-               goto out_err;
-
-       mutex_unlock(&bo->mutex);
-       *buf_obj = bo;
-       return 0;
-
-      out_err:
-       mutex_unlock(&bo->mutex);
-
-       drm_bo_usage_deref_unlocked(&bo);
-       return ret;
-}
-EXPORT_SYMBOL(drm_buffer_object_create);
-
-
-static int drm_bo_add_user_object(struct drm_file *file_priv,
-                                 struct drm_buffer_object *bo, int shareable)
-{
-       struct drm_device *dev = file_priv->head->dev;
-       int ret;
-
-       mutex_lock(&dev->struct_mutex);
-       ret = drm_add_user_object(file_priv, &bo->base, shareable);
-       if (ret)
-               goto out;
-
-       bo->base.remove = drm_bo_base_deref_locked;
-       bo->base.type = drm_buffer_type;
-       bo->base.ref_struct_locked = NULL;
-       bo->base.unref = drm_buffer_user_object_unmap;
-
-      out:
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
-}
-
-int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_bo_create_arg *arg = data;
-       struct drm_bo_create_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       struct drm_buffer_object *entry;
-       enum drm_bo_type bo_type;
-       int ret = 0;
-
-       DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
-           (int)(req->size / 1024), req->page_alignment * 4);
-
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
+                       return ret;
        }
 
-       bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
+       bo->new_fence_class = fence_class;
+       bo->new_fence_type = ftype;
 
-       if (bo_type == drm_bo_type_user)
-               req->mask &= ~DRM_BO_FLAG_SHAREABLE;
+       /*
+        * Check whether we need to move buffer.
+        */
 
-       ret = drm_buffer_object_create(file_priv->head->dev,
-                                      req->size, bo_type, req->mask,
-                                      req->hint, req->page_alignment,
-                                      req->buffer_start, &entry);
-       if (ret)
-               goto out;
-       
-       ret = drm_bo_add_user_object(file_priv, entry,
-                                    req->mask & DRM_BO_FLAG_SHAREABLE);
-       if (ret) {
-               drm_bo_usage_deref_unlocked(&entry);
-               goto out;
+       *move_buffer = 0;
+       if (!drm_bo_mem_compat(&bo->mem)) {
+               *move_buffer = 1;
+               ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
        }
-       
-       mutex_lock(&entry->mutex);
-       drm_bo_fill_rep_arg(entry, rep);
-       mutex_unlock(&entry->mutex);
 
-out:
        return ret;
 }
 
-int drm_bo_setstatus_ioctl(struct drm_device *dev, 
-                          void *data, struct drm_file *file_priv)
-{
-       struct drm_bo_map_wait_idle_arg *arg = data;
-       struct drm_bo_info_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       int ret;
-
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-
-       ret = drm_bo_read_lock(&dev->bm.bm_lock);
-       if (ret)
-               return ret;
-
-       ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
-                                    req->flags,
-                                    req->mask,
-                                    req->hint | DRM_BO_HINT_DONT_FENCE,
-                                    1,
-                                    rep, NULL);
-
-       (void) drm_bo_read_unlock(&dev->bm.bm_lock);
-       if (ret)
-               return ret;
-
-       return 0;
-}
+/**
+ * drm_bo_do_validate:
+ *
+ * @bo:        the buffer object
+ *
+ * @flags: access rights, mapping parameters and cacheability. See
+ * the DRM_BO_FLAG_* values in drm.h
+ *
+ * @mask: Which flag values to change; this allows callers to modify
+ * things without knowing the current state of other flags.
+ *
+ * @hint: changes the proceedure for this operation, see the DRM_BO_HINT_*
+ * values in drm.h.
+ *
+ * @fence_class: a driver-specific way of doing fences. Presumably,
+ * this would be used if the driver had more than one submission and
+ * fencing mechanism. At this point, there isn't any use of this
+ * from the user mode code.
+ *
+ * @rep: To be stuffed with the reply from validation
+ * 
+ * 'validate' a buffer object. This changes where the buffer is
+ * located, along with changing access modes.
+ */
 
-int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_bo_do_validate(struct drm_buffer_object *bo,
+                      uint64_t flags, uint64_t mask, uint32_t hint,
+                      uint32_t fence_class)
 {
-       struct drm_bo_map_wait_idle_arg *arg = data;
-       struct drm_bo_info_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
        int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
-
-       ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
-                                   req->hint, rep);
-       if (ret)
-               return ret;
+       int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
+       int move_buffer;
 
-       return 0;
-}
-
-int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_bo_handle_arg *arg = data;
-       int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
+       mutex_lock(&bo->mutex);
 
-       ret = drm_buffer_object_unmap(file_priv, arg->handle);
-       return ret;
-}
+       do {
+               bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
 
+               ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
+                                                 fence_class, no_wait,
+                                                 &move_buffer);
+               if (ret)
+                       goto out;
 
-int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_bo_reference_info_arg *arg = data;
-       struct drm_bo_handle_arg *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       struct drm_user_object *uo;
-       int ret;
+       } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
 
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
+       ret = drm_buffer_object_validate(bo,
+                                        fence_class,
+                                        !(hint & DRM_BO_HINT_DONT_FENCE),
+                                        no_wait,
+                                        move_buffer);
 
-       ret = drm_user_object_ref(file_priv, req->handle,
-                                 drm_buffer_type, &uo);
-       if (ret)
-               return ret;
-       
-       ret = drm_bo_handle_info(file_priv, req->handle, rep);
-       if (ret)
-               return ret;
+       BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
+out:
+       mutex_unlock(&bo->mutex);
 
-       return 0;
+       return ret;
 }
+EXPORT_SYMBOL(drm_bo_do_validate);
 
-int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_buffer_object_create(struct drm_device *dev,
+                            unsigned long size,
+                            enum drm_bo_type type,
+                            uint64_t flags,
+                            uint32_t hint,
+                            uint32_t page_alignment,
+                            unsigned long buffer_start,
+                            struct drm_buffer_object **buf_obj)
 {
-       struct drm_bo_handle_arg *arg = data;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_buffer_object *bo;
        int ret = 0;
+       unsigned long num_pages;
 
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
+       size += buffer_start & ~PAGE_MASK;
+       num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+       if (num_pages == 0) {
+               DRM_ERROR("Illegal buffer object size %ld.\n", size);
                return -EINVAL;
        }
 
-       ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
-       return ret;
-}
+       bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
 
-int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_bo_reference_info_arg *arg = data;
-       struct drm_bo_handle_arg *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       int ret;
+       if (!bo)
+               return -ENOMEM;
 
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
-       }
+       mutex_init(&bo->mutex);
+       mutex_lock(&bo->mutex);
 
-       ret = drm_bo_handle_info(file_priv, req->handle, rep);
-       if (ret)
-               return ret;
+       atomic_set(&bo->usage, 1);
+       atomic_set(&bo->mapped, 0);
+       DRM_INIT_WAITQUEUE(&bo->event_queue);
+       INIT_LIST_HEAD(&bo->lru);
+       INIT_LIST_HEAD(&bo->pinned_lru);
+       INIT_LIST_HEAD(&bo->ddestroy);
+#ifdef DRM_ODD_MM_COMPAT
+       INIT_LIST_HEAD(&bo->p_mm_list);
+       INIT_LIST_HEAD(&bo->vma_list);
+#endif
+       bo->dev = dev;
+       bo->type = type;
+       bo->num_pages = num_pages;
+       bo->mem.mem_type = DRM_BO_MEM_LOCAL;
+       bo->mem.num_pages = bo->num_pages;
+       bo->mem.mm_node = NULL;
+       bo->mem.page_alignment = page_alignment;
+       bo->buffer_start = buffer_start & PAGE_MASK;
+       bo->priv_flags = 0;
+       bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
+                        DRM_BO_FLAG_MAPPABLE | DRM_BO_FLAG_CLEAN);
+       bo->mem.proposed_flags = 0;
+       atomic_inc(&bm->count);
+       /*
+        * Use drm_bo_modify_proposed_flags to error-check the proposed flags
+        */
+       flags |= DRM_BO_FLAG_CLEAN; /* or in the clean flag */
 
-       return 0;
-}
+       ret = drm_bo_modify_proposed_flags (bo, flags, flags);
+       if (ret)
+               goto out_err;
 
-int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_bo_map_wait_idle_arg *arg = data;
-       struct drm_bo_info_req *req = &arg->d.req;
-       struct drm_bo_info_rep *rep = &arg->d.rep;
-       int ret;
-       if (!dev->bm.initialized) {
-               DRM_ERROR("Buffer object manager is not initialized.\n");
-               return -EINVAL;
+       /*
+        * For drm_bo_type_device buffers, allocate
+        * address space from the device so that applications
+        * can mmap the buffer from there
+        */
+       if (bo->type == drm_bo_type_device) {
+               mutex_lock(&dev->struct_mutex);
+               ret = drm_bo_setup_vm_locked(bo);
+               mutex_unlock(&dev->struct_mutex);
+               if (ret)
+                       goto out_err;
        }
 
-       ret = drm_bo_handle_wait(file_priv, req->handle,
-                                req->hint, rep);
+       mutex_unlock(&bo->mutex);
+       ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
+                                0);
        if (ret)
-               return ret;
+               goto out_err_unlocked;
 
+       *buf_obj = bo;
        return 0;
+
+out_err:
+       mutex_unlock(&bo->mutex);
+out_err_unlocked:
+       drm_bo_usage_deref_unlocked(&bo);
+       return ret;
 }
+EXPORT_SYMBOL(drm_buffer_object_create);
 
-static int drm_bo_leave_list(struct drm_buffer_object * bo,
+static int drm_bo_leave_list(struct drm_buffer_object *bo,
                             uint32_t mem_type,
                             int free_pinned,
                             int allow_errors)
@@ -1951,7 +1573,7 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,
                DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
                          "cleanup. Removing flag and evicting.\n");
                bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
-               bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
+               bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
        }
 
        if (bo->mem.mem_type == mem_type)
@@ -1966,7 +1588,7 @@ static int drm_bo_leave_list(struct drm_buffer_object * bo,
                }
        }
 
-      out:
+out:
        mutex_unlock(&bo->mutex);
        return ret;
 }
@@ -1985,7 +1607,7 @@ static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
  * dev->struct_mutex locked.
  */
 
-static int drm_bo_force_list_clean(struct drm_device * dev,
+static int drm_bo_force_list_clean(struct drm_device *dev,
                                   struct list_head *head,
                                   unsigned mem_type,
                                   int free_pinned,
@@ -2050,7 +1672,7 @@ restart:
        return 0;
 }
 
-int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
+int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
 {
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man = &bm->man[mem_type];
@@ -2066,6 +1688,13 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type)
                          "memory manager type %u\n", mem_type);
                return ret;
        }
+
+       if ((man->kern_init_type) && (kern_clean == 0)) {
+               DRM_ERROR("Trying to take down kernel initialized "
+                         "memory manager type %u\n", mem_type);
+               return -EPERM;
+       }
+
        man->use_type = 0;
        man->has_type = 0;
 
@@ -2092,7 +1721,7 @@ EXPORT_SYMBOL(drm_bo_clean_mm);
  *point since we have the hardware lock.
  */
 
-static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
+int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
 {
        int ret;
        struct drm_buffer_manager *bm = &dev->bm;
@@ -2117,9 +1746,9 @@ static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type)
        return ret;
 }
 
-int drm_bo_init_mm(struct drm_device * dev,
-                  unsigned type,
-                  unsigned long p_offset, unsigned long p_size)
+int drm_bo_init_mm(struct drm_device *dev, unsigned type,
+                  unsigned long p_offset, unsigned long p_size,
+                  int kern_init)
 {
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = -EINVAL;
@@ -2153,6 +1782,8 @@ int drm_bo_init_mm(struct drm_device * dev,
        }
        man->has_type = 1;
        man->use_type = 1;
+       man->kern_init_type = kern_init;
+       man->size = p_size;
 
        INIT_LIST_HEAD(&man->lru);
        INIT_LIST_HEAD(&man->pinned);
@@ -2164,11 +1795,11 @@ EXPORT_SYMBOL(drm_bo_init_mm);
 /*
  * This function is intended to be called on drm driver unload.
  * If you decide to call it from lastclose, you must protect the call
- * from a potentially racing drm_bo_driver_init in firstopen. 
+ * from a potentially racing drm_bo_driver_init in firstopen.
  * (This may happen on X server restart).
  */
 
-int drm_bo_driver_finish(struct drm_device * dev)
+int drm_bo_driver_finish(struct drm_device *dev)
 {
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = 0;
@@ -2185,7 +1816,7 @@ int drm_bo_driver_finish(struct drm_device * dev)
                man = &bm->man[i];
                if (man->has_type) {
                        man->use_type = 0;
-                       if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
+                       if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
                                ret = -EBUSY;
                                DRM_ERROR("DRM memory manager type %d "
                                          "is not clean.\n", i);
@@ -2195,31 +1826,32 @@ int drm_bo_driver_finish(struct drm_device * dev)
        }
        mutex_unlock(&dev->struct_mutex);
 
-       if (!cancel_delayed_work(&bm->wq)) {
+       if (!cancel_delayed_work(&bm->wq))
                flush_scheduled_work();
-       }
+
        mutex_lock(&dev->struct_mutex);
        drm_bo_delayed_delete(dev, 1);
-       if (list_empty(&bm->ddestroy)) {
+       if (list_empty(&bm->ddestroy))
                DRM_DEBUG("Delayed destroy list was clean\n");
-       }
-       if (list_empty(&bm->man[0].lru)) {
+
+       if (list_empty(&bm->man[0].lru))
                DRM_DEBUG("Swap list was clean\n");
-       }
-       if (list_empty(&bm->man[0].pinned)) {
+
+       if (list_empty(&bm->man[0].pinned))
                DRM_DEBUG("NO_MOVE list was clean\n");
-       }
-       if (list_empty(&bm->unfenced)) {
+
+       if (list_empty(&bm->unfenced))
                DRM_DEBUG("Unfenced list was clean\n");
-       }
-      out:
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
-       unlock_page(bm->dummy_read_page);
-#else
-       ClearPageReserved(bm->dummy_read_page);
+       if (bm->dummy_read_page) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+               ClearPageReserved(bm->dummy_read_page);
 #endif
-       __free_page(bm->dummy_read_page);
+               __free_page(bm->dummy_read_page);
+       }
+
+       drm_uncached_fini();
+out:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
@@ -2228,18 +1860,19 @@ EXPORT_SYMBOL(drm_bo_driver_finish);
 /*
  * This function is intended to be called on drm driver load.
  * If you decide to call it from firstopen, you must protect the call
- * from a potentially racing drm_bo_driver_finish in lastclose. 
+ * from a potentially racing drm_bo_driver_finish in lastclose.
  * (This may happen on X server restart).
  */
 
-int drm_bo_driver_init(struct drm_device * dev)
+int drm_bo_driver_init(struct drm_device *dev)
 {
        struct drm_bo_driver *driver = dev->driver->bo_driver;
        struct drm_buffer_manager *bm = &dev->bm;
        int ret = -EINVAL;
 
+       drm_uncached_init();
+
        bm->dummy_read_page = NULL;
-       drm_bo_init_lock(&bm->bm_lock);
        mutex_lock(&dev->struct_mutex);
        if (!driver)
                goto out_unlock;
@@ -2250,9 +1883,7 @@ int drm_bo_driver_init(struct drm_device * dev)
                goto out_unlock;
        }
 
-#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
-       SetPageLocked(bm->dummy_read_page);
-#else
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
        SetPageReserved(bm->dummy_read_page);
 #endif
 
@@ -2260,9 +1891,15 @@ int drm_bo_driver_init(struct drm_device * dev)
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
-       ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
-       if (ret)
+       ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
+       if (ret) {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
+               ClearPageReserved(bm->dummy_read_page);
+#endif
+               __free_page(bm->dummy_read_page);
+               bm->dummy_read_page = NULL;
                goto out_unlock;
+       }
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
        INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
@@ -2275,165 +1912,17 @@ int drm_bo_driver_init(struct drm_device * dev)
        bm->cur_pages = 0;
        INIT_LIST_HEAD(&bm->unfenced);
        INIT_LIST_HEAD(&bm->ddestroy);
-      out_unlock:
+out_unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
-
 EXPORT_SYMBOL(drm_bo_driver_init);
 
-int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_mm_init_arg *arg = data;
-       struct drm_buffer_manager *bm = &dev->bm;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       int ret;
-
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-
-       ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
-       if (ret)
-               return ret;
-
-       ret = -EINVAL;
-       if (arg->magic != DRM_BO_INIT_MAGIC) {
-               DRM_ERROR("You are using an old libdrm that is not compatible with\n"
-                         "\tthe kernel DRM module. Please upgrade your libdrm.\n");
-               return -EINVAL;
-       }
-       if (arg->major != DRM_BO_INIT_MAJOR) {
-               DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
-                         "\tversion don't match. Got %d, expected %d.\n",
-                         arg->major, DRM_BO_INIT_MAJOR);
-               return -EINVAL;
-       }
-
-       mutex_lock(&dev->struct_mutex);
-       if (!bm->initialized) {
-               DRM_ERROR("DRM memory manager was not initialized.\n");
-               goto out;
-       }
-       if (arg->mem_type == 0) {
-               DRM_ERROR("System memory buffers already initialized.\n");
-               goto out;
-       }
-       ret = drm_bo_init_mm(dev, arg->mem_type,
-                            arg->p_offset, arg->p_size);
-
-out:
-       mutex_unlock(&dev->struct_mutex);
-       (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_mm_type_arg *arg = data;
-       struct drm_buffer_manager *bm = &dev->bm;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       int ret;
-
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-
-       ret = drm_bo_write_lock(&bm->bm_lock, file_priv);
-       if (ret)
-               return ret;
-
-       mutex_lock(&dev->struct_mutex);
-       ret = -EINVAL;
-       if (!bm->initialized) {
-               DRM_ERROR("DRM memory manager was not initialized\n");
-               goto out;
-       }
-       if (arg->mem_type == 0) {
-               DRM_ERROR("No takedown for System memory buffers.\n");
-               goto out;
-       }
-       ret = 0;
-       if (drm_bo_clean_mm(dev, arg->mem_type)) {
-               DRM_ERROR("Memory manager type %d not clean. "
-                         "Delaying takedown\n", arg->mem_type);
-       }
-out:
-       mutex_unlock(&dev->struct_mutex);
-       (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
-
-       if (ret)
-               return ret;
-
-       return 0;
-}
-
-int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-       struct drm_mm_type_arg *arg = data;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       int ret;
-
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-
-       if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
-               DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
-               return -EINVAL;
-       }
-               
-       if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-               ret = drm_bo_write_lock(&dev->bm.bm_lock, file_priv);
-               if (ret)
-                       return ret;
-       }
-               
-       mutex_lock(&dev->struct_mutex);
-       ret = drm_bo_lock_mm(dev, arg->mem_type);
-       mutex_unlock(&dev->struct_mutex);
-       if (ret) {
-               (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-               return ret;
-       }
-
-       return 0;
-}
-
-int drm_mm_unlock_ioctl(struct drm_device *dev, 
-                       void *data, 
-                       struct drm_file *file_priv)
-{
-       struct drm_mm_type_arg *arg = data;
-       struct drm_bo_driver *driver = dev->driver->bo_driver;
-       int ret;
-
-       if (!driver) {
-               DRM_ERROR("Buffer objects are not supported by this driver\n");
-               return -EINVAL;
-       }
-
-       if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
-               ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
-               if (ret)
-                       return ret;
-       }
-               
-       return 0;
-}
-
 /*
  * buffer object vm functions.
  */
 
-int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
+int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
 {
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
@@ -2450,7 +1939,6 @@ int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem)
        }
        return 1;
 }
-
 EXPORT_SYMBOL(drm_mem_reg_is_pci);
 
 /**
@@ -2496,7 +1984,7 @@ int drm_bo_pci_offset(struct drm_device *dev,
  * Call bo->mutex locked.
  */
 
-void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
+void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
 {
        struct drm_device *dev = bo->dev;
        loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
@@ -2508,14 +1996,22 @@ void drm_bo_unmap_virtual(struct drm_buffer_object * bo)
        unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
 }
 
-static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
+/**
+ * drm_bo_takedown_vm_locked:
+ *
+ * @bo: the buffer object to remove any drm device mapping
+ *
+ * Remove any associated vm mapping on the drm device node that
+ * would have been created for a drm_bo_type_device buffer
+ */
+void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
 {
-        struct drm_map_list *list;
+       struct drm_map_list *list;
        drm_local_map_t *map;
        struct drm_device *dev = bo->dev;
 
        DRM_ASSERT_LOCKED(&dev->struct_mutex);
-       if (bo->type != drm_bo_type_dc)
+       if (bo->type != drm_bo_type_device)
                return;
 
        list = &bo->map_list;
@@ -2537,8 +2033,19 @@ static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo)
        list->user_token = 0ULL;
        drm_bo_usage_deref_locked(&bo);
 }
+EXPORT_SYMBOL(drm_bo_takedown_vm_locked);
 
-static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
+/**
+ * drm_bo_setup_vm_locked:
+ *
+ * @bo: the buffer to allocate address space for
+ *
+ * Allocate address space in the drm device so that applications
+ * can mmap the buffer and access the contents. This only
+ * applies to drm_bo_type_device objects as others are not
+ * placed in the drm device address space.
+ */
+static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
 {
        struct drm_map_list *list = &bo->map_list;
        drm_local_map_t *map;
@@ -2560,7 +2067,7 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
        list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
                                                    bo->mem.num_pages, 0, 0);
 
-       if (!list->file_offset_node) {
+       if (unlikely(!list->file_offset_node)) {
                drm_bo_takedown_vm_locked(bo);
                return -ENOMEM;
        }
@@ -2568,6 +2075,11 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
        list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                                                  bo->mem.num_pages, 0);
 
+       if (unlikely(!list->file_offset_node)) {
+               drm_bo_takedown_vm_locked(bo);
+               return -ENOMEM;
+       }
+               
        list->hash.key = list->file_offset_node->start;
        if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
                drm_bo_takedown_vm_locked(bo);
@@ -2579,14 +2091,53 @@ static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo)
        return 0;
 }
 
-int drm_bo_version_ioctl(struct drm_device *dev, void *data, 
-                        struct drm_file *file_priv)
+/* used to EVICT VRAM lru at suspend time */
+void drm_bo_evict_mm(struct drm_device *dev, int mem_type, int no_wait)
 {
-       struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
+       struct drm_buffer_manager *bm = &dev->bm;
+       struct drm_mem_type_manager *man = &bm->man[mem_type];
+       struct drm_buffer_object *entry;
+       /* we need to migrate all objects in VRAM */
+       struct list_head *lru;
+       int ret;
+       /* evict all buffers on the LRU - won't evict pinned buffers */
        
-       arg->major = DRM_BO_INIT_MAJOR;
-       arg->minor = DRM_BO_INIT_MINOR;
-       arg->patchlevel = DRM_BO_INIT_PATCH;
+       mutex_lock(&dev->struct_mutex);
+       do {
+               lru = &man->lru;
+
+redo:
+               if (lru->next == &man->lru) {
+                       DRM_ERROR("lru empty\n");
+                       break;
+               }
+
+               entry = list_entry(lru->next, struct drm_buffer_object, lru);
+
+               if (entry->mem.flags & DRM_BO_FLAG_DISCARDABLE) {
+                       lru = lru->next;
+                       goto redo;
+               }
+
+               atomic_inc(&entry->usage);
+               mutex_unlock(&dev->struct_mutex);
+               mutex_lock(&entry->mutex);
+
+               ret = drm_bo_evict(entry, mem_type, no_wait);
+               mutex_unlock(&entry->mutex);
+
+               if (ret)
+                       DRM_ERROR("Evict failed for BO\n");
+
+               mutex_lock(&entry->mutex);
+               (void)drm_bo_expire_fence(entry, 0);
+               mutex_unlock(&entry->mutex);
+               drm_bo_usage_deref_unlocked(&entry);
+
+               mutex_lock(&dev->struct_mutex);
+       } while(1);
+
+       mutex_unlock(&dev->struct_mutex);
 
-       return 0;
 }
+EXPORT_SYMBOL(drm_bo_evict_mm);