linux-core/drm_bo.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
33 /*
34  * Locking may look a bit complicated but isn't really:
35  *
36  * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37  * when there is a chance that it can be zero before or after the operation.
38  *
39  * dev->struct_mutex also protects all lists and list heads,
40  * hash tables and hash heads.
41  *
42  * bo->mutex protects the buffer object itself, excluding the usage field.
43  * bo->mutex also protects the buffer list heads, so to manipulate those,
44  * we need both the bo->mutex and the dev->struct_mutex.
45  *
46  * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47  * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48  * the list traversal will, in general, need to be restarted.
49  *
50  */
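/*
 * Illustrative sketch (not part of the original file) of the restart
 * pattern described above, for a hypothetical walk of a buffer list
 * protected by dev->struct_mutex. Because the locking order is
 * bo->mutex before dev->struct_mutex, struct_mutex must be dropped
 * before taking bo->mutex, which invalidates the iterator:
 *
 *	mutex_lock(&dev->struct_mutex);
 * restart:
 *	list_for_each_entry(entry, head, lru) {
 *		atomic_inc(&entry->usage);
 *		mutex_unlock(&dev->struct_mutex);
 *		mutex_lock(&entry->mutex);
 *		mutex_lock(&dev->struct_mutex);
 *		(operate on entry; assumes the operation removes entry
 *		 from the list, as the cleanup paths in this file do)
 *		mutex_unlock(&entry->mutex);
 *		drm_bo_usage_deref_locked(&entry);
 *		goto restart;
 *	}
 *	mutex_unlock(&dev->struct_mutex);
 */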
51
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
56
57 static inline uint64_t drm_bo_type_flags(unsigned type)
58 {
59         return (1ULL << (24 + type));
60 }
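/*
 * Illustrative example (not part of the original file), assuming the
 * usual DRM_BO_MEM_* / DRM_BO_FLAG_MEM_* numbering from drm.h:
 * drm_bo_type_flags(DRM_BO_MEM_LOCAL) yields DRM_BO_FLAG_MEM_LOCAL
 * (1ULL << 24) and drm_bo_type_flags(DRM_BO_MEM_TT) yields
 * DRM_BO_FLAG_MEM_TT (1ULL << 25), i.e. a memory type index is turned
 * into the corresponding memory-type flag bit.
 */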
61
62 /*
63  * bo locked. dev->struct_mutex locked.
64  */
65
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
67 {
68         struct drm_mem_type_manager *man;
69
70         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71         DRM_ASSERT_LOCKED(&bo->mutex);
72
73         man = &bo->dev->bm.man[bo->pinned_mem_type];
74         list_add_tail(&bo->pinned_lru, &man->pinned);
75 }
76
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
78 {
79         struct drm_mem_type_manager *man;
80
81         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
82
83         if (!(bo->mem.proposed_flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84             || bo->mem.mem_type != bo->pinned_mem_type) {
85                 man = &bo->dev->bm.man[bo->mem.mem_type];
86                 list_add_tail(&bo->lru, &man->lru);
87         } else {
88                 INIT_LIST_HEAD(&bo->lru);
89         }
90 }
91
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
93 {
94         if (!bo->map_list.map)
95                 return 0;
96
97         drm_bo_unmap_virtual(bo);
98         return 0;
99 }
100
101 /*
102  * Call bo->mutex locked.
103  */
104
105 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
106 {
107         struct drm_device *dev = bo->dev;
108         int ret = 0;
109         uint32_t page_flags = 0;
110
111         DRM_ASSERT_LOCKED(&bo->mutex);
112         bo->ttm = NULL;
113
114         if (bo->mem.proposed_flags & DRM_BO_FLAG_WRITE)
115                 page_flags |= DRM_TTM_PAGE_WRITE;
116
117         switch (bo->type) {
118         case drm_bo_type_device:
119         case drm_bo_type_kernel:
120                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT, 
121                                          page_flags, dev->bm.dummy_read_page);
122                 if (!bo->ttm)
123                         ret = -ENOMEM;
124                 break;
125         case drm_bo_type_user:
126                 bo->ttm = drm_ttm_create(dev, bo->num_pages << PAGE_SHIFT,
127                                          page_flags | DRM_TTM_PAGE_USER,
128                                          dev->bm.dummy_read_page);
129                 if (!bo->ttm)
130                         return -ENOMEM;
131
132                 ret = drm_ttm_set_user(bo->ttm, current,
133                                        bo->buffer_start,
134                                        bo->num_pages);
135                 if (ret)
136                         return ret;
137
138                 break;
139         default:
140                 DRM_ERROR("Illegal buffer object type\n");
141                 ret = -EINVAL;
142                 break;
143         }
144
145         return ret;
146 }
147
148 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
149                                   struct drm_bo_mem_reg *mem,
150                                   int evict, int no_wait)
151 {
152         struct drm_device *dev = bo->dev;
153         struct drm_buffer_manager *bm = &dev->bm;
154         int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
155         int new_is_pci = drm_mem_reg_is_pci(dev, mem);
156         struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
157         struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
158         int ret = 0;
159
160         if (old_is_pci || new_is_pci ||
161             ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
162                 ret = drm_bo_vm_pre_move(bo, old_is_pci);
163         if (ret)
164                 return ret;
165
166         /*
167          * Create and bind a ttm if required.
168          */
169
170         if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
171                 ret = drm_bo_add_ttm(bo);
172                 if (ret)
173                         goto out_err;
174
175                 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
176                         ret = drm_ttm_bind(bo->ttm, mem);
177                         if (ret)
178                                 goto out_err;
179                 }
180
181                 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
182                         
183                         struct drm_bo_mem_reg *old_mem = &bo->mem;
184                         uint64_t save_flags = old_mem->flags;
185                         uint64_t save_proposed_flags = old_mem->proposed_flags;
186                         
187                         *old_mem = *mem;
188                         mem->mm_node = NULL;
189                         old_mem->proposed_flags = save_proposed_flags;
190                         DRM_FLAG_MASKED(save_flags, mem->flags,
191                                         DRM_BO_MASK_MEMTYPE);
192                         goto moved;
193                 }
194                 
195         }
196
197         if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
198             !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED))                
199                 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
200         else if (dev->driver->bo_driver->move) 
201                 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
202         else
203                 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
204
205         if (ret)
206                 goto out_err;
207
208 moved:
209         if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
210                 ret =
211                     dev->driver->bo_driver->invalidate_caches(dev,
212                                                               bo->mem.flags);
213                 if (ret)
214                         DRM_ERROR("Cannot flush read caches\n");
215         }
216
217         DRM_FLAG_MASKED(bo->priv_flags,
218                         (evict) ? _DRM_BO_FLAG_EVICTED : 0,
219                         _DRM_BO_FLAG_EVICTED);
220
221         if (bo->mem.mm_node)
222                 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
223                         bm->man[bo->mem.mem_type].gpu_offset;
224
225
226         return 0;
227
228 out_err:
229         new_man = &bm->man[bo->mem.mem_type];
230         if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
231                 drm_ttm_unbind(bo->ttm);
232                 drm_ttm_destroy(bo->ttm);
233                 bo->ttm = NULL;
234         }
235
236         return ret;
237 }
238
239 /*
240  * Call bo->mutex locked.
241  * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
242  */
243
244 static int drm_bo_busy(struct drm_buffer_object *bo, int check_unfenced)
245 {
246         struct drm_fence_object *fence = bo->fence;
247
248         if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
249                 return -EBUSY;
250
251         if (fence) {
252                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
253                         drm_fence_usage_deref_unlocked(&bo->fence);
254                         return 0;
255                 }
256                 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
257                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
258                         drm_fence_usage_deref_unlocked(&bo->fence);
259                         return 0;
260                 }
261                 return -EBUSY;
262         }
263         return 0;
264 }
265
266 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
267 {
268         int ret;
269
270         mutex_lock(&bo->mutex);
271         ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
272         mutex_unlock(&bo->mutex);
273         return ret;
274 }
275
276
277 /*
278  * Call bo->mutex locked.
279  * Wait until the buffer is idle.
280  */
281
282 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
283                 int no_wait, int check_unfenced)
284 {
285         int ret;
286
287         DRM_ASSERT_LOCKED(&bo->mutex);
288         while(unlikely(drm_bo_busy(bo, check_unfenced))) {
289                 if (no_wait)
290                         return -EBUSY;
291
292                 if (check_unfenced &&  (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)) {
293                         mutex_unlock(&bo->mutex);
294                         wait_event(bo->event_queue, !drm_bo_check_unfenced(bo));
295                         mutex_lock(&bo->mutex);
296                         bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
297                 }
298
299                 if (bo->fence) {
300                         struct drm_fence_object *fence;
301                         uint32_t fence_type = bo->fence_type;
302
303                         drm_fence_reference_unlocked(&fence, bo->fence);
304                         mutex_unlock(&bo->mutex);
305
306                         ret = drm_fence_object_wait(fence, lazy, !interruptible,
307                                                     fence_type);
308
309                         drm_fence_usage_deref_unlocked(&fence);
310                         mutex_lock(&bo->mutex);
311                         bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
312                         if (ret)
313                                 return ret;
314                 }
315
316         }
317         return 0;
318 }
319 EXPORT_SYMBOL(drm_bo_wait);
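/*
 * Illustrative caller sketch (not part of the original file): waiting
 * for a buffer to become idle before touching its contents. bo->mutex
 * must be held and may be dropped temporarily during the wait:
 *
 *	mutex_lock(&bo->mutex);
 *	ret = drm_bo_wait(bo, 0, 1, 0, 1);
 *	mutex_unlock(&bo->mutex);
 *
 * The arguments request a non-lazy, interruptible, blocking wait that
 * also waits for the buffer to leave the unfenced state.
 */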
320
321 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
322 {
323         struct drm_device *dev = bo->dev;
324         struct drm_buffer_manager *bm = &dev->bm;
325
326         if (bo->fence) {
327                 if (bm->nice_mode) {
328                         unsigned long _end = jiffies + 3 * DRM_HZ;
329                         int ret;
330                         do {
331                                 ret = drm_bo_wait(bo, 0, 0, 0, 0);
332                                 if (ret && allow_errors)
333                                         return ret;
334
335                         } while (ret && !time_after_eq(jiffies, _end));
336
337                         if (bo->fence) {
338                                 bm->nice_mode = 0;
339                                 DRM_ERROR("Detected GPU lockup or "
340                                           "fence driver was taken down. "
341                                           "Evicting buffer.\n");
342                         }
343                 }
344                 if (bo->fence)
345                         drm_fence_usage_deref_unlocked(&bo->fence);
346         }
347         return 0;
348 }
349
350 /*
351  * Call dev->struct_mutex locked.
352  * Attempts to remove all private references to a buffer by expiring its
353  * fence object and removing from lru lists and memory managers.
354  */
355
356 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
357 {
358         struct drm_device *dev = bo->dev;
359         struct drm_buffer_manager *bm = &dev->bm;
360
361         DRM_ASSERT_LOCKED(&dev->struct_mutex);
362
363         atomic_inc(&bo->usage);
364         mutex_unlock(&dev->struct_mutex);
365         mutex_lock(&bo->mutex);
366
367         DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
368
369         if (bo->fence && drm_fence_object_signaled(bo->fence,
370                                                    bo->fence_type))
371                 drm_fence_usage_deref_unlocked(&bo->fence);
372
373         if (bo->fence && remove_all)
374                 (void)drm_bo_expire_fence(bo, 0);
375
376         mutex_lock(&dev->struct_mutex);
377
378         if (!atomic_dec_and_test(&bo->usage))
379                 goto out;
380
381         if (!bo->fence) {
382                 list_del_init(&bo->lru);
383                 if (bo->mem.mm_node) {
384                         drm_mm_put_block(bo->mem.mm_node);
385                         if (bo->pinned_node == bo->mem.mm_node)
386                                 bo->pinned_node = NULL;
387                         bo->mem.mm_node = NULL;
388                 }
389                 list_del_init(&bo->pinned_lru);
390                 if (bo->pinned_node) {
391                         drm_mm_put_block(bo->pinned_node);
392                         bo->pinned_node = NULL;
393                 }
394                 list_del_init(&bo->ddestroy);
395                 mutex_unlock(&bo->mutex);
396                 drm_bo_destroy_locked(bo);
397                 return;
398         }
399
400         if (list_empty(&bo->ddestroy)) {
401                 drm_fence_object_flush(bo->fence, bo->fence_type);
402                 list_add_tail(&bo->ddestroy, &bm->ddestroy);
403                 schedule_delayed_work(&bm->wq,
404                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
405         }
406
407 out:
408         mutex_unlock(&bo->mutex);
409         return;
410 }
411
412 /*
413  * Verify that refcount is 0 and that there are no internal references
414  * to the buffer object. Then destroy it.
415  */
416
417 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
418 {
419         struct drm_device *dev = bo->dev;
420         struct drm_buffer_manager *bm = &dev->bm;
421
422         DRM_ASSERT_LOCKED(&dev->struct_mutex);
423
424         if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
425             list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
426             list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
427                 if (bo->fence != NULL) {
428                         DRM_ERROR("Fence was non-zero.\n");
429                         drm_bo_cleanup_refs(bo, 0);
430                         return;
431                 }
432
433                 if (bo->ttm) {
434                         drm_ttm_unbind(bo->ttm);
435                         drm_ttm_destroy(bo->ttm);
436                         bo->ttm = NULL;
437                 }
438
439                 atomic_dec(&bm->count);
440
441                 drm_ctl_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
442
443                 return;
444         }
445
446         /*
447          * Some stuff is still trying to reference the buffer object.
448          * Get rid of those references.
449          */
450
451         drm_bo_cleanup_refs(bo, 0);
452
453         return;
454 }
455
456 /*
457  * Call dev->struct_mutex locked.
458  */
459
460 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
461 {
462         struct drm_buffer_manager *bm = &dev->bm;
463
464         struct drm_buffer_object *entry, *nentry;
465         struct list_head *list, *next;
466
467         list_for_each_safe(list, next, &bm->ddestroy) {
468                 entry = list_entry(list, struct drm_buffer_object, ddestroy);
469
470                 nentry = NULL;
471                 if (next != &bm->ddestroy) {
472                         nentry = list_entry(next, struct drm_buffer_object,
473                                             ddestroy);
474                         atomic_inc(&nentry->usage);
475                 }
476
477                 drm_bo_cleanup_refs(entry, remove_all);
478
479                 if (nentry)
480                         atomic_dec(&nentry->usage);
481         }
482 }
483
484 static void drm_bo_delayed_workqueue(struct work_struct *work)
485 {
486         struct drm_buffer_manager *bm =
487             container_of(work, struct drm_buffer_manager, wq.work);
488         struct drm_device *dev = container_of(bm, struct drm_device, bm);
489
490         DRM_DEBUG("Delayed delete Worker\n");
491
492         mutex_lock(&dev->struct_mutex);
493         if (!bm->initialized) {
494                 mutex_unlock(&dev->struct_mutex);
495                 return;
496         }
497         drm_bo_delayed_delete(dev, 0);
498         if (bm->initialized && !list_empty(&bm->ddestroy)) {
499                 schedule_delayed_work(&bm->wq,
500                                       ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
501         }
502         mutex_unlock(&dev->struct_mutex);
503 }
504
505 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
506 {
507         struct drm_buffer_object *tmp_bo = *bo;
508         *bo = NULL;
509
510         DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
511
512         if (atomic_dec_and_test(&tmp_bo->usage))
513                 drm_bo_destroy_locked(tmp_bo);
514 }
515 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
516
517 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
518                                      struct drm_user_object *uo)
519 {
520         struct drm_buffer_object *bo =
521             drm_user_object_entry(uo, struct drm_buffer_object, base);
522
523         DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
524
525         drm_bo_takedown_vm_locked(bo);
526         drm_bo_usage_deref_locked(&bo);
527 }
528
529 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
530 {
531         struct drm_buffer_object *tmp_bo = *bo;
532         struct drm_device *dev = tmp_bo->dev;
533
534         *bo = NULL;
535         if (atomic_dec_and_test(&tmp_bo->usage)) {
536                 mutex_lock(&dev->struct_mutex);
537                 if (atomic_read(&tmp_bo->usage) == 0)
538                         drm_bo_destroy_locked(tmp_bo);
539                 mutex_unlock(&dev->struct_mutex);
540         }
541 }
542 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
543
544 void drm_putback_buffer_objects(struct drm_device *dev)
545 {
546         struct drm_buffer_manager *bm = &dev->bm;
547         struct list_head *list = &bm->unfenced;
548         struct drm_buffer_object *entry, *next;
549
550         mutex_lock(&dev->struct_mutex);
551         list_for_each_entry_safe(entry, next, list, lru) {
552                 atomic_inc(&entry->usage);
553                 mutex_unlock(&dev->struct_mutex);
554
555                 mutex_lock(&entry->mutex);
556                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
557                 mutex_lock(&dev->struct_mutex);
558
559                 list_del_init(&entry->lru);
560                 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
561                 wake_up_all(&entry->event_queue);
562
563                 /*
564                  * FIXME: Might want to put back on head of list
565                  * instead of tail here.
566                  */
567
568                 drm_bo_add_to_lru(entry);
569                 mutex_unlock(&entry->mutex);
570                 drm_bo_usage_deref_locked(&entry);
571         }
572         mutex_unlock(&dev->struct_mutex);
573 }
574 EXPORT_SYMBOL(drm_putback_buffer_objects);
575
576
577 /*
578  * Note. The caller has to register (if applicable)
579  * and deregister fence object usage.
580  */
581
582 int drm_fence_buffer_objects(struct drm_device *dev,
583                              struct list_head *list,
584                              uint32_t fence_flags,
585                              struct drm_fence_object *fence,
586                              struct drm_fence_object **used_fence)
587 {
588         struct drm_buffer_manager *bm = &dev->bm;
589         struct drm_buffer_object *entry;
590         uint32_t fence_type = 0;
591         uint32_t fence_class = ~0;
592         int count = 0;
593         int ret = 0;
594         struct list_head *l;
595
596         mutex_lock(&dev->struct_mutex);
597
598         if (!list)
599                 list = &bm->unfenced;
600
601         if (fence)
602                 fence_class = fence->fence_class;
603
604         list_for_each_entry(entry, list, lru) {
605                 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
606                 fence_type |= entry->new_fence_type;
607                 if (fence_class == ~0)
608                         fence_class = entry->new_fence_class;
609                 else if (entry->new_fence_class != fence_class) {
610                         DRM_ERROR("Mismatched fence classes on unfenced list: "
611                                   "%d and %d.\n",
612                                   fence_class,
613                                   entry->new_fence_class);
614                         ret = -EINVAL;
615                         goto out;
616                 }
617                 count++;
618         }
619
620         if (!count) {
621                 ret = -EINVAL;
622                 goto out;
623         }
624
625         if (fence) {
626                 if ((fence_type & fence->type) != fence_type ||
627                     (fence->fence_class != fence_class)) {
628                         DRM_ERROR("Given fence doesn't match buffers "
629                                   "on unfenced list.\n");
630                         ret = -EINVAL;
631                         goto out;
632                 }
633         } else {
634                 mutex_unlock(&dev->struct_mutex);
635                 ret = drm_fence_object_create(dev, fence_class, fence_type,
636                                               fence_flags | DRM_FENCE_FLAG_EMIT,
637                                               &fence);
638                 mutex_lock(&dev->struct_mutex);
639                 if (ret)
640                         goto out;
641         }
642
643         count = 0;
644         l = list->next;
645         while (l != list) {
646                 prefetch(l->next);
647                 entry = list_entry(l, struct drm_buffer_object, lru);
648                 atomic_inc(&entry->usage);
649                 mutex_unlock(&dev->struct_mutex);
650                 mutex_lock(&entry->mutex);
651                 mutex_lock(&dev->struct_mutex);
652                 list_del_init(l);
653                 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
654                         count++;
655                         if (entry->fence)
656                                 drm_fence_usage_deref_locked(&entry->fence);
657                         entry->fence = drm_fence_reference_locked(fence);
658                         entry->fence_class = entry->new_fence_class;
659                         entry->fence_type = entry->new_fence_type;
660                         DRM_FLAG_MASKED(entry->priv_flags, 0,
661                                         _DRM_BO_FLAG_UNFENCED);
662                         wake_up_all(&entry->event_queue);
663                         drm_bo_add_to_lru(entry);
664                 }
665                 mutex_unlock(&entry->mutex);
666                 drm_bo_usage_deref_locked(&entry);
667                 l = list->next;
668         }
669         DRM_DEBUG("Fenced %d buffers\n", count);
670 out:
671         mutex_unlock(&dev->struct_mutex);
672         *used_fence = fence;
673         return ret;
674 }
675 EXPORT_SYMBOL(drm_fence_buffer_objects);
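/*
 * Illustrative sketch (hypothetical driver code, not part of this file):
 * after submitting a command batch, a driver would typically fence the
 * whole unfenced list, letting this function create and emit a fence.
 * Per the note above, the caller is responsible for dropping the
 * reference handed back in *used_fence:
 *
 *	struct drm_fence_object *fence = NULL;
 *	int ret;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (ret == 0 && fence)
 *		drm_fence_usage_deref_unlocked(&fence);
 */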
676
677 /*
678  * bo->mutex locked
679  */
680
681 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
682                         int no_wait)
683 {
684         int ret = 0;
685         struct drm_device *dev = bo->dev;
686         struct drm_bo_mem_reg evict_mem;
687
688         /*
689          * Someone might have modified the buffer before we took the
690          * buffer mutex.
691          */
692
693         do {
694                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
695
696                 if (unlikely(bo->mem.flags &
697                              (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT)))
698                         goto out_unlock;
699                 if (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
700                         goto out_unlock;
701                 if (unlikely(bo->mem.mem_type != mem_type))
702                         goto out_unlock;
703                 ret = drm_bo_wait(bo, 0, 1, no_wait, 0);
704                 if (ret)
705                         goto out_unlock;
706
707         } while(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
708
709         evict_mem = bo->mem;
710         evict_mem.mm_node = NULL;
711
713         evict_mem.proposed_flags = dev->driver->bo_driver->evict_flags(bo);
714
715         mutex_lock(&dev->struct_mutex);
716         list_del_init(&bo->lru);
717         mutex_unlock(&dev->struct_mutex);
718
719         ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
720
721         if (ret) {
722                 if (ret != -EAGAIN)
723                         DRM_ERROR("Failed to find memory space for "
724                                   "buffer 0x%p eviction.\n", bo);
725                 goto out;
726         }
727
728         ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
729
730         if (ret) {
731                 if (ret != -EAGAIN)
732                         DRM_ERROR("Buffer eviction failed\n");
733                 goto out;
734         }
735
736         DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
737                         _DRM_BO_FLAG_EVICTED);
738
739 out:
740         mutex_lock(&dev->struct_mutex);
741         if (evict_mem.mm_node) {
742                 if (evict_mem.mm_node != bo->pinned_node)
743                         drm_mm_put_block(evict_mem.mm_node);
744                 evict_mem.mm_node = NULL;
745         }
746         drm_bo_add_to_lru(bo);
747         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
748 out_unlock:
749         mutex_unlock(&dev->struct_mutex);
750
751         return ret;
752 }
753
754 /**
755  * Repeatedly evict memory from the LRU for @mem_type until we create enough
756  * space, or we've evicted everything and there isn't enough space.
757  */
758 static int drm_bo_mem_force_space(struct drm_device *dev,
759                                   struct drm_bo_mem_reg *mem,
760                                   uint32_t mem_type, int no_wait)
761 {
762         struct drm_mm_node *node;
763         struct drm_buffer_manager *bm = &dev->bm;
764         struct drm_buffer_object *entry;
765         struct drm_mem_type_manager *man = &bm->man[mem_type];
766         struct list_head *lru;
767         unsigned long num_pages = mem->num_pages;
768         int ret;
769
770         mutex_lock(&dev->struct_mutex);
771         do {
772                 node = drm_mm_search_free(&man->manager, num_pages,
773                                           mem->page_alignment, 1);
774                 if (node)
775                         break;
776
777                 lru = &man->lru;
778                 if (lru->next == lru)
779                         break;
780
781                 entry = list_entry(lru->next, struct drm_buffer_object, lru);
782                 atomic_inc(&entry->usage);
783                 mutex_unlock(&dev->struct_mutex);
784                 mutex_lock(&entry->mutex);
785                 ret = drm_bo_evict(entry, mem_type, no_wait);
786                 mutex_unlock(&entry->mutex);
787                 drm_bo_usage_deref_unlocked(&entry);
788                 if (ret)
789                         return ret;
790                 mutex_lock(&dev->struct_mutex);
791         } while (1);
792
793         if (!node) {
794                 mutex_unlock(&dev->struct_mutex);
795                 return -ENOMEM;
796         }
797
798         node = drm_mm_get_block(node, num_pages, mem->page_alignment);
799         if (unlikely(!node)) {
800                 mutex_unlock(&dev->struct_mutex);
801                 return -ENOMEM;
802         }
803
804         mutex_unlock(&dev->struct_mutex);
805         mem->mm_node = node;
806         mem->mem_type = mem_type;
807         return 0;
808 }
809
810 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
811                                 int disallow_fixed,
812                                 uint32_t mem_type,
813                                 uint64_t mask, uint32_t *res_mask)
814 {
815         uint64_t cur_flags = drm_bo_type_flags(mem_type);
816         uint64_t flag_diff;
817
818         if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
819                 return 0;
820         if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
821                 cur_flags |= DRM_BO_FLAG_CACHED;
822         if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
823                 cur_flags |= DRM_BO_FLAG_MAPPABLE;
824         if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
825                 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
826
827         if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
828                 return 0;
829
830         if (mem_type == DRM_BO_MEM_LOCAL) {
831                 *res_mask = cur_flags;
832                 return 1;
833         }
834
835         flag_diff = (mask ^ cur_flags);
836         if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
837                 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
838
839         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
840             (!(mask & DRM_BO_FLAG_CACHED) ||
841              (mask & DRM_BO_FLAG_FORCE_CACHING)))
842                 return 0;
843
844         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
845             ((mask & DRM_BO_FLAG_MAPPABLE) ||
846              (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
847                 return 0;
848
849         *res_mask = cur_flags;
850         return 1;
851 }
852
853 /**
854  * Creates space for memory region @mem according to its type.
855  *
856  * This function first searches for free space in compatible memory types in
857  * the priority order defined by the driver.  If free space isn't found, then
858  * drm_bo_mem_force_space is attempted in priority order to evict and find
859  * space.
860  */
861 int drm_bo_mem_space(struct drm_buffer_object *bo,
862                      struct drm_bo_mem_reg *mem, int no_wait)
863 {
864         struct drm_device *dev = bo->dev;
865         struct drm_buffer_manager *bm = &dev->bm;
866         struct drm_mem_type_manager *man;
867
868         uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
869         const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
870         uint32_t i;
871         uint32_t mem_type = DRM_BO_MEM_LOCAL;
872         uint32_t cur_flags;
873         int type_found = 0;
874         int type_ok = 0;
875         int has_eagain = 0;
876         struct drm_mm_node *node = NULL;
877         int ret;
878
879         mem->mm_node = NULL;
880         for (i = 0; i < num_prios; ++i) {
881                 mem_type = prios[i];
882                 man = &bm->man[mem_type];
883
884                 type_ok = drm_bo_mt_compatible(man,
885                                                bo->type == drm_bo_type_user,
886                                                mem_type, mem->proposed_flags,
887                                                &cur_flags);
888
889                 if (!type_ok)
890                         continue;
891
892                 if (mem_type == DRM_BO_MEM_LOCAL)
893                         break;
894
895                 if ((mem_type == bo->pinned_mem_type) &&
896                     (bo->pinned_node != NULL)) {
897                         node = bo->pinned_node;
898                         break;
899                 }
900
901                 mutex_lock(&dev->struct_mutex);
902                 if (man->has_type && man->use_type) {
903                         type_found = 1;
904                         node = drm_mm_search_free(&man->manager, mem->num_pages,
905                                                   mem->page_alignment, 1);
906                         if (node)
907                                 node = drm_mm_get_block(node, mem->num_pages,
908                                                         mem->page_alignment);
909                 }
910                 mutex_unlock(&dev->struct_mutex);
911                 if (node)
912                         break;
913         }
914
915         if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
916                 mem->mm_node = node;
917                 mem->mem_type = mem_type;
918                 mem->flags = cur_flags;
919                 return 0;
920         }
921
922         if (!type_found)
923                 return -EINVAL;
924
925         num_prios = dev->driver->bo_driver->num_mem_busy_prio;
926         prios = dev->driver->bo_driver->mem_busy_prio;
927
928         for (i = 0; i < num_prios; ++i) {
929                 mem_type = prios[i];
930                 man = &bm->man[mem_type];
931
932                 if (!man->has_type)
933                         continue;
934
935                 if (!drm_bo_mt_compatible(man,
936                                           bo->type == drm_bo_type_user,
937                                           mem_type,
938                                           mem->proposed_flags,
939                                           &cur_flags))
940                         continue;
941
942                 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
943
944                 if (ret == 0 && mem->mm_node) {
945                         mem->flags = cur_flags;
946                         return 0;
947                 }
948
949                 if (ret == -EAGAIN)
950                         has_eagain = 1;
951         }
952
953         ret = (has_eagain) ? -EAGAIN : -ENOMEM;
954         return ret;
955 }
956 EXPORT_SYMBOL(drm_bo_mem_space);
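/*
 * Illustrative sketch (hypothetical driver setup, not part of this file):
 * the placement priorities consulted above come from the driver's
 * drm_bo_driver. A driver preferring VRAM, then TT, then local system
 * memory might set up something like:
 *
 *	static uint32_t example_mem_prio[] = {
 *		DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL,
 *	};
 *
 *	static struct drm_bo_driver example_bo_driver = {
 *		.mem_type_prio = example_mem_prio,
 *		.mem_busy_prio = example_mem_prio,
 *		.num_mem_type_prio = ARRAY_SIZE(example_mem_prio),
 *		.num_mem_busy_prio = ARRAY_SIZE(example_mem_prio),
 *		(evict_flags, fence_type, move etc. omitted)
 *	};
 */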
957
958 /*
959  * drm_bo_modify_proposed_flags:
960  *
961  * @bo: the buffer object getting new flags
962  *
963  * @new_flags: the new set of proposed flag bits
964  *
965  * @new_mask: the mask of bits changed in new_flags
966  *
967  * Modify the proposed_flags bits in @bo
968  */
969 static int drm_bo_modify_proposed_flags (struct drm_buffer_object *bo,
970                                          uint64_t new_flags, uint64_t new_mask)
971 {
972         uint32_t new_access;
973
974         /* Copy unchanging bits from existing proposed_flags */
975         DRM_FLAG_MASKED(new_flags, bo->mem.proposed_flags, ~new_mask);
976          
977         if (bo->type == drm_bo_type_user &&
978             ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
979              (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
980                 DRM_ERROR("User buffers require cache-coherent memory.\n");
981                 return -EINVAL;
982         }
983
984         if (bo->type != drm_bo_type_kernel && (new_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
985                 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
986                 return -EPERM;
987         }
988
989         if (likely(new_mask & DRM_BO_MASK_MEM) &&
990             (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
991             !DRM_SUSER(DRM_CURPROC)) {
992                 if (likely(bo->mem.flags & new_flags & new_mask &
993                            DRM_BO_MASK_MEM))
994                         new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
995                                 (bo->mem.flags & DRM_BO_MASK_MEM);
996                 else {
997                         DRM_ERROR("Incompatible memory type specification "
998                                   "for NO_EVICT buffer.\n");
999                         return -EPERM;
1000                 }
1001         }
1002
1003         if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
1004                 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
1005                 return -EPERM;
1006         }
1007
1008         new_access = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1009                                   DRM_BO_FLAG_READ);
1010
1011         if (new_access == 0) {
1012                 DRM_ERROR("Invalid buffer object rwx properties\n");
1013                 return -EINVAL;
1014         }
1015
1016         bo->mem.proposed_flags = new_flags;
1017         return 0;
1018 }
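/*
 * Illustrative example (not part of the original file): with
 * bo->mem.proposed_flags containing DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 * DRM_BO_FLAG_MEM_TT, a call like
 *
 *	drm_bo_modify_proposed_flags(bo, DRM_BO_FLAG_MEM_VRAM,
 *				     DRM_BO_MASK_MEM);
 *
 * keeps the READ/WRITE access bits (they lie outside new_mask) and only
 * replaces the memory-type bits, as done by the DRM_FLAG_MASKED line above.
 */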
1019
1020 /*
1021  * Call dev->struct_mutex locked.
1022  */
1023
1024 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1025                                               uint32_t handle, int check_owner)
1026 {
1027         struct drm_user_object *uo;
1028         struct drm_buffer_object *bo;
1029
1030         uo = drm_lookup_user_object(file_priv, handle);
1031
1032         if (!uo || (uo->type != drm_buffer_type)) {
1033                 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1034                 return NULL;
1035         }
1036
1037         if (check_owner && file_priv != uo->owner) {
1038                 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1039                         return NULL;
1040         }
1041
1042         bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1043         atomic_inc(&bo->usage);
1044         return bo;
1045 }
1046 EXPORT_SYMBOL(drm_lookup_buffer_object);
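/*
 * Illustrative sketch (not part of the original file): the lookup above
 * takes a usage reference which the caller must drop with one of the
 * deref functions, e.g.:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (!bo)
 *		return -EINVAL;
 *	(use bo)
 *	drm_bo_usage_deref_unlocked(&bo);
 *
 * This is the pattern used by the map/unmap and validate paths below.
 */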
1047
1048 /*
1049  * Call bo->mutex locked.
1050  * Returns -EBUSY if the buffer is currently rendered to or from. 0 otherwise.
1051  * Unlike drm_bo_busy, this function does no fence flushing.
1052  */
1053
1054 static int drm_bo_quick_busy(struct drm_buffer_object *bo, int check_unfenced)
1055 {
1056         struct drm_fence_object *fence = bo->fence;
1057
1058         if (check_unfenced && (bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1059                 return -EBUSY;
1060
1061         if (fence) {
1062                 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1063                         drm_fence_usage_deref_unlocked(&bo->fence);
1064                         return 0;
1065                 }
1066                 return -EBUSY;
1067         }
1068         return 0;
1069 }
1070
1071 int drm_bo_evict_cached(struct drm_buffer_object *bo)
1072 {
1073         int ret = 0;
1074
1075         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1076         if (bo->mem.mm_node)
1077                 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1078         return ret;
1079 }
1080
1081 EXPORT_SYMBOL(drm_bo_evict_cached);
1082 /*
1083  * Wait until a buffer is unmapped.
1084  */
1085
1086 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1087 {
1088         int ret = 0;
1089
1090         if (likely(atomic_read(&bo->mapped) == 0))
1091                 return 0;
1092
1093         if (unlikely(no_wait))
1094                 return -EBUSY;
1095
1096         do {
1097                 mutex_unlock(&bo->mutex);
1098                 ret = wait_event_interruptible(bo->event_queue,
1099                                                atomic_read(&bo->mapped) == 0);
1100                 mutex_lock(&bo->mutex);
1101                 bo->priv_flags |= _DRM_BO_FLAG_UNLOCKED;
1102
1103                 if (ret == -ERESTARTSYS)
1104                         ret = -EAGAIN;
1105         } while((ret == 0) && atomic_read(&bo->mapped) > 0);
1106
1107         return ret;
1108 }
1109
1110 /*
1111  * Fill in the ioctl reply argument with buffer info.
1112  * Bo locked.
1113  */
1114
1115 void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1116                          struct drm_bo_info_rep *rep)
1117 {
1118         if (!rep)
1119                 return;
1120
1121         rep->handle = bo->base.hash.key;
1122         rep->flags = bo->mem.flags;
1123         rep->size = bo->num_pages * PAGE_SIZE;
1124         rep->offset = bo->offset;
1125
1126         /*
1127          * drm_bo_type_device buffers have user-visible
1128          * handles which can be used to share across
1129          * processes. Hand that back to the application
1130          */
1131         if (bo->type == drm_bo_type_device)
1132                 rep->arg_handle = bo->map_list.user_token;
1133         else
1134                 rep->arg_handle = 0;
1135
1136         rep->proposed_flags = bo->mem.proposed_flags;
1137         rep->buffer_start = bo->buffer_start;
1138         rep->fence_flags = bo->fence_type;
1139         rep->rep_flags = 0;
1140         rep->page_alignment = bo->mem.page_alignment;
1141
1142         if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo, 1)) {
1143                 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1144                                 DRM_BO_REP_BUSY);
1145         }
1146 }
1147 EXPORT_SYMBOL(drm_bo_fill_rep_arg);
1148
1149 /*
1150  * Wait for buffer idle and register that we've mapped the buffer.
1151  * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1152  * so that if the client dies, the mapping is automatically
1153  * unregistered.
1154  */
1155
1156 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1157                                  uint32_t map_flags, unsigned hint,
1158                                  struct drm_bo_info_rep *rep)
1159 {
1160         struct drm_buffer_object *bo;
1161         struct drm_device *dev = file_priv->minor->dev;
1162         int ret = 0;
1163         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1164
1165         mutex_lock(&dev->struct_mutex);
1166         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1167         mutex_unlock(&dev->struct_mutex);
1168
1169         if (!bo)
1170                 return -EINVAL;
1171
1172         mutex_lock(&bo->mutex);
1173         do {
1174                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1175
1176                 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1177                 if (unlikely(ret))
1178                         goto out;
1179
1180                 if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1181                         drm_bo_evict_cached(bo);
1182
1183         } while (unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1184
1185         atomic_inc(&bo->mapped);
1186         mutex_lock(&dev->struct_mutex);
1187         ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1188         mutex_unlock(&dev->struct_mutex);
1189         if (ret) {
1190                 if (atomic_dec_and_test(&bo->mapped))
1191                         wake_up_all(&bo->event_queue);
1192
1193         } else
1194                 drm_bo_fill_rep_arg(bo, rep);
1195
1196  out:
1197         mutex_unlock(&bo->mutex);
1198         drm_bo_usage_deref_unlocked(&bo);
1199
1200         return ret;
1201 }
1202
1203 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1204 {
1205         struct drm_device *dev = file_priv->minor->dev;
1206         struct drm_buffer_object *bo;
1207         struct drm_ref_object *ro;
1208         int ret = 0;
1209
1210         mutex_lock(&dev->struct_mutex);
1211
1212         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1213         if (!bo) {
1214                 ret = -EINVAL;
1215                 goto out;
1216         }
1217
1218         ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1219         if (!ro) {
1220                 ret = -EINVAL;
1221                 goto out;
1222         }
1223
1224         drm_remove_ref_object(file_priv, ro);
1225         drm_bo_usage_deref_locked(&bo);
1226 out:
1227         mutex_unlock(&dev->struct_mutex);
1228         return ret;
1229 }
1230
1231 /*
1232  * Call struct-sem locked.
1233  */
1234
1235 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1236                                          struct drm_user_object *uo,
1237                                          enum drm_ref_type action)
1238 {
1239         struct drm_buffer_object *bo =
1240             drm_user_object_entry(uo, struct drm_buffer_object, base);
1241
1242         /*
1243          * We DON'T want to take bo->mutex here, because we want to be
1244          * able to hold it while waiting for the buffer to become unmapped.
1245          */
1246
1247         BUG_ON(action != _DRM_REF_TYPE1);
1248
1249         if (atomic_dec_and_test(&bo->mapped))
1250                 wake_up_all(&bo->event_queue);
1251 }
1252
1253 /*
1254  * bo->mutex locked.
1255  * Note that new_mem_flags are NOT transferred to the bo->mem.proposed_flags.
1256  */
1257
1258 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1259                        int no_wait, int move_unfenced)
1260 {
1261         struct drm_device *dev = bo->dev;
1262         struct drm_buffer_manager *bm = &dev->bm;
1263         int ret = 0;
1264         struct drm_bo_mem_reg mem;
1265
1266         BUG_ON(bo->fence != NULL);
1267
1268         mem.num_pages = bo->num_pages;
1269         mem.size = mem.num_pages << PAGE_SHIFT;
1270         mem.proposed_flags = new_mem_flags;
1271         mem.page_alignment = bo->mem.page_alignment;
1272
1273         mutex_lock(&bm->evict_mutex);
1274         mutex_lock(&dev->struct_mutex);
1275         list_del_init(&bo->lru);
1276         mutex_unlock(&dev->struct_mutex);
1277
1278         /*
1279          * Determine where to move the buffer.
1280          */
1281         ret = drm_bo_mem_space(bo, &mem, no_wait);
1282         if (ret)
1283                 goto out_unlock;
1284
1285         ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1286
1287 out_unlock:
1288         mutex_lock(&dev->struct_mutex);
1289         if (ret || !move_unfenced) {
1290                 if (mem.mm_node) {
1291                         if (mem.mm_node != bo->pinned_node)
1292                                 drm_mm_put_block(mem.mm_node);
1293                         mem.mm_node = NULL;
1294                 }
1295                 drm_bo_add_to_lru(bo);
1296                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1297                         wake_up_all(&bo->event_queue);
1298                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1299                                         _DRM_BO_FLAG_UNFENCED);
1300                 }
1301         } else {
1302                 list_add_tail(&bo->lru, &bm->unfenced);
1303                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1304                                 _DRM_BO_FLAG_UNFENCED);
1305         }
1306         mutex_unlock(&dev->struct_mutex);
1307         mutex_unlock(&bm->evict_mutex);
1308         return ret;
1309 }
1310
1311 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1312 {
1313         uint32_t flag_diff = (mem->proposed_flags ^ mem->flags);
1314
1315         if ((mem->proposed_flags & mem->flags & DRM_BO_MASK_MEM) == 0)
1316                 return 0;
1317         if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1318             (/* !(mem->proposed_flags & DRM_BO_FLAG_CACHED) ||*/
1319              (mem->proposed_flags & DRM_BO_FLAG_FORCE_CACHING)))
1320                 return 0;
1321
1322         if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1323             ((mem->proposed_flags & DRM_BO_FLAG_MAPPABLE) ||
1324              (mem->proposed_flags & DRM_BO_FLAG_FORCE_MAPPABLE)))
1325                 return 0;
1326         return 1;
1327 }
1328
1329 /**
1330  * drm_buffer_object_validate:
1331  *
1332  * @bo: the buffer object to modify
1333  *
1334  * @fence_class: the new fence class covering this buffer
1335  *
1336  * @move_unfenced: a boolean indicating whether switching the
1337  * memory space of this buffer should cause the buffer to
1338  * be placed on the unfenced list.
1339  *
1340  * @no_wait: whether this function should return -EBUSY instead
1341  * of waiting.
1342  *
1343  * Change buffer access parameters. This can involve moving
1344  * the buffer to the correct memory type, pinning the buffer
1345  * or changing the class/type of fence covering this buffer
1346  *
1347  * Must be called with bo locked.
1348  */
1349
1350 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1351                                       uint32_t fence_class,
1352                                       int move_unfenced, int no_wait,
1353                                       int move_buffer)
1354 {
1355         struct drm_device *dev = bo->dev;
1356         struct drm_buffer_manager *bm = &dev->bm;
1357         int ret;
1358
1359         if (move_buffer) {
1360                 ret = drm_bo_move_buffer(bo, bo->mem.proposed_flags, no_wait,
1361                                          move_unfenced);
1362                 if (ret) {
1363                         if (ret != -EAGAIN)
1364                                 DRM_ERROR("Failed moving buffer.\n");
1365                         if (ret == -ENOMEM)
1366                                 DRM_ERROR("Out of aperture space or "
1367                                           "DRM memory quota.\n");
1368                         return ret;
1369                 }
1370         }
1371
1372         /*
1373          * Pinned buffers.
1374          */
1375
1376         if (bo->mem.proposed_flags & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1377                 bo->pinned_mem_type = bo->mem.mem_type;
1378                 mutex_lock(&dev->struct_mutex);
1379                 list_del_init(&bo->pinned_lru);
1380                 drm_bo_add_to_pinned_lru(bo);
1381
1382                 if (bo->pinned_node != bo->mem.mm_node) {
1383                         if (bo->pinned_node != NULL)
1384                                 drm_mm_put_block(bo->pinned_node);
1385                         bo->pinned_node = bo->mem.mm_node;
1386                 }
1387
1388                 mutex_unlock(&dev->struct_mutex);
1389
1390         } else if (bo->pinned_node != NULL) {
1391
1392                 mutex_lock(&dev->struct_mutex);
1393
1394                 if (bo->pinned_node != bo->mem.mm_node)
1395                         drm_mm_put_block(bo->pinned_node);
1396
1397                 list_del_init(&bo->pinned_lru);
1398                 bo->pinned_node = NULL;
1399                 mutex_unlock(&dev->struct_mutex);
1400
1401         }
1402
1403         /*
1404          * We might need to add a TTM.
1405          */
1406
1407         if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1408                 ret = drm_bo_add_ttm(bo);
1409                 if (ret)
1410                         return ret;
1411         }
1412         /*
1413          * Validation has succeeded, move the access and other
1414          * non-mapping-related flag bits from the proposed flags to
1415          * the active flags
1416          */
1417
1418         DRM_FLAG_MASKED(bo->mem.flags, bo->mem.proposed_flags, ~DRM_BO_MASK_MEMTYPE);
1419
1420         /*
1421          * Finally, adjust lru to be sure.
1422          */
1423
1424         mutex_lock(&dev->struct_mutex);
1425         list_del(&bo->lru);
1426         if (move_unfenced) {
1427                 list_add_tail(&bo->lru, &bm->unfenced);
1428                 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1429                                 _DRM_BO_FLAG_UNFENCED);
1430         } else {
1431                 drm_bo_add_to_lru(bo);
1432                 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1433                         wake_up_all(&bo->event_queue);
1434                         DRM_FLAG_MASKED(bo->priv_flags, 0,
1435                                         _DRM_BO_FLAG_UNFENCED);
1436                 }
1437         }
1438         mutex_unlock(&dev->struct_mutex);
1439
1440         return 0;
1441 }
1442
1443 /*
1444  * This function is called with bo->mutex locked, but may release it
1445  * temporarily to wait for events.
1446  */
1447
1448 static int drm_bo_prepare_for_validate(struct drm_buffer_object *bo,
1449                                        uint64_t flags,
1450                                        uint64_t mask,
1451                                        uint32_t hint,
1452                                        uint32_t fence_class,
1453                                        int no_wait,
1454                                        int *move_buffer)
1455 {
1456         struct drm_device *dev = bo->dev;
1457         struct drm_bo_driver *driver = dev->driver->bo_driver;
1458         uint32_t ftype;
1459
1460         int ret;
1461
1462         DRM_DEBUG("Proposed flags 0x%016llx, Old flags 0x%016llx\n",
1463                   (unsigned long long) bo->mem.proposed_flags,
1464                   (unsigned long long) bo->mem.flags);
1465
1466         ret = drm_bo_modify_proposed_flags (bo, flags, mask);
1467         if (ret)
1468                 return ret;
1469
1470         ret = drm_bo_wait_unmapped(bo, no_wait);
1471         if (ret)
1472                 return ret;
1473
1474         ret = driver->fence_type(bo, &fence_class, &ftype);
1475
1476         if (ret) {
1477                 DRM_ERROR("Driver did not support given buffer permissions.\n");
1478                 return ret;
1479         }
1480
1481         /*
1482          * We're switching command submission mechanism,
1483          * or cannot simply rely on the hardware serializing for us.
1484          * Insert a driver-dependent barrier or wait for buffer idle.
1485          */
1486
1487         if ((fence_class != bo->fence_class) ||
1488             ((ftype ^ bo->fence_type) & bo->fence_type)) {
1489
1490                 ret = -EINVAL;
1491                 if (driver->command_stream_barrier) {
1492                         ret = driver->command_stream_barrier(bo,
1493                                                              fence_class,
1494                                                              ftype,
1495                                                              no_wait);
1496                 }
1497                 if (ret && ret != -EAGAIN) 
1498                         ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1499                 
1500                 if (ret)
1501                         return ret;
1502         }
1503
1504         bo->new_fence_class = fence_class;
1505         bo->new_fence_type = ftype;
1506
1507         /*
1508          * Check whether we need to move buffer.
1509          */
1510
1511         *move_buffer = 0;
1512         if (!drm_bo_mem_compat(&bo->mem)) {
1513                 *move_buffer = 1;
1514                 ret = drm_bo_wait(bo, 0, 1, no_wait, 1);
1515         }
1516
1517         return ret;
1518 }
1519
1520 /**
1521  * drm_bo_do_validate:
1522  *
1523  * @bo: the buffer object
1524  *
1525  * @flags: access rights, mapping parameters and cacheability. See
1526  * the DRM_BO_FLAG_* values in drm.h
1527  *
1528  * @mask: Which flag values to change; this allows callers to modify
1529  * things without knowing the current state of other flags.
1530  *
1531  * @hint: changes the procedure for this operation; see the DRM_BO_HINT_*
1532  * values in drm.h.
1533  *
1534  * @fence_class: a driver-specific way of doing fences. Presumably,
1535  * this would be used if the driver had more than one submission and
1536  * fencing mechanism. At this point, there isn't any use of this
1537  * from the user mode code.
1538  *
1539  * @rep: To be stuffed with the reply from validation
1540  * 
1541  * 'validate' a buffer object. This changes where the buffer is
1542  * located, along with changing access modes.
1543  */
1544
1545 int drm_bo_do_validate(struct drm_buffer_object *bo,
1546                        uint64_t flags, uint64_t mask, uint32_t hint,
1547                        uint32_t fence_class,
1548                        struct drm_bo_info_rep *rep)
1549 {
1550         int ret;
1551         int no_wait = (hint & DRM_BO_HINT_DONT_BLOCK) != 0;
1552         int move_buffer;
1553
1554         mutex_lock(&bo->mutex);
1555
1556         do {
1557                 bo->priv_flags &= ~_DRM_BO_FLAG_UNLOCKED;
1558
1559                 ret = drm_bo_prepare_for_validate(bo, flags, mask, hint,
1560                                                   fence_class, no_wait,
1561                                                   &move_buffer);
1562                 if (ret)
1563                         goto out;
1564
1565         } while(unlikely(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED));
1566
1567         ret = drm_buffer_object_validate(bo,
1568                                          fence_class,
1569                                          !(hint & DRM_BO_HINT_DONT_FENCE),
1570                                          no_wait,
1571                                          move_buffer);
1572
1573         BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNLOCKED);
1574 out:
1575         if (rep)
1576                 drm_bo_fill_rep_arg(bo, rep);
1577
1578         mutex_unlock(&bo->mutex);
1579
1580         return ret;
1581 }
1582 EXPORT_SYMBOL(drm_bo_do_validate);
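
/*
 * Illustrative sketch only (not part of the original file): how a driver
 * might use drm_bo_do_validate() to migrate a buffer into VRAM and pin it
 * there.  DRM_BO_MASK_MEM is assumed to be the drm.h mask covering the
 * memory-placement flags; the block is compiled out.
 */
#if 0
static int example_pin_to_vram(struct drm_buffer_object *bo)
{
        struct drm_bo_info_rep rep;

        /* Propose VRAM placement, forbid eviction, and skip fencing. */
        return drm_bo_do_validate(bo,
                                  DRM_BO_FLAG_MEM_VRAM | DRM_BO_FLAG_NO_EVICT,
                                  DRM_BO_MASK_MEM | DRM_BO_FLAG_NO_EVICT,
                                  DRM_BO_HINT_DONT_FENCE,
                                  bo->fence_class, &rep);
}
#endif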
1583
1584 /**
1585  * drm_bo_handle_validate
1586  *
1587  * @file_priv: the drm file private, used to get a handle to the user context
1588  *
1589  * @handle: the buffer object handle
1590  *
1591  * @flags: access rights, mapping parameters and cacheability. See
1592  * the DRM_BO_FLAG_* values in drm.h
1593  *
1594  * @mask: Which flag values to change; this allows callers to modify
1595  * things without knowing the current state of other flags.
1596  *
1597  * @hint: changes the procedure for this operation; see the DRM_BO_HINT_*
1598  * values in drm.h.
1599  *
1600  * @fence_class: a driver-specific way of doing fences. Presumably,
1601  * this would be used if the driver had more than one submission and
1602  * fencing mechanism. At this point, there isn't any use of this
1603  * from the user mode code.
1604  *
1605  * @rep: To be stuffed with the reply from validation
1606  *
1607  * @bo_rep: To be stuffed with the buffer object pointer
1608  *
1609  * Perform drm_bo_do_validate on a buffer referenced by a user-space handle instead
1610  * of a pointer to a buffer object. Optionally return a pointer to the buffer object.
1611  * This is a convenience wrapper only.
1612  */
1613
1614 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1615                            uint64_t flags, uint64_t mask,
1616                            uint32_t hint,
1617                            uint32_t fence_class,
1618                            struct drm_bo_info_rep *rep,
1619                            struct drm_buffer_object **bo_rep)
1620 {
1621         struct drm_device *dev = file_priv->minor->dev;
1622         struct drm_buffer_object *bo;
1623         int ret;
1624
1625         mutex_lock(&dev->struct_mutex);
1626         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1627         mutex_unlock(&dev->struct_mutex);
1628
1629         if (!bo)
1630                 return -EINVAL;
1631
1632         if (bo->base.owner != file_priv)
1633                 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1634
1635         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class, rep);
1636
1637         if (!ret && bo_rep)
1638                 *bo_rep = bo;
1639         else
1640                 drm_bo_usage_deref_unlocked(&bo);
1641
1642         return ret;
1643 }
1644 EXPORT_SYMBOL(drm_bo_handle_validate);
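
/*
 * Illustrative sketch only: validating a buffer by its user-space handle,
 * roughly as an ioctl handler would.  The handle and placement flags are
 * hypothetical; passing NULL for bo_rep makes the wrapper drop the extra
 * reference taken by the lookup.
 */
#if 0
static int example_validate_handle(struct drm_file *file_priv, uint32_t handle)
{
        struct drm_bo_info_rep rep;

        return drm_bo_handle_validate(file_priv, handle,
                                      DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
                                      DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
                                      DRM_BO_HINT_DONT_FENCE,
                                      0, &rep, NULL);
}
#endif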
1645
1646
1647 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1648                               struct drm_bo_info_rep *rep)
1649 {
1650         struct drm_device *dev = file_priv->minor->dev;
1651         struct drm_buffer_object *bo;
1652
1653         mutex_lock(&dev->struct_mutex);
1654         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1655         mutex_unlock(&dev->struct_mutex);
1656
1657         if (!bo)
1658                 return -EINVAL;
1659
1660         mutex_lock(&bo->mutex);
1661
1662         /*
1663          * FIXME: Quick busy here?
1664          */
1665
1666         drm_bo_busy(bo, 1);
1667         drm_bo_fill_rep_arg(bo, rep);
1668         mutex_unlock(&bo->mutex);
1669         drm_bo_usage_deref_unlocked(&bo);
1670         return 0;
1671 }
1672
1673 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1674                               uint32_t hint,
1675                               struct drm_bo_info_rep *rep)
1676 {
1677         struct drm_device *dev = file_priv->minor->dev;
1678         struct drm_buffer_object *bo;
1679         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1680         int ret;
1681
1682         mutex_lock(&dev->struct_mutex);
1683         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1684         mutex_unlock(&dev->struct_mutex);
1685
1686         if (!bo)
1687                 return -EINVAL;
1688
1689         mutex_lock(&bo->mutex);
1690         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 1, no_wait, 1);
1691         if (ret)
1692                 goto out;
1693
1694         drm_bo_fill_rep_arg(bo, rep);
1695 out:
1696         mutex_unlock(&bo->mutex);
1697         drm_bo_usage_deref_unlocked(&bo);
1698         return ret;
1699 }
1700
1701 int drm_buffer_object_create(struct drm_device *dev,
1702                              unsigned long size,
1703                              enum drm_bo_type type,
1704                              uint64_t flags,
1705                              uint32_t hint,
1706                              uint32_t page_alignment,
1707                              unsigned long buffer_start,
1708                              struct drm_buffer_object **buf_obj)
1709 {
1710         struct drm_buffer_manager *bm = &dev->bm;
1711         struct drm_buffer_object *bo;
1712         int ret = 0;
1713         unsigned long num_pages;
1714
1715         size += buffer_start & ~PAGE_MASK;
1716         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1717         if (num_pages == 0) {
1718                 DRM_ERROR("Illegal buffer object size.\n");
1719                 return -EINVAL;
1720         }
1721
1722         bo = drm_ctl_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1723
1724         if (!bo)
1725                 return -ENOMEM;
1726
1727         mutex_init(&bo->mutex);
1728         mutex_lock(&bo->mutex);
1729
1730         atomic_set(&bo->usage, 1);
1731         atomic_set(&bo->mapped, 0);
1732         DRM_INIT_WAITQUEUE(&bo->event_queue);
1733         INIT_LIST_HEAD(&bo->lru);
1734         INIT_LIST_HEAD(&bo->pinned_lru);
1735         INIT_LIST_HEAD(&bo->ddestroy);
1736         bo->dev = dev;
1737         bo->type = type;
1738         bo->num_pages = num_pages;
1739         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1740         bo->mem.num_pages = bo->num_pages;
1741         bo->mem.mm_node = NULL;
1742         bo->mem.page_alignment = page_alignment;
1743         bo->buffer_start = buffer_start & PAGE_MASK;
1744         bo->priv_flags = 0;
1745         bo->mem.flags = (DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1746                          DRM_BO_FLAG_MAPPABLE);
1747         bo->mem.proposed_flags = 0;
1748         atomic_inc(&bm->count);
1749         /*
1750          * Use drm_bo_modify_proposed_flags to error-check the proposed flags
1751          */
1752         ret = drm_bo_modify_proposed_flags(bo, flags, flags);
1753         if (ret)
1754                 goto out_err;
1755
1756         /*
1757          * For drm_bo_type_device buffers, allocate
1758          * address space from the device so that applications
1759          * can mmap the buffer from there
1760          */
1761         if (bo->type == drm_bo_type_device) {
1762                 mutex_lock(&dev->struct_mutex);
1763                 ret = drm_bo_setup_vm_locked(bo);
1764                 mutex_unlock(&dev->struct_mutex);
1765                 if (ret)
1766                         goto out_err;
1767         }
1768
1769         mutex_unlock(&bo->mutex);
1770         ret = drm_bo_do_validate(bo, 0, 0, hint | DRM_BO_HINT_DONT_FENCE,
1771                                  0, NULL);
1772         if (ret)
1773                 goto out_err_unlocked;
1774
1775         *buf_obj = bo;
1776         return 0;
1777
1778 out_err:
1779         mutex_unlock(&bo->mutex);
1780 out_err_unlocked:
1781         drm_bo_usage_deref_unlocked(&bo);
1782         return ret;
1783 }
1784 EXPORT_SYMBOL(drm_buffer_object_create);
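
/*
 * Illustrative sketch only: creating a kernel-side buffer object.  The one
 * megabyte size and the placement flags are hypothetical; drm_bo_type_device
 * is used so the object also gets an mmap offset via drm_bo_setup_vm_locked().
 */
#if 0
static int example_create_bo(struct drm_device *dev,
                             struct drm_buffer_object **bo)
{
        /* Cached, mappable system memory; no user page range, no alignment. */
        return drm_buffer_object_create(dev, 1024 * 1024, drm_bo_type_device,
                                        DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                                        DRM_BO_FLAG_MEM_LOCAL |
                                        DRM_BO_FLAG_CACHED |
                                        DRM_BO_FLAG_MAPPABLE,
                                        0, 0, 0, bo);
}
#endif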
1785
1786
1787 static int drm_bo_add_user_object(struct drm_file *file_priv,
1788                                   struct drm_buffer_object *bo, int shareable)
1789 {
1790         struct drm_device *dev = file_priv->minor->dev;
1791         int ret;
1792
1793         mutex_lock(&dev->struct_mutex);
1794         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1795         if (ret)
1796                 goto out;
1797
1798         bo->base.remove = drm_bo_base_deref_locked;
1799         bo->base.type = drm_buffer_type;
1800         bo->base.ref_struct_locked = NULL;
1801         bo->base.unref = drm_buffer_user_object_unmap;
1802
1803 out:
1804         mutex_unlock(&dev->struct_mutex);
1805         return ret;
1806 }
1807
1808 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1809 {
1810         struct drm_bo_create_arg *arg = data;
1811         struct drm_bo_create_req *req = &arg->d.req;
1812         struct drm_bo_info_rep *rep = &arg->d.rep;
1813         struct drm_buffer_object *entry;
1814         enum drm_bo_type bo_type;
1815         int ret = 0;
1816
1817         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1818             (int)(req->size / 1024), req->page_alignment * 4);
1819
1820         if (!dev->bm.initialized) {
1821                 DRM_ERROR("Buffer object manager is not initialized.\n");
1822                 return -EINVAL;
1823         }
1824
1825         /*
1826          * If the buffer creation request comes in with a starting address,
1827          * it points at the desired user pages to map. Otherwise, create a
1828          * drm_bo_type_device buffer, which uses pages allocated by the kernel.
1829          */
1830         bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_device;
1831
1832         /*
1833          * User buffers cannot be shared
1834          */
1835         if (bo_type == drm_bo_type_user)
1836                 req->flags &= ~DRM_BO_FLAG_SHAREABLE;
1837
1838         ret = drm_buffer_object_create(file_priv->minor->dev,
1839                                        req->size, bo_type, req->flags,
1840                                        req->hint, req->page_alignment,
1841                                        req->buffer_start, &entry);
1842         if (ret)
1843                 goto out;
1844
1845         ret = drm_bo_add_user_object(file_priv, entry,
1846                                      req->flags & DRM_BO_FLAG_SHAREABLE);
1847         if (ret) {
1848                 drm_bo_usage_deref_unlocked(&entry);
1849                 goto out;
1850         }
1851
1852         mutex_lock(&entry->mutex);
1853         drm_bo_fill_rep_arg(entry, rep);
1854         mutex_unlock(&entry->mutex);
1855
1856 out:
1857         return ret;
1858 }
1859
1860 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1861                            void *data, struct drm_file *file_priv)
1862 {
1863         struct drm_bo_map_wait_idle_arg *arg = data;
1864         struct drm_bo_info_req *req = &arg->d.req;
1865         struct drm_bo_info_rep *rep = &arg->d.rep;
1866         struct drm_buffer_object *bo;
1867         int ret;
1868
1869         if (!dev->bm.initialized) {
1870                 DRM_ERROR("Buffer object manager is not initialized.\n");
1871                 return -EINVAL;
1872         }
1873
1874         mutex_lock(&dev->struct_mutex);
1875         bo = drm_lookup_buffer_object(file_priv, req->handle, 1);
1876         mutex_unlock(&dev->struct_mutex);
1877
1878         if (!bo)
1879                 return -EINVAL;
1880
1881         ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
1882         if (ret) {
1883                 drm_bo_usage_deref_unlocked(&bo);
1884                 return ret;
1885         }
1886         if (bo->base.owner != file_priv)
1887                 req->mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1888         ret = drm_bo_do_validate(bo, req->flags, req->mask,
1889                                  req->hint | DRM_BO_HINT_DONT_FENCE,
1890                                  bo->fence_class, rep);
1891
1892         drm_bo_usage_deref_unlocked(&bo);
1893
1894         (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1895
1896         return ret;
1897 }
1898
1899 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1900 {
1901         struct drm_bo_map_wait_idle_arg *arg = data;
1902         struct drm_bo_info_req *req = &arg->d.req;
1903         struct drm_bo_info_rep *rep = &arg->d.rep;
1904         int ret;
1905         if (!dev->bm.initialized) {
1906                 DRM_ERROR("Buffer object manager is not initialized.\n");
1907                 return -EINVAL;
1908         }
1909
1910         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1911                                     req->hint, rep);
1912         if (ret)
1913                 return ret;
1914
1915         return 0;
1916 }
1917
1918 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1919 {
1920         struct drm_bo_handle_arg *arg = data;
1921         int ret;
1922         if (!dev->bm.initialized) {
1923                 DRM_ERROR("Buffer object manager is not initialized.\n");
1924                 return -EINVAL;
1925         }
1926
1927         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1928         return ret;
1929 }
1930
1931
1932 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1933 {
1934         struct drm_bo_reference_info_arg *arg = data;
1935         struct drm_bo_handle_arg *req = &arg->d.req;
1936         struct drm_bo_info_rep *rep = &arg->d.rep;
1937         struct drm_user_object *uo;
1938         int ret;
1939
1940         if (!dev->bm.initialized) {
1941                 DRM_ERROR("Buffer object manager is not initialized.\n");
1942                 return -EINVAL;
1943         }
1944
1945         ret = drm_user_object_ref(file_priv, req->handle,
1946                                   drm_buffer_type, &uo);
1947         if (ret)
1948                 return ret;
1949
1950         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1951         if (ret)
1952                 return ret;
1953
1954         return 0;
1955 }
1956
1957 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1958 {
1959         struct drm_bo_handle_arg *arg = data;
1960         int ret = 0;
1961
1962         if (!dev->bm.initialized) {
1963                 DRM_ERROR("Buffer object manager is not initialized.\n");
1964                 return -EINVAL;
1965         }
1966
1967         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1968         return ret;
1969 }
1970
1971 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1972 {
1973         struct drm_bo_reference_info_arg *arg = data;
1974         struct drm_bo_handle_arg *req = &arg->d.req;
1975         struct drm_bo_info_rep *rep = &arg->d.rep;
1976         int ret;
1977
1978         if (!dev->bm.initialized) {
1979                 DRM_ERROR("Buffer object manager is not initialized.\n");
1980                 return -EINVAL;
1981         }
1982
1983         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1984         if (ret)
1985                 return ret;
1986
1987         return 0;
1988 }
1989
1990 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1991 {
1992         struct drm_bo_map_wait_idle_arg *arg = data;
1993         struct drm_bo_info_req *req = &arg->d.req;
1994         struct drm_bo_info_rep *rep = &arg->d.rep;
1995         int ret;
1996         if (!dev->bm.initialized) {
1997                 DRM_ERROR("Buffer object manager is not initialized.\n");
1998                 return -EINVAL;
1999         }
2000
2001         ret = drm_bo_handle_wait(file_priv, req->handle,
2002                                  req->hint, rep);
2003         if (ret)
2004                 return ret;
2005
2006         return 0;
2007 }
2008
2009 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2010                              uint32_t mem_type,
2011                              int free_pinned,
2012                              int allow_errors)
2013 {
2014         struct drm_device *dev = bo->dev;
2015         int ret = 0;
2016
2017         mutex_lock(&bo->mutex);
2018
2019         ret = drm_bo_expire_fence(bo, allow_errors);
2020         if (ret)
2021                 goto out;
2022
2023         if (free_pinned) {
2024                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2025                 mutex_lock(&dev->struct_mutex);
2026                 list_del_init(&bo->pinned_lru);
2027                 if (bo->pinned_node == bo->mem.mm_node)
2028                         bo->pinned_node = NULL;
2029                 if (bo->pinned_node != NULL) {
2030                         drm_mm_put_block(bo->pinned_node);
2031                         bo->pinned_node = NULL;
2032                 }
2033                 mutex_unlock(&dev->struct_mutex);
2034         }
2035
2036         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2037                 DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
2038                           "cleanup. Removing flag and evicting.\n");
2039                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2040                 bo->mem.proposed_flags &= ~DRM_BO_FLAG_NO_EVICT;
2041         }
2042
2043         if (bo->mem.mem_type == mem_type)
2044                 ret = drm_bo_evict(bo, mem_type, 0);
2045
2046         if (ret) {
2047                 if (allow_errors) {
2048                         goto out;
2049                 } else {
2050                         ret = 0;
2051                         DRM_ERROR("Cleanup eviction failed\n");
2052                 }
2053         }
2054
2055 out:
2056         mutex_unlock(&bo->mutex);
2057         return ret;
2058 }
2059
2060
2061 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2062                                          int pinned_list)
2063 {
2064         if (pinned_list)
2065                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2066         else
2067                 return list_entry(list, struct drm_buffer_object, lru);
2068 }
2069
2070 /*
2071  * dev->struct_mutex locked.
2072  */
2073
2074 static int drm_bo_force_list_clean(struct drm_device *dev,
2075                                    struct list_head *head,
2076                                    unsigned mem_type,
2077                                    int free_pinned,
2078                                    int allow_errors,
2079                                    int pinned_list)
2080 {
2081         struct list_head *list, *next, *prev;
2082         struct drm_buffer_object *entry, *nentry;
2083         int ret;
2084         int do_restart;
2085
2086         /*
2087          * The list traversal is a bit odd here, because an item may
2088          * disappear from the list when we release the struct_mutex or
2089          * when we decrease the usage count. Also we're not guaranteed
2090          * to drain pinned lists, so we can't always restart.
2091          */
2092
2093 restart:
2094         nentry = NULL;
2095         list_for_each_safe(list, next, head) {
2096                 prev = list->prev;
2097
2098                 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2099                 atomic_inc(&entry->usage);
2100                 if (nentry) {
2101                         atomic_dec(&nentry->usage);
2102                         nentry = NULL;
2103                 }
2104
2105                 /*
2106                  * Protect the next item from destruction, so we can check
2107                  * its list pointers later on.
2108                  */
2109
2110                 if (next != head) {
2111                         nentry = drm_bo_entry(next, pinned_list);
2112                         atomic_inc(&nentry->usage);
2113                 }
2114                 mutex_unlock(&dev->struct_mutex);
2115
2116                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2117                                         allow_errors);
2118                 mutex_lock(&dev->struct_mutex);
2119
2120                 drm_bo_usage_deref_locked(&entry);
2121                 if (ret)
2122                         return ret;
2123
2124                 /*
2125                  * Has the next item disappeared from the list?
2126                  */
2127
2128                 do_restart = ((next->prev != list) && (next->prev != prev));
2129
2130                 if (nentry != NULL && do_restart)
2131                         drm_bo_usage_deref_locked(&nentry);
2132
2133                 if (do_restart)
2134                         goto restart;
2135         }
2136         return 0;
2137 }
2138
2139 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean)
2140 {
2141         struct drm_buffer_manager *bm = &dev->bm;
2142         struct drm_mem_type_manager *man = &bm->man[mem_type];
2143         int ret = -EINVAL;
2144
2145         if (mem_type >= DRM_BO_MEM_TYPES) {
2146                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2147                 return ret;
2148         }
2149
2150         if (!man->has_type) {
2151                 DRM_ERROR("Trying to take down uninitialized "
2152                           "memory manager type %u\n", mem_type);
2153                 return ret;
2154         }
2155
2156         if ((man->kern_init_type) && (kern_clean == 0)) {
2157                 DRM_ERROR("Trying to take down kernel initialized "
2158                           "memory manager type %u\n", mem_type);
2159                 return -EPERM;
2160         }
2161
2162         man->use_type = 0;
2163         man->has_type = 0;
2164
2165         ret = 0;
2166         if (mem_type > 0) {
2167                 BUG_ON(!list_empty(&bm->unfenced));
2168                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2169                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2170
2171                 if (drm_mm_clean(&man->manager)) {
2172                         drm_mm_takedown(&man->manager);
2173                 } else {
2174                         ret = -EBUSY;
2175                 }
2176         }
2177
2178         return ret;
2179 }
2180 EXPORT_SYMBOL(drm_bo_clean_mm);
2181
2182 /**
2183  * Evict all buffers of a particular mem_type, but leave memory manager
2184  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2185  * point since we have the hardware lock.
2186  */
2187
2188 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2189 {
2190         int ret;
2191         struct drm_buffer_manager *bm = &dev->bm;
2192         struct drm_mem_type_manager *man = &bm->man[mem_type];
2193
2194         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2195                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2196                 return -EINVAL;
2197         }
2198
2199         if (!man->has_type) {
2200                 DRM_ERROR("Memory type %u has not been initialized.\n",
2201                           mem_type);
2202                 return 0;
2203         }
2204
2205         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2206         if (ret)
2207                 return ret;
2208         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2209
2210         return ret;
2211 }
2212
2213 int drm_bo_init_mm(struct drm_device *dev, unsigned type,
2214                    unsigned long p_offset, unsigned long p_size,
2215                    int kern_init)
2216 {
2217         struct drm_buffer_manager *bm = &dev->bm;
2218         int ret = -EINVAL;
2219         struct drm_mem_type_manager *man;
2220
2221         if (type >= DRM_BO_MEM_TYPES) {
2222                 DRM_ERROR("Illegal memory type %d\n", type);
2223                 return ret;
2224         }
2225
2226         man = &bm->man[type];
2227         if (man->has_type) {
2228                 DRM_ERROR("Memory manager already initialized for type %d\n",
2229                           type);
2230                 return ret;
2231         }
2232
2233         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2234         if (ret)
2235                 return ret;
2236
2237         ret = 0;
2238         if (type != DRM_BO_MEM_LOCAL) {
2239                 if (!p_size) {
2240                         DRM_ERROR("Zero size memory manager type %d\n", type);
2241                         return ret;
2242                 }
2243                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2244                 if (ret)
2245                         return ret;
2246         }
2247         man->has_type = 1;
2248         man->use_type = 1;
2249         man->kern_init_type = kern_init;
2250         man->size = p_size;
2251
2252         INIT_LIST_HEAD(&man->lru);
2253         INIT_LIST_HEAD(&man->pinned);
2254
2255         return 0;
2256 }
2257 EXPORT_SYMBOL(drm_bo_init_mm);
2258
2259 /*
2260  * This function is intended to be called on drm driver unload.
2261  * If you decide to call it from lastclose, you must protect the call
2262  * from a potentially racing drm_bo_driver_init in firstopen.
2263  * (This may happen on X server restart).
2264  */
2265
2266 int drm_bo_driver_finish(struct drm_device *dev)
2267 {
2268         struct drm_buffer_manager *bm = &dev->bm;
2269         int ret = 0;
2270         unsigned i = DRM_BO_MEM_TYPES;
2271         struct drm_mem_type_manager *man;
2272
2273         mutex_lock(&dev->struct_mutex);
2274
2275         if (!bm->initialized)
2276                 goto out;
2277         bm->initialized = 0;
2278
2279         while (i--) {
2280                 man = &bm->man[i];
2281                 if (man->has_type) {
2282                         man->use_type = 0;
2283                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i, 1)) {
2284                                 ret = -EBUSY;
2285                                 DRM_ERROR("DRM memory manager type %d "
2286                                           "is not clean.\n", i);
2287                         }
2288                         man->has_type = 0;
2289                 }
2290         }
2291         mutex_unlock(&dev->struct_mutex);
2292
2293         if (!cancel_delayed_work(&bm->wq))
2294                 flush_scheduled_work();
2295
2296         mutex_lock(&dev->struct_mutex);
2297         drm_bo_delayed_delete(dev, 1);
2298         if (list_empty(&bm->ddestroy))
2299                 DRM_DEBUG("Delayed destroy list was clean\n");
2300
2301         if (list_empty(&bm->man[0].lru))
2302                 DRM_DEBUG("Swap list was clean\n");
2303
2304         if (list_empty(&bm->man[0].pinned))
2305                 DRM_DEBUG("NO_MOVE list was clean\n");
2306
2307         if (list_empty(&bm->unfenced))
2308                 DRM_DEBUG("Unfenced list was clean\n");
2309
2310         __free_page(bm->dummy_read_page);
2311
2312 out:
2313         mutex_unlock(&dev->struct_mutex);
2314         return ret;
2315 }
2316
2317 /*
2318  * This function is intended to be called on drm driver load.
2319  * If you decide to call it from firstopen, you must protect the call
2320  * from a potentially racing drm_bo_driver_finish in lastclose.
2321  * (This may happen on X server restart).
2322  */
2323
2324 int drm_bo_driver_init(struct drm_device *dev)
2325 {
2326         struct drm_bo_driver *driver = dev->driver->bo_driver;
2327         struct drm_buffer_manager *bm = &dev->bm;
2328         int ret = -EINVAL;
2329
2330         bm->dummy_read_page = NULL;
2331         drm_bo_init_lock(&bm->bm_lock);
2332         mutex_lock(&dev->struct_mutex);
2333         if (!driver)
2334                 goto out_unlock;
2335
2336         bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2337         if (!bm->dummy_read_page) {
2338                 ret = -ENOMEM;
2339                 goto out_unlock;
2340         }
2341
2342         /*
2343          * Initialize the system memory buffer type.
2344          * Other types need to be driver / IOCTL initialized.
2345          */
2346         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0, 1);
2347         if (ret)
2348                 goto out_unlock;
2349
2350         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2351         bm->initialized = 1;
2352         bm->nice_mode = 1;
2353         atomic_set(&bm->count, 0);
2354         bm->cur_pages = 0;
2355         INIT_LIST_HEAD(&bm->unfenced);
2356         INIT_LIST_HEAD(&bm->ddestroy);
2357 out_unlock:
2358         mutex_unlock(&dev->struct_mutex);
2359         return ret;
2360 }
2361 EXPORT_SYMBOL(drm_bo_driver_init);
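
/*
 * Illustrative sketch only: how a driver load hook might bring up the buffer
 * manager and register a VRAM range with it.  "vram_size" is a hypothetical
 * byte count supplied by the driver, and p_offset/p_size are assumed to be
 * expressed in pages, matching the DRM_BO_MEM_LOCAL call above.
 */
#if 0
static int example_driver_load(struct drm_device *dev, unsigned long vram_size)
{
        int ret;

        ret = drm_bo_driver_init(dev);
        if (ret)
                return ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_bo_init_mm(dev, DRM_BO_MEM_VRAM, 0,
                             vram_size >> PAGE_SHIFT, 1);
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
#endif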
2362
2363 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2364 {
2365         struct drm_mm_init_arg *arg = data;
2366         struct drm_buffer_manager *bm = &dev->bm;
2367         struct drm_bo_driver *driver = dev->driver->bo_driver;
2368         int ret;
2369
2370         if (!driver) {
2371                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2372                 return -EINVAL;
2373         }
2374
2375         if (arg->magic != DRM_BO_INIT_MAGIC) {
2376                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2377                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2378                 return -EINVAL;
2379         }
2380         if (arg->major != DRM_BO_INIT_MAJOR) {
2381                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2382                           "\tversion don't match. Got %d, expected %d.\n",
2383                           arg->major, DRM_BO_INIT_MAJOR);
2384                 return -EINVAL;
2385         }
2386
2387         ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
2388         if (ret)
2389                 return ret;
2390         ret = -EINVAL;
2391
2392         mutex_lock(&dev->struct_mutex);
2393         if (!bm->initialized) {
2394                 DRM_ERROR("DRM memory manager was not initialized.\n");
2395                 goto out;
2396         }
2397         if (arg->mem_type == 0) {
2398                 DRM_ERROR("System memory buffers already initialized.\n");
2399                 goto out;
2400         }
2401         ret = drm_bo_init_mm(dev, arg->mem_type,
2402                              arg->p_offset, arg->p_size, 0);
2403
2404 out:
2405         mutex_unlock(&dev->struct_mutex);
2406         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2407
2408         if (ret)
2409                 return ret;
2410
2411         return 0;
2412 }
2413
2414 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2415 {
2416         struct drm_mm_type_arg *arg = data;
2417         struct drm_buffer_manager *bm = &dev->bm;
2418         struct drm_bo_driver *driver = dev->driver->bo_driver;
2419         int ret;
2420
2421         if (!driver) {
2422                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2423                 return -EINVAL;
2424         }
2425
2426         ret = drm_bo_write_lock(&bm->bm_lock, 0, file_priv);
2427         if (ret)
2428                 return ret;
2429
2430         mutex_lock(&dev->struct_mutex);
2431         ret = -EINVAL;
2432         if (!bm->initialized) {
2433                 DRM_ERROR("DRM memory manager was not initialized\n");
2434                 goto out;
2435         }
2436         if (arg->mem_type == 0) {
2437                 DRM_ERROR("No takedown for System memory buffers.\n");
2438                 goto out;
2439         }
2440         ret = 0;
2441         if ((ret = drm_bo_clean_mm(dev, arg->mem_type, 0))) {
2442                 if (ret == -EINVAL)
2443                         DRM_ERROR("Memory manager type %d not clean. "
2444                                   "Delaying takedown\n", arg->mem_type);
2445                 ret = 0;
2446         }
2447 out:
2448         mutex_unlock(&dev->struct_mutex);
2449         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2450
2451         if (ret)
2452                 return ret;
2453
2454         return 0;
2455 }
2456
2457 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2458 {
2459         struct drm_mm_type_arg *arg = data;
2460         struct drm_bo_driver *driver = dev->driver->bo_driver;
2461         int ret;
2462
2463         if (!driver) {
2464                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2465                 return -EINVAL;
2466         }
2467
2468         if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2469                 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2470                 return -EINVAL;
2471         }
2472
2473         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2474                 ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
2475                 if (ret)
2476                         return ret;
2477         }
2478
2479         mutex_lock(&dev->struct_mutex);
2480         ret = drm_bo_lock_mm(dev, arg->mem_type);
2481         mutex_unlock(&dev->struct_mutex);
2482         if (ret) {
2483                 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2484                 return ret;
2485         }
2486
2487         return 0;
2488 }
2489
2490 int drm_mm_unlock_ioctl(struct drm_device *dev,
2491                         void *data,
2492                         struct drm_file *file_priv)
2493 {
2494         struct drm_mm_type_arg *arg = data;
2495         struct drm_bo_driver *driver = dev->driver->bo_driver;
2496         int ret;
2497
2498         if (!driver) {
2499                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2500                 return -EINVAL;
2501         }
2502
2503         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2504                 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2505                 if (ret)
2506                         return ret;
2507         }
2508
2509         return 0;
2510 }
2511
2512 int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2513 {
2514         struct drm_mm_info_arg *arg = data;
2515         struct drm_buffer_manager *bm = &dev->bm;
2516         struct drm_bo_driver *driver = dev->driver->bo_driver;
2517         struct drm_mem_type_manager *man;
2518         int ret = 0;
2519         int mem_type = arg->mem_type;
2520
2521         if (!driver) {
2522                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2523                 return -EINVAL;
2524         }
2525
2526         if (mem_type >= DRM_BO_MEM_TYPES) {
2527                 DRM_ERROR("Illegal memory type %d\n", arg->mem_type);
2528                 return -EINVAL;
2529         }
2530
2531         mutex_lock(&dev->struct_mutex);
2532         if (!bm->initialized) {
2533                 DRM_ERROR("DRM memory manager was not initialized\n");
2534                 ret = -EINVAL;
2535                 goto out;
2536         }
2537
2538
2539         man = &bm->man[arg->mem_type];
2540
2541         arg->p_size = man->size;
2542
2543 out:
2544         mutex_unlock(&dev->struct_mutex);
2545
2546         return ret;
2547 }
2548 /*
2549  * buffer object vm functions.
2550  */
2551
2552 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2553 {
2554         struct drm_buffer_manager *bm = &dev->bm;
2555         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2556
2557         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2558                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2559                         return 0;
2560
2561                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2562                         return 0;
2563
2564                 if (mem->flags & DRM_BO_FLAG_CACHED)
2565                         return 0;
2566         }
2567         return 1;
2568 }
2569 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2570
2571 /**
2572  * \c Get the PCI offset for the buffer object memory.
2573  *
2574  * \param bo The buffer object.
2575  * \param bus_base On return the base of the PCI region
2576  * \param bus_offset On return the byte offset into the PCI region
2577  * \param bus_size On return the byte size of the buffer object or zero if
2578  *     the buffer object memory is not accessible through a PCI region.
2579  * \return Failure indication.
2580  *
2581  * Returns -EINVAL if the buffer object is currently not mappable.
2582  * Otherwise returns zero.
2583  */
2584
2585 int drm_bo_pci_offset(struct drm_device *dev,
2586                       struct drm_bo_mem_reg *mem,
2587                       unsigned long *bus_base,
2588                       unsigned long *bus_offset, unsigned long *bus_size)
2589 {
2590         struct drm_buffer_manager *bm = &dev->bm;
2591         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2592
2593         *bus_size = 0;
2594         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2595                 return -EINVAL;
2596
2597         if (drm_mem_reg_is_pci(dev, mem)) {
2598                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2599                 *bus_size = mem->num_pages << PAGE_SHIFT;
2600                 *bus_base = man->io_offset;
2601         }
2602
2603         return 0;
2604 }
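
/*
 * Illustrative sketch only: using drm_bo_pci_offset() to ioremap a buffer
 * placement that is PCI-accessible.  Error handling and the eventual
 * iounmap() of the returned mapping are the caller's responsibility.
 */
#if 0
static void __iomem *example_map_bo_io(struct drm_device *dev,
                                       struct drm_bo_mem_reg *mem)
{
        unsigned long bus_base, bus_offset, bus_size;

        if (drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size))
                return NULL;
        if (bus_size == 0)      /* not backed by a PCI aperture */
                return NULL;

        return ioremap(bus_base + bus_offset, bus_size);
}
#endif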
2605
2606 /**
2607  * \c Kill all user-space virtual mappings of this buffer object.
2608  *
2609  * \param bo The buffer object.
2610  *
2611  * Call bo->mutex locked.
2612  */
2613
2614 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2615 {
2616         struct drm_device *dev = bo->dev;
2617         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2618         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2619
2620         if (!dev->dev_mapping)
2621                 return;
2622
2623         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2624 }
2625
2626 /**
2627  * drm_bo_takedown_vm_locked:
2628  *
2629  * @bo: the buffer object to remove any drm device mapping
2630  *
2631  * Remove any associated vm mapping on the drm device node that
2632  * would have been created for a drm_bo_type_device buffer
2633  */
2634 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2635 {
2636         struct drm_map_list *list;
2637         drm_local_map_t *map;
2638         struct drm_device *dev = bo->dev;
2639
2640         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2641         if (bo->type != drm_bo_type_device)
2642                 return;
2643
2644         list = &bo->map_list;
2645         if (list->user_token) {
2646                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2647                 list->user_token = 0;
2648         }
2649         if (list->file_offset_node) {
2650                 drm_mm_put_block(list->file_offset_node);
2651                 list->file_offset_node = NULL;
2652         }
2653
2654         map = list->map;
2655         if (!map)
2656                 return;
2657
2658         drm_ctl_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2659         list->map = NULL;
2660         list->user_token = 0ULL;
2661         drm_bo_usage_deref_locked(&bo);
2662 }
2663
2664 /**
2665  * drm_bo_setup_vm_locked:
2666  *
2667  * @bo: the buffer to allocate address space for
2668  *
2669  * Allocate address space in the drm device so that applications
2670  * can mmap the buffer and access the contents. This only
2671  * applies to drm_bo_type_device objects as others are not
2672  * placed in the drm device address space.
2673  */
2674 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2675 {
2676         struct drm_map_list *list = &bo->map_list;
2677         drm_local_map_t *map;
2678         struct drm_device *dev = bo->dev;
2679
2680         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2681         list->map = drm_ctl_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2682         if (!list->map)
2683                 return -ENOMEM;
2684
2685         map = list->map;
2686         map->offset = 0;
2687         map->type = _DRM_TTM;
2688         map->flags = _DRM_REMOVABLE;
2689         map->size = bo->mem.num_pages * PAGE_SIZE;
2690         atomic_inc(&bo->usage);
2691         map->handle = (void *)bo;
2692
2693         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2694                                                     bo->mem.num_pages, 0, 0);
2695
2696         if (unlikely(!list->file_offset_node)) {
2697                 drm_bo_takedown_vm_locked(bo);
2698                 return -ENOMEM;
2699         }
2700
2701         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2702                                                   bo->mem.num_pages, 0);
2703
2704         if (unlikely(!list->file_offset_node)) {
2705                 drm_bo_takedown_vm_locked(bo);
2706                 return -ENOMEM;
2707         }
2708
2709         list->hash.key = list->file_offset_node->start;
2710         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2711                 drm_bo_takedown_vm_locked(bo);
2712                 return -ENOMEM;
2713         }
2714
2715         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2716
2717         return 0;
2718 }
2719
2720 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2721                          struct drm_file *file_priv)
2722 {
2723         struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2724
2725         arg->major = DRM_BO_INIT_MAJOR;
2726         arg->minor = DRM_BO_INIT_MINOR;
2727         arg->patchlevel = DRM_BO_INIT_PATCH;
2728
2729         return 0;
2730 }