/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/*
 * Locking may look complicated, but it isn't really:
 *
 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
 * whenever there is a chance that it can be zero before or after the operation.
 *
 * dev->struct_mutex also protects all lists and list heads,
 * hash tables and hash heads.
 *
 * bo->mutex protects the buffer object itself, excluding the usage field.
 * bo->mutex also protects the buffer list heads, so to manipulate those
 * we need both bo->mutex and dev->struct_mutex.
 *
 * The locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
 * is a bit complicated: when dev->struct_mutex is released to grab bo->mutex,
 * the list traversal will, in general, need to be restarted.
 */
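
/*
 * Illustrative sketch only (not part of the driver): the restart pattern the
 * comment above describes, using the usage-reference helpers defined further
 * down in this file. The loop variable names are hypothetical.
 *
 *	mutex_lock(&dev->struct_mutex);
 * restart:
 *	list_for_each_entry(entry, &man->lru, lru) {
 *		atomic_inc(&entry->usage);
 *		mutex_unlock(&dev->struct_mutex);
 *		mutex_lock(&entry->mutex);
 *		mutex_lock(&dev->struct_mutex);
 *
 *		... operate on entry; the list may have changed meanwhile ...
 *
 *		mutex_unlock(&entry->mutex);
 *		drm_bo_usage_deref_locked(&entry);
 *		goto restart;
 *	}
 *	mutex_unlock(&dev->struct_mutex);
 */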

static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);

static inline uint64_t drm_bo_type_flags(unsigned type)
{
	return (1ULL << (24 + type));
}
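
/*
 * For illustration only: assuming the DRM_BO_FLAG_MEM_* bits start at bit 24
 * (which is what the shift above relies on), this maps a memory type index to
 * its corresponding memory-type flag, e.g.:
 *
 *	drm_bo_type_flags(DRM_BO_MEM_LOCAL) == DRM_BO_FLAG_MEM_LOCAL
 *	drm_bo_type_flags(DRM_BO_MEM_TT)    == DRM_BO_FLAG_MEM_TT
 */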

/*
 * bo locked. dev->struct_mutex locked.
 */

void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
{
	struct drm_mem_type_manager *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
	DRM_ASSERT_LOCKED(&bo->mutex);

	man = &bo->dev->bm.man[bo->pinned_mem_type];
	list_add_tail(&bo->pinned_lru, &man->pinned);
}

void drm_bo_add_to_lru(struct drm_buffer_object *bo)
{
	struct drm_mem_type_manager *man;

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
	    || bo->mem.mem_type != bo->pinned_mem_type) {
		man = &bo->dev->bm.man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
	} else {
		INIT_LIST_HEAD(&bo->lru);
	}
}

static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return 0;

	ret = drm_bo_lock_kmm(bo);
	if (ret)
		return ret;
	drm_bo_unmap_virtual(bo);
	if (old_is_pci)
		drm_bo_finish_unmap(bo);
#else
	if (!bo->map_list.map)
		return 0;

	drm_bo_unmap_virtual(bo);
#endif
	return 0;
}

static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
{
#ifdef DRM_ODD_MM_COMPAT
	int ret;

	if (!bo->map_list.map)
		return;

	ret = drm_bo_remap_bound(bo);
	if (ret) {
		DRM_ERROR("Failed to remap a bound buffer object.\n"
			  "\tThis might cause a sigbus later.\n");
	}
	drm_bo_unlock_kmm(bo);
#endif
}

/*
 * Call bo->mutex locked.
 */

static int drm_bo_add_ttm(struct drm_buffer_object *bo)
{
	struct drm_device *dev = bo->dev;
	int ret = 0;

	DRM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	switch (bo->type) {
	case drm_bo_type_dc:
	case drm_bo_type_kernel:
		bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
		if (!bo->ttm)
			ret = -ENOMEM;
		break;
	case drm_bo_type_user:
		bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
		if (!bo->ttm) {
			/* Don't hand a NULL ttm to drm_ttm_set_user() below. */
			ret = -ENOMEM;
			break;
		}

		ret = drm_ttm_set_user(bo->ttm, current,
				       bo->mem.mask & DRM_BO_FLAG_WRITE,
				       bo->buffer_start,
				       bo->num_pages,
				       dev->bm.dummy_read_page);
		if (ret)
			return ret;

		break;
	default:
		DRM_ERROR("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
				  struct drm_bo_mem_reg *mem,
				  int evict, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
	int new_is_pci = drm_mem_reg_is_pci(dev, mem);
	struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
	struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
		ret = drm_bo_vm_pre_move(bo, old_is_pci);
	if (ret)
		return ret;

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			goto out_err;

		if (mem->mem_type != DRM_BO_MEM_LOCAL) {
			ret = drm_bind_ttm(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {

			struct drm_bo_mem_reg *old_mem = &bo->mem;
			uint64_t save_flags = old_mem->flags;
			uint64_t save_mask = old_mem->mask;

			*old_mem = *mem;
			mem->mm_node = NULL;
			old_mem->mask = save_mask;
			DRM_FLAG_MASKED(save_flags, mem->flags,
					DRM_BO_MASK_MEMTYPE);
			goto moved;
		}
	}

	if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
	    !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {

		ret = drm_bo_move_ttm(bo, evict, no_wait, mem);

	} else if (dev->driver->bo_driver->move) {
		ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);

	} else {

		ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);

	}

	if (ret)
		goto out_err;

moved:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
		ret = dev->driver->bo_driver->invalidate_caches(dev,
								bo->mem.flags);
		if (ret)
			DRM_ERROR("Can not flush read caches\n");
	}

	DRM_FLAG_MASKED(bo->priv_flags,
			(evict) ? _DRM_BO_FLAG_EVICTED : 0,
			_DRM_BO_FLAG_EVICTED);

	if (bo->mem.mm_node)
		bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
			bm->man[bo->mem.mem_type].gpu_offset;

	return 0;

out_err:
	if (old_is_pci || new_is_pci)
		drm_bo_vm_post_move(bo);

	new_man = &bm->man[bo->mem.mem_type];
	if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
		drm_ttm_unbind(bo->ttm);
		drm_destroy_ttm(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/*
 * Call bo->mutex locked.
 * Wait until the buffer is idle.
 */

int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
		int no_wait)
{
	int ret;

	DRM_ASSERT_LOCKED(&bo->mutex);

	if (bo->fence) {
		if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		if (no_wait)
			return -EBUSY;

		ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
					    bo->fence_type);
		if (ret)
			return ret;

		drm_fence_usage_deref_unlocked(&bo->fence);
	}
	return 0;
}
EXPORT_SYMBOL(drm_bo_wait);
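
/*
 * Hedged usage sketch (not part of the driver): a typical caller idles a
 * buffer before touching its storage, as done elsewhere in this file.
 * Here lazy = 0, signals are not ignored, and the call may block:
 *
 *	mutex_lock(&bo->mutex);
 *	ret = drm_bo_wait(bo, 0, 0, 0);
 *	if (!ret)
 *		... the buffer is now idle and bo->fence has been dropped ...
 *	mutex_unlock(&bo->mutex);
 */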

static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	if (bo->fence) {
		if (bm->nice_mode) {
			unsigned long _end = jiffies + 3 * DRM_HZ;
			int ret;
			do {
				ret = drm_bo_wait(bo, 0, 1, 0);
				if (ret && allow_errors)
					return ret;

			} while (ret && !time_after_eq(jiffies, _end));

			if (bo->fence) {
				bm->nice_mode = 0;
				DRM_ERROR("Detected GPU lockup or "
					  "fence driver was taken down. "
					  "Evicting buffer.\n");
			}
		}
		if (bo->fence)
			drm_fence_usage_deref_unlocked(&bo->fence);
	}
	return 0;
}

/*
 * Call dev->struct_mutex locked.
 * Attempts to remove all private references to a buffer by expiring its
 * fence object and removing it from the lru lists and memory managers.
 */

static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	atomic_inc(&bo->usage);
	mutex_unlock(&dev->struct_mutex);
	mutex_lock(&bo->mutex);

	DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);

	if (bo->fence && drm_fence_object_signaled(bo->fence,
						   bo->fence_type))
		drm_fence_usage_deref_unlocked(&bo->fence);

	if (bo->fence && remove_all)
		(void)drm_bo_expire_fence(bo, 0);

	mutex_lock(&dev->struct_mutex);

	if (!atomic_dec_and_test(&bo->usage))
		goto out;

	if (!bo->fence) {
		list_del_init(&bo->lru);
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			if (bo->pinned_node == bo->mem.mm_node)
				bo->pinned_node = NULL;
			bo->mem.mm_node = NULL;
		}
		list_del_init(&bo->pinned_lru);
		if (bo->pinned_node) {
			drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = NULL;
		}
		list_del_init(&bo->ddestroy);
		mutex_unlock(&bo->mutex);
		drm_bo_destroy_locked(bo);
		return;
	}

	if (list_empty(&bo->ddestroy)) {
		drm_fence_object_flush(bo->fence, bo->fence_type);
		list_add_tail(&bo->ddestroy, &bm->ddestroy);
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}

out:
	mutex_unlock(&bo->mutex);
	return;
}

static void drm_bo_unreserve_size(unsigned long size)
{
	drm_free_memctl(size);
}

/*
 * Verify that refcount is 0 and that there are no internal references
 * to the buffer object. Then destroy it.
 */

static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	unsigned long reserved_size;

	DRM_ASSERT_LOCKED(&dev->struct_mutex);

	if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
	    list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
	    list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
		if (bo->fence != NULL) {
			DRM_ERROR("Fence was non-zero.\n");
			drm_bo_cleanup_refs(bo, 0);
			return;
		}

#ifdef DRM_ODD_MM_COMPAT
		BUG_ON(!list_empty(&bo->vma_list));
		BUG_ON(!list_empty(&bo->p_mm_list));
#endif

		if (bo->ttm) {
			drm_ttm_unbind(bo->ttm);
			drm_destroy_ttm(bo->ttm);
			bo->ttm = NULL;
		}

		atomic_dec(&bm->count);

		reserved_size = bo->reserved_size;

		drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
		drm_bo_unreserve_size(reserved_size);

		return;
	}

	/*
	 * Something still holds references to the buffer object.
	 * Get rid of those references.
	 */

	drm_bo_cleanup_refs(bo, 0);

	return;
}

/*
 * Call dev->struct_mutex locked.
 */

static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
{
	struct drm_buffer_manager *bm = &dev->bm;

	struct drm_buffer_object *entry, *nentry;
	struct list_head *list, *next;

	list_for_each_safe(list, next, &bm->ddestroy) {
		entry = list_entry(list, struct drm_buffer_object, ddestroy);

		nentry = NULL;
		if (next != &bm->ddestroy) {
			nentry = list_entry(next, struct drm_buffer_object,
					    ddestroy);
			atomic_inc(&nentry->usage);
		}

		drm_bo_cleanup_refs(entry, remove_all);

		if (nentry)
			atomic_dec(&nentry->usage);
	}
}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void drm_bo_delayed_workqueue(void *data)
#else
static void drm_bo_delayed_workqueue(struct work_struct *work)
#endif
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct drm_device *dev = (struct drm_device *) data;
	struct drm_buffer_manager *bm = &dev->bm;
#else
	struct drm_buffer_manager *bm =
	    container_of(work, struct drm_buffer_manager, wq.work);
	struct drm_device *dev = container_of(bm, struct drm_device, bm);
#endif

	DRM_DEBUG("Delayed delete Worker\n");

	mutex_lock(&dev->struct_mutex);
	if (!bm->initialized) {
		mutex_unlock(&dev->struct_mutex);
		return;
	}
	drm_bo_delayed_delete(dev, 0);
	if (bm->initialized && !list_empty(&bm->ddestroy)) {
		schedule_delayed_work(&bm->wq,
				      ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
	}
	mutex_unlock(&dev->struct_mutex);
}

void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
{
	struct drm_buffer_object *tmp_bo = *bo;

	*bo = NULL;

	DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);

	if (atomic_dec_and_test(&tmp_bo->usage))
		drm_bo_destroy_locked(tmp_bo);
}
EXPORT_SYMBOL(drm_bo_usage_deref_locked);

static void drm_bo_base_deref_locked(struct drm_file *file_priv,
				     struct drm_user_object *uo)
{
	struct drm_buffer_object *bo =
	    drm_user_object_entry(uo, struct drm_buffer_object, base);

	DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);

	drm_bo_takedown_vm_locked(bo);
	drm_bo_usage_deref_locked(&bo);
}

void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
{
	struct drm_buffer_object *tmp_bo = *bo;
	struct drm_device *dev = tmp_bo->dev;

	*bo = NULL;
	if (atomic_dec_and_test(&tmp_bo->usage)) {
		mutex_lock(&dev->struct_mutex);
		if (atomic_read(&tmp_bo->usage) == 0)
			drm_bo_destroy_locked(tmp_bo);
		mutex_unlock(&dev->struct_mutex);
	}
}
EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
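
/*
 * Hedged sketch (not part of the driver) of the reference-counting pattern
 * used throughout this file: take a usage reference while holding
 * dev->struct_mutex, drop the mutex to work on the object, then release the
 * reference with one of the deref helpers above (the pointer is NULLed):
 *
 *	mutex_lock(&dev->struct_mutex);
 *	atomic_inc(&bo->usage);
 *	mutex_unlock(&dev->struct_mutex);
 *
 *	mutex_lock(&bo->mutex);
 *	... work on the buffer object ...
 *	mutex_unlock(&bo->mutex);
 *
 *	drm_bo_usage_deref_unlocked(&bo);
 */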

void drm_putback_buffer_objects(struct drm_device *dev)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct list_head *list = &bm->unfenced;
	struct drm_buffer_object *entry, *next;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(entry, next, list, lru) {
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);

		mutex_lock(&entry->mutex);
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		mutex_lock(&dev->struct_mutex);

		list_del_init(&entry->lru);
		DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
		wake_up_all(&entry->event_queue);

		/*
		 * FIXME: Might want to put back on head of list
		 * instead of tail here.
		 */

		drm_bo_add_to_lru(entry);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(&entry);
	}
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_putback_buffer_objects);

/*
 * Note: the caller has to register (if applicable)
 * and deregister fence object usage.
 */

int drm_fence_buffer_objects(struct drm_device *dev,
			     struct list_head *list,
			     uint32_t fence_flags,
			     struct drm_fence_object *fence,
			     struct drm_fence_object **used_fence)
{
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_buffer_object *entry;
	uint32_t fence_type = 0;
	uint32_t fence_class = ~0;
	int count = 0;
	int ret = 0;
	struct list_head *l;

	mutex_lock(&dev->struct_mutex);

	if (!list)
		list = &bm->unfenced;

	if (fence)
		fence_class = fence->fence_class;

	list_for_each_entry(entry, list, lru) {
		BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
		fence_type |= entry->new_fence_type;
		if (fence_class == ~0)
			fence_class = entry->new_fence_class;
		else if (entry->new_fence_class != fence_class) {
			DRM_ERROR("Mismatched fence classes on unfenced list: "
				  "%d and %d.\n",
				  fence_class,
				  entry->new_fence_class);
			ret = -EINVAL;
			goto out;
		}
		count++;
	}

	if (!count) {
		ret = -EINVAL;
		goto out;
	}

	if (fence) {
		if ((fence_type & fence->type) != fence_type ||
		    (fence->fence_class != fence_class)) {
			DRM_ERROR("Given fence doesn't match buffers "
				  "on unfenced list.\n");
			ret = -EINVAL;
			goto out;
		}
	} else {
		mutex_unlock(&dev->struct_mutex);
		ret = drm_fence_object_create(dev, fence_class, fence_type,
					      fence_flags | DRM_FENCE_FLAG_EMIT,
					      &fence);
		mutex_lock(&dev->struct_mutex);
		if (ret)
			goto out;
	}

	count = 0;
	l = list->next;
	while (l != list) {
		prefetch(l->next);
		entry = list_entry(l, struct drm_buffer_object, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);
		list_del_init(l);
		if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			count++;
			if (entry->fence)
				drm_fence_usage_deref_locked(&entry->fence);
			entry->fence = drm_fence_reference_locked(fence);
			entry->fence_class = entry->new_fence_class;
			entry->fence_type = entry->new_fence_type;
			DRM_FLAG_MASKED(entry->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
			wake_up_all(&entry->event_queue);
			drm_bo_add_to_lru(entry);
		}
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(&entry);
		l = list->next;
	}
	DRM_DEBUG("Fenced %d buffers\n", count);
out:
	mutex_unlock(&dev->struct_mutex);
	*used_fence = fence;
	return ret;
}
EXPORT_SYMBOL(drm_fence_buffer_objects);
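
/*
 * Hedged usage sketch (not part of the driver): after queuing rendering that
 * covers the buffers on the unfenced list, a driver might fence them with a
 * newly created fence and then drop its own fence reference, per the note
 * above:
 *
 *	struct drm_fence_object *fence = NULL;
 *	int ret;
 *
 *	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
 *	if (!ret)
 *		drm_fence_usage_deref_unlocked(&fence);
 */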

/*
 * bo->mutex locked
 */

static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
			int no_wait)
{
	int ret = 0;
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg evict_mem;

	/*
	 * Someone might have modified the buffer before we took the
	 * buffer mutex.
	 */

	if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
		goto out;
	if (bo->mem.mem_type != mem_type)
		goto out;

	ret = drm_bo_wait(bo, 0, 0, no_wait);

	if (ret && ret != -EAGAIN) {
		DRM_ERROR("Failed to expire fence before "
			  "buffer eviction.\n");
		goto out;
	}

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;

	evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
	ret = drm_bo_mem_space(bo, &evict_mem, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Failed to find memory space for "
				  "buffer 0x%p eviction.\n", bo);
		goto out;
	}

	ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);

	if (ret) {
		if (ret != -EAGAIN)
			DRM_ERROR("Buffer eviction failed\n");
		goto out;
	}

	mutex_lock(&dev->struct_mutex);
	if (evict_mem.mm_node) {
		if (evict_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(evict_mem.mm_node);
		evict_mem.mm_node = NULL;
	}
	list_del(&bo->lru);
	drm_bo_add_to_lru(bo);
	mutex_unlock(&dev->struct_mutex);

	DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
			_DRM_BO_FLAG_EVICTED);

out:
	return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int drm_bo_mem_force_space(struct drm_device *dev,
				  struct drm_bo_mem_reg *mem,
				  uint32_t mem_type, int no_wait)
{
	struct drm_mm_node *node;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_buffer_object *entry;
	struct drm_mem_type_manager *man = &bm->man[mem_type];
	struct list_head *lru;
	unsigned long num_pages = mem->num_pages;
	int ret;

	mutex_lock(&dev->struct_mutex);
	do {
		node = drm_mm_search_free(&man->manager, num_pages,
					  mem->page_alignment, 1);
		if (node)
			break;

		lru = &man->lru;
		if (lru->next == lru)
			break;

		entry = list_entry(lru->next, struct drm_buffer_object, lru);
		atomic_inc(&entry->usage);
		mutex_unlock(&dev->struct_mutex);
		mutex_lock(&entry->mutex);
		BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));

		ret = drm_bo_evict(entry, mem_type, no_wait);
		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_unlocked(&entry);
		if (ret)
			return ret;
		mutex_lock(&dev->struct_mutex);
	} while (1);

	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	node = drm_mm_get_block(node, num_pages, mem->page_alignment);
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return -ENOMEM;
	}

	mutex_unlock(&dev->struct_mutex);
	mem->mm_node = node;
	mem->mem_type = mem_type;
	return 0;
}

static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
				int disallow_fixed,
				uint32_t mem_type,
				uint64_t mask, uint32_t *res_mask)
{
	uint64_t cur_flags = drm_bo_type_flags(mem_type);
	uint64_t flag_diff;

	if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
		return 0;
	if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
		cur_flags |= DRM_BO_FLAG_CACHED;
	if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
		cur_flags |= DRM_BO_FLAG_MAPPABLE;
	if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
		DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);

	if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
		return 0;

	if (mem_type == DRM_BO_MEM_LOCAL) {
		*res_mask = cur_flags;
		return 1;
	}

	flag_diff = (mask ^ cur_flags);
	if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
		cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;

	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (!(mask & DRM_BO_FLAG_CACHED) ||
	     (mask & DRM_BO_FLAG_FORCE_CACHING)))
		return 0;

	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;

	*res_mask = cur_flags;
	return 1;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver.  If free space isn't found, then
 * drm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int drm_bo_mem_space(struct drm_buffer_object *bo,
		     struct drm_bo_mem_reg *mem, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_mem_type_manager *man;

	uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
	const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
	uint32_t i;
	uint32_t mem_type = DRM_BO_MEM_LOCAL;
	uint32_t cur_flags;
	int type_found = 0;
	int type_ok = 0;
	int has_eagain = 0;
	struct drm_mm_node *node = NULL;
	int ret;

	mem->mm_node = NULL;
	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		type_ok = drm_bo_mt_compatible(man,
					       bo->type == drm_bo_type_user,
					       mem_type, mem->mask,
					       &cur_flags);

		if (!type_ok)
			continue;

		if (mem_type == DRM_BO_MEM_LOCAL)
			break;

		if ((mem_type == bo->pinned_mem_type) &&
		    (bo->pinned_node != NULL)) {
			node = bo->pinned_node;
			break;
		}

		mutex_lock(&dev->struct_mutex);
		if (man->has_type && man->use_type) {
			type_found = 1;
			node = drm_mm_search_free(&man->manager, mem->num_pages,
						  mem->page_alignment, 1);
			if (node)
				node = drm_mm_get_block(node, mem->num_pages,
							mem->page_alignment);
		}
		mutex_unlock(&dev->struct_mutex);
		if (node)
			break;
	}

	if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
		mem->mm_node = node;
		mem->mem_type = mem_type;
		mem->flags = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	num_prios = dev->driver->bo_driver->num_mem_busy_prio;
	prios = dev->driver->bo_driver->mem_busy_prio;

	for (i = 0; i < num_prios; ++i) {
		mem_type = prios[i];
		man = &bm->man[mem_type];

		if (!man->has_type)
			continue;

		if (!drm_bo_mt_compatible(man,
					  bo->type == drm_bo_type_user,
					  mem_type,
					  mem->mask,
					  &cur_flags))
			continue;

		ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);

		if (ret == 0 && mem->mm_node) {
			mem->flags = cur_flags;
			return 0;
		}

		if (ret == -EAGAIN)
			has_eagain = 1;
	}

	ret = (has_eagain) ? -EAGAIN : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(drm_bo_mem_space);
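
/*
 * Hedged usage sketch (not part of the driver), modeled on
 * drm_bo_move_buffer() below: the caller fills in a drm_bo_mem_reg with the
 * size, mask and alignment it wants; on success either mem.mm_node points at
 * managed space or the buffer fits in DRM_BO_MEM_LOCAL:
 *
 *	struct drm_bo_mem_reg mem;
 *
 *	mem.num_pages = bo->num_pages;
 *	mem.size = mem.num_pages << PAGE_SHIFT;
 *	mem.mask = new_mem_flags;
 *	mem.page_alignment = bo->mem.page_alignment;
 *
 *	ret = drm_bo_mem_space(bo, &mem, no_wait);
 */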

static int drm_bo_new_mask(struct drm_buffer_object *bo,
			   uint64_t new_flags, uint64_t used_mask)
{
	uint32_t new_props;

	if (bo->type == drm_bo_type_user &&
	    ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
	     (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
		DRM_ERROR("User buffers require cache-coherent memory.\n");
		return -EINVAL;
	}

	if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
		DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to privileged processes.\n");
		return -EPERM;
	}

	if (likely(used_mask & DRM_BO_MASK_MEM) &&
	    (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
	    !DRM_SUSER(DRM_CURPROC)) {
		if (likely(bo->mem.flags & new_flags & used_mask &
			   DRM_BO_MASK_MEM))
			new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
				(bo->mem.flags & DRM_BO_MASK_MEM);
		else {
			DRM_ERROR("Incompatible memory type specification "
				  "for NO_EVICT buffer.\n");
			return -EPERM;
		}
	}

	if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
		DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
		return -EPERM;
	}

	new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
				 DRM_BO_FLAG_READ);

	if (!new_props) {
		DRM_ERROR("Invalid buffer object rwx properties\n");
		return -EINVAL;
	}

	bo->mem.mask = new_flags;
	return 0;
}

/*
 * Call dev->struct_mutex locked.
 */

struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
					      uint32_t handle, int check_owner)
{
	struct drm_user_object *uo;
	struct drm_buffer_object *bo;

	uo = drm_lookup_user_object(file_priv, handle);

	if (!uo || (uo->type != drm_buffer_type)) {
		DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
		return NULL;
	}

	if (check_owner && file_priv != uo->owner) {
		if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
			return NULL;
	}

	bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
	atomic_inc(&bo->usage);
	return bo;
}
EXPORT_SYMBOL(drm_lookup_buffer_object);
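
/*
 * Hedged usage sketch (not part of the driver): the lookup pattern used by
 * the ioctl helpers later in this file. The lookup takes a usage reference
 * that the caller must drop when done:
 *
 *	mutex_lock(&dev->struct_mutex);
 *	bo = drm_lookup_buffer_object(file_priv, handle, 1);
 *	mutex_unlock(&dev->struct_mutex);
 *	if (!bo)
 *		return -EINVAL;
 *
 *	... use bo ...
 *
 *	drm_bo_usage_deref_unlocked(&bo);
 */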

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
 * Unlike drm_bo_busy(), it does not do any fence flushing.
 */

static int drm_bo_quick_busy(struct drm_buffer_object *bo)
{
	struct drm_fence_object *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		return 1;
	}
	return 0;
}

/*
 * Call bo->mutex locked.
 * Returns 1 if the buffer is currently being rendered to or from, 0 otherwise.
 */

static int drm_bo_busy(struct drm_buffer_object *bo)
{
	struct drm_fence_object *fence = bo->fence;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (fence) {
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
		if (drm_fence_object_signaled(fence, bo->fence_type)) {
			drm_fence_usage_deref_unlocked(&bo->fence);
			return 0;
		}
		return 1;
	}
	return 0;
}

static int drm_bo_evict_cached(struct drm_buffer_object *bo)
{
	int ret = 0;

	BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (bo->mem.mm_node)
		ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
	return ret;
}

/*
 * Wait until a buffer is unmapped.
 */

static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
{
	int ret = 0;

	if ((atomic_read(&bo->mapped) >= 0) && no_wait)
		return -EBUSY;

	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    atomic_read(&bo->mapped) == -1);

	if (ret == -EINTR)
		ret = -EAGAIN;

	return ret;
}
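
/*
 * Illustrative note (not part of the driver): bo->mapped is a counter that
 * starts at -1 while the buffer is unmapped, which is what the wait condition
 * above and the atomic_inc_and_test() / atomic_add_negative() pairs in the
 * map and unmap paths below rely on, roughly:
 *
 *	unmapped:	atomic_read(&bo->mapped) == -1
 *	first map:	atomic_inc_and_test(&bo->mapped) returns true
 *	last unmap:	atomic_add_negative(-1, &bo->mapped) returns true,
 *			and the unmapper wakes bo->event_queue
 */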

static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	mutex_unlock(&bo->mutex);
	return ret;
}

/*
 * Wait until a buffer scheduled to be fenced moves off the unfenced list.
 * Until then, we cannot really do anything with it except delete it.
 */

static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
				int eagain_if_wait)
{
	int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);

	if (ret && no_wait)
		return -EBUSY;
	else if (!ret)
		return 0;

	ret = 0;
	mutex_unlock(&bo->mutex);
	DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
		    !drm_bo_check_unfenced(bo));
	mutex_lock(&bo->mutex);
	if (ret == -EINTR)
		return -EAGAIN;
	ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
	if (ret) {
		DRM_ERROR("Timeout waiting for buffer to become fenced\n");
		return -EBUSY;
	}
	if (eagain_if_wait)
		return -EAGAIN;

	return 0;
}
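
/*
 * Hedged usage sketch (not part of the driver): the pattern used by the map
 * and validate paths below before touching a possibly unfenced buffer:
 *
 *	mutex_lock(&bo->mutex);
 *	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
 *	if (ret)
 *		goto out;
 *	... the buffer is now off the unfenced list ...
 * out:
 *	mutex_unlock(&bo->mutex);
 */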

/*
 * Fill in the ioctl reply argument with buffer info.
 * Bo locked.
 */

void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
			 struct drm_bo_info_rep *rep)
{
	if (!rep)
		return;

	rep->handle = bo->base.hash.key;
	rep->flags = bo->mem.flags;
	rep->size = bo->num_pages * PAGE_SIZE;
	rep->offset = bo->offset;

	if (bo->type == drm_bo_type_dc)
		rep->arg_handle = bo->map_list.user_token;
	else
		rep->arg_handle = 0;

	rep->mask = bo->mem.mask;
	rep->buffer_start = bo->buffer_start;
	rep->fence_flags = bo->fence_type;
	rep->rep_flags = 0;
	rep->page_alignment = bo->mem.page_alignment;

	if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
		DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
				DRM_BO_REP_BUSY);
	}
}
EXPORT_SYMBOL(drm_bo_fill_rep_arg);

/*
 * Wait for buffer idle and register that we've mapped the buffer.
 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
 * so that if the client dies, the mapping is automatically
 * unregistered.
 */

static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
				 uint32_t map_flags, unsigned hint,
				 struct drm_bo_info_rep *rep)
{
	struct drm_buffer_object *bo;
	struct drm_device *dev = file_priv->head->dev;
	int ret = 0;
	int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);

	if (!bo)
		return -EINVAL;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);
	if (ret)
		goto out;

	/*
	 * If this returns true, we are currently unmapped.
	 * We need to do this test, because unmapping can
	 * be done without the bo->mutex held.
	 */

	while (1) {
		if (atomic_inc_and_test(&bo->mapped)) {
			if (no_wait && drm_bo_busy(bo)) {
				atomic_dec(&bo->mapped);
				ret = -EBUSY;
				goto out;
			}

			ret = drm_bo_wait(bo, 0, 0, no_wait);
			if (ret) {
				atomic_dec(&bo->mapped);
				goto out;
			}

			if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
				drm_bo_evict_cached(bo);

			break;
		} else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {

			/*
			 * We are already mapped with different flags.
			 * We need to wait for an unmap.
			 */

			ret = drm_bo_wait_unmapped(bo, no_wait);
			if (ret)
				goto out;

			continue;
		}
		break;
	}

	mutex_lock(&dev->struct_mutex);
	ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
	mutex_unlock(&dev->struct_mutex);
	if (ret) {
		if (atomic_add_negative(-1, &bo->mapped))
			wake_up_all(&bo->event_queue);

	} else
		drm_bo_fill_rep_arg(bo, rep);
out:
	mutex_unlock(&bo->mutex);
	drm_bo_usage_deref_unlocked(&bo);
	return ret;
}

static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
{
	struct drm_device *dev = file_priv->head->dev;
	struct drm_buffer_object *bo;
	struct drm_ref_object *ro;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);

	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	if (!bo) {
		ret = -EINVAL;
		goto out;
	}

	ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
	if (!ro) {
		ret = -EINVAL;
		goto out;
	}

	drm_remove_ref_object(file_priv, ro);
	drm_bo_usage_deref_locked(&bo);
out:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

/*
 * Call with dev->struct_mutex held.
 */

static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
					 struct drm_user_object *uo,
					 enum drm_ref_type action)
{
	struct drm_buffer_object *bo =
	    drm_user_object_entry(uo, struct drm_buffer_object, base);

	/*
	 * We DON'T want to take bo->mutex here, because we want to
	 * hold it while waiting for the buffer to become unmapped.
	 */

	BUG_ON(action != _DRM_REF_TYPE1);

	if (atomic_add_negative(-1, &bo->mapped))
		wake_up_all(&bo->event_queue);
}
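
/*
 * Illustrative note (not part of the driver): the mapping lifetime is tied to
 * a ref object. drm_buffer_object_map() registers it with
 * drm_add_ref_object(..., _DRM_REF_TYPE1), drm_buffer_object_unmap() removes
 * it again, and drm_buffer_user_object_unmap() above runs as the release
 * action, so a dying client's mapping count is dropped automatically:
 *
 *	drm_buffer_object_map(file_priv, handle, map_flags, hint, &rep);
 *	... client uses the mapping ...
 *	drm_buffer_object_unmap(file_priv, handle);
 */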

/*
 * bo->mutex locked.
 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
 */

int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
		       int no_wait, int move_unfenced)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	int ret = 0;
	struct drm_bo_mem_reg mem;

	/*
	 * Flush outstanding fences.
	 */

	drm_bo_busy(bo);

	/*
	 * Wait for outstanding fences.
	 */

	ret = drm_bo_wait(bo, 0, 0, no_wait);
	if (ret)
		return ret;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = new_mem_flags;
	mem.page_alignment = bo->mem.page_alignment;

	mutex_lock(&bm->evict_mutex);
	mutex_lock(&dev->struct_mutex);
	list_del_init(&bo->lru);
	mutex_unlock(&dev->struct_mutex);

	/*
	 * Determine where to move the buffer.
	 */
	ret = drm_bo_mem_space(bo, &mem, no_wait);
	if (ret)
		goto out_unlock;

	ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);

out_unlock:
	mutex_lock(&dev->struct_mutex);
	if (ret || !move_unfenced) {
		if (mem.mm_node) {
			if (mem.mm_node != bo->pinned_node)
				drm_mm_put_block(mem.mm_node);
			mem.mm_node = NULL;
		}
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			wake_up_all(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
		}
	} else {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
	}
	mutex_unlock(&dev->struct_mutex);
	mutex_unlock(&bm->evict_mutex);
	return ret;
}

static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
{
	uint32_t flag_diff = (mem->mask ^ mem->flags);

	if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
		return 0;
	if ((flag_diff & DRM_BO_FLAG_CACHED) &&
	    (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
	     (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
		return 0;

	if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
	    ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
	     (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
		return 0;
	return 1;
}

/*
 * bo locked.
 */

static int drm_buffer_object_validate(struct drm_buffer_object *bo,
				      uint32_t fence_class,
				      int move_unfenced, int no_wait)
{
	struct drm_device *dev = bo->dev;
	struct drm_buffer_manager *bm = &dev->bm;
	struct drm_bo_driver *driver = dev->driver->bo_driver;
	uint32_t ftype;
	int ret;

	DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
		  (unsigned long long) bo->mem.mask,
		  (unsigned long long) bo->mem.flags);

	ret = driver->fence_type(bo, &fence_class, &ftype);

	if (ret) {
		DRM_ERROR("Driver did not support given buffer permissions\n");
		return ret;
	}

	/*
	 * We're switching command submission mechanism,
	 * or cannot simply rely on the hardware serializing for us.
	 *
	 * Insert a driver-dependent barrier or wait for buffer idle.
	 */

	if ((fence_class != bo->fence_class) ||
	    ((ftype ^ bo->fence_type) & bo->fence_type)) {

		ret = -EINVAL;
		if (driver->command_stream_barrier) {
			ret = driver->command_stream_barrier(bo,
							     fence_class,
							     ftype,
							     no_wait);
		}
		if (ret)
			ret = drm_bo_wait(bo, 0, 0, no_wait);

		if (ret)
			return ret;

	}

	bo->new_fence_class = fence_class;
	bo->new_fence_type = ftype;

	ret = drm_bo_wait_unmapped(bo, no_wait);
	if (ret) {
		DRM_ERROR("Timed out waiting for buffer unmap.\n");
		return ret;
	}

	/*
	 * Check whether we need to move buffer.
	 */

	if (!drm_bo_mem_compat(&bo->mem)) {
		ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
					 move_unfenced);
		if (ret) {
			if (ret != -EAGAIN)
				DRM_ERROR("Failed moving buffer.\n");
			if (ret == -ENOMEM)
				DRM_ERROR("Out of aperture space.\n");
			return ret;
		}
	}

	/*
	 * Pinned buffers.
	 */

	if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
		bo->pinned_mem_type = bo->mem.mem_type;
		mutex_lock(&dev->struct_mutex);
		list_del_init(&bo->pinned_lru);
		drm_bo_add_to_pinned_lru(bo);

		if (bo->pinned_node != bo->mem.mm_node) {
			if (bo->pinned_node != NULL)
				drm_mm_put_block(bo->pinned_node);
			bo->pinned_node = bo->mem.mm_node;
		}

		mutex_unlock(&dev->struct_mutex);

	} else if (bo->pinned_node != NULL) {

		mutex_lock(&dev->struct_mutex);

		if (bo->pinned_node != bo->mem.mm_node)
			drm_mm_put_block(bo->pinned_node);

		list_del_init(&bo->pinned_lru);
		bo->pinned_node = NULL;
		mutex_unlock(&dev->struct_mutex);

	}

	/*
	 * We might need to add a TTM.
	 */

	if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
		ret = drm_bo_add_ttm(bo);
		if (ret)
			return ret;
	}
	DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);

	/*
	 * Finally, adjust lru to be sure.
	 */

	mutex_lock(&dev->struct_mutex);
	list_del(&bo->lru);
	if (move_unfenced) {
		list_add_tail(&bo->lru, &bm->unfenced);
		DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
				_DRM_BO_FLAG_UNFENCED);
	} else {
		drm_bo_add_to_lru(bo);
		if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
			wake_up_all(&bo->event_queue);
			DRM_FLAG_MASKED(bo->priv_flags, 0,
					_DRM_BO_FLAG_UNFENCED);
		}
	}
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

int drm_bo_do_validate(struct drm_buffer_object *bo,
		       uint64_t flags, uint64_t mask, uint32_t hint,
		       uint32_t fence_class,
		       int no_wait,
		       struct drm_bo_info_rep *rep)
{
	int ret;

	mutex_lock(&bo->mutex);
	ret = drm_bo_wait_unfenced(bo, no_wait, 0);

	if (ret)
		goto out;

	DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
	ret = drm_bo_new_mask(bo, flags, mask);
	if (ret)
		goto out;

	ret = drm_buffer_object_validate(bo,
					 fence_class,
					 !(hint & DRM_BO_HINT_DONT_FENCE),
					 no_wait);
out:
	if (rep)
		drm_bo_fill_rep_arg(bo, rep);

	mutex_unlock(&bo->mutex);
	return ret;
}
EXPORT_SYMBOL(drm_bo_do_validate);
1582
1583
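     /*
      * drm_bo_handle_validate:
      *
      * Look up the buffer object behind @handle and validate it.  On
      * success, if @bo_rep is non-NULL the reference taken by the lookup
      * is handed over to the caller, who must drop it with
      * drm_bo_usage_deref_unlocked(); otherwise it is released here.
      */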
1584 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1585                            uint32_t fence_class,
1586                            uint64_t flags, uint64_t mask,
1587                            uint32_t hint,
1588                            int use_old_fence_class,
1589                            struct drm_bo_info_rep *rep,
1590                            struct drm_buffer_object **bo_rep)
1591 {
1592         struct drm_device *dev = file_priv->head->dev;
1593         struct drm_buffer_object *bo;
1594         int ret;
1595         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1596
1597         mutex_lock(&dev->struct_mutex);
1598         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1599         mutex_unlock(&dev->struct_mutex);
1600
1601         if (!bo)
1602                 return -EINVAL;
1603
1604         if (use_old_fence_class)
1605                 fence_class = bo->fence_class;
1606
1607         /*
1608          * Only the creator may pin a buffer; strip NO_EVICT/NO_MOVE otherwise.
1609          */
1610
1611         if (bo->base.owner != file_priv)
1612                 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1613
1614
1615         ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
1616                                  no_wait, rep);
1617
1618         if (!ret && bo_rep)
1619                 *bo_rep = bo;
1620         else
1621                 drm_bo_usage_deref_unlocked(&bo);
1622
1623         return ret;
1624 }
1625 EXPORT_SYMBOL(drm_bo_handle_validate);
1626
1627 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1628                               struct drm_bo_info_rep *rep)
1629 {
1630         struct drm_device *dev = file_priv->head->dev;
1631         struct drm_buffer_object *bo;
1632
1633         mutex_lock(&dev->struct_mutex);
1634         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1635         mutex_unlock(&dev->struct_mutex);
1636
1637         if (!bo)
1638                 return -EINVAL;
1639
1640         mutex_lock(&bo->mutex);
1641         if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1642                 (void)drm_bo_busy(bo);
1643         drm_bo_fill_rep_arg(bo, rep);
1644         mutex_unlock(&bo->mutex);
1645         drm_bo_usage_deref_unlocked(&bo);
1646         return 0;
1647 }
1648
1649 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1650                               uint32_t hint,
1651                               struct drm_bo_info_rep *rep)
1652 {
1653         struct drm_device *dev = file_priv->head->dev;
1654         struct drm_buffer_object *bo;
1655         int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1656         int ret;
1657
1658         mutex_lock(&dev->struct_mutex);
1659         bo = drm_lookup_buffer_object(file_priv, handle, 1);
1660         mutex_unlock(&dev->struct_mutex);
1661
1662         if (!bo)
1663                 return -EINVAL;
1664
1665         mutex_lock(&bo->mutex);
1666         ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1667         if (ret)
1668                 goto out;
1669         ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1670         if (ret)
1671                 goto out;
1672
1673         drm_bo_fill_rep_arg(bo, rep);
1674
1675 out:
1676         mutex_unlock(&bo->mutex);
1677         drm_bo_usage_deref_unlocked(&bo);
1678         return ret;
1679 }
1680
1681 static int drm_bo_reserve_size(struct drm_device *dev,
1682                                int user_bo,
1683                                unsigned long num_pages,
1684                                unsigned long *size)
1685 {
1686         struct drm_bo_driver *driver = dev->driver->bo_driver;
1687
1688         *size = drm_size_align(sizeof(struct drm_buffer_object)) +
1689                 /* Always account for a TTM, even for fixed memory types */
1690                 drm_ttm_size(dev, num_pages, user_bo) +
1691                 /* user space mapping structure */
1692                 drm_size_align(sizeof(drm_local_map_t)) +
1693                 /* file offset space, aperture space, pinned space */
1694                 3*drm_size_align(sizeof(struct drm_mm_node *)) +
1695                 /* ttm backend */
1696                 driver->backend_size(dev, num_pages);
1697
1698         return drm_alloc_memctl(*size);
1699 }
1700
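     /*
      * drm_buffer_object_create:
      *
      * Allocate and initialize a buffer object of @size bytes, rounded up
      * to whole pages (including the sub-page offset of @buffer_start).
      * The object starts out cached and mappable in DRM_BO_MEM_LOCAL and
      * is immediately validated against @mask.  On success it is returned
      * in @buf_obj with a usage count of one.
      */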
1701 int drm_buffer_object_create(struct drm_device *dev,
1702                              unsigned long size,
1703                              enum drm_bo_type type,
1704                              uint64_t mask,
1705                              uint32_t hint,
1706                              uint32_t page_alignment,
1707                              unsigned long buffer_start,
1708                              struct drm_buffer_object **buf_obj)
1709 {
1710         struct drm_buffer_manager *bm = &dev->bm;
1711         struct drm_buffer_object *bo;
1712         int ret = 0;
1713         unsigned long num_pages;
1714         unsigned long reserved_size;
1715
1716         size += buffer_start & ~PAGE_MASK;
1717         num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1718         if (num_pages == 0) {
1719                 DRM_ERROR("Illegal buffer object size.\n");
1720                 return -EINVAL;
1721         }
1722
1723         ret = drm_bo_reserve_size(dev, type == drm_bo_type_user,
1724                                   num_pages, &reserved_size);
1725
1726         if (ret) {
1727                 DRM_DEBUG("Failed reserving space for buffer object.\n");
1728                 return ret;
1729         }
1730
1731         bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1732
1733         if (!bo) {
1734                 drm_bo_unreserve_size(num_pages);
1735                 return -ENOMEM;
1736         }
1737
1738         mutex_init(&bo->mutex);
1739         mutex_lock(&bo->mutex);
1740
1741         bo->reserved_size = reserved_size;
1742         atomic_set(&bo->usage, 1);
1743         atomic_set(&bo->mapped, -1);
1744         DRM_INIT_WAITQUEUE(&bo->event_queue);
1745         INIT_LIST_HEAD(&bo->lru);
1746         INIT_LIST_HEAD(&bo->pinned_lru);
1747         INIT_LIST_HEAD(&bo->ddestroy);
1748 #ifdef DRM_ODD_MM_COMPAT
1749         INIT_LIST_HEAD(&bo->p_mm_list);
1750         INIT_LIST_HEAD(&bo->vma_list);
1751 #endif
1752         bo->dev = dev;
1753         bo->type = type;
1754         bo->num_pages = num_pages;
1755         bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1756         bo->mem.num_pages = bo->num_pages;
1757         bo->mem.mm_node = NULL;
1758         bo->mem.page_alignment = page_alignment;
1759         bo->buffer_start = buffer_start & PAGE_MASK;
1760         bo->priv_flags = 0;
1761         bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1762                 DRM_BO_FLAG_MAPPABLE;
1763         bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1764                 DRM_BO_FLAG_MAPPABLE;
1765         atomic_inc(&bm->count);
1766         ret = drm_bo_new_mask(bo, mask, mask);
1767         if (ret)
1768                 goto out_err;
1769
1770         if (bo->type == drm_bo_type_dc) {
1771                 mutex_lock(&dev->struct_mutex);
1772                 ret = drm_bo_setup_vm_locked(bo);
1773                 mutex_unlock(&dev->struct_mutex);
1774                 if (ret)
1775                         goto out_err;
1776         }
1777
1778         ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1779         if (ret)
1780                 goto out_err;
1781
1782         mutex_unlock(&bo->mutex);
1783         *buf_obj = bo;
1784         return 0;
1785
1786 out_err:
1787         mutex_unlock(&bo->mutex);
1788
1789         drm_bo_usage_deref_unlocked(&bo);
1790         return ret;
1791 }
1792 EXPORT_SYMBOL(drm_buffer_object_create);
1793
1794
1795 static int drm_bo_add_user_object(struct drm_file *file_priv,
1796                                   struct drm_buffer_object *bo, int shareable)
1797 {
1798         struct drm_device *dev = file_priv->head->dev;
1799         int ret;
1800
1801         mutex_lock(&dev->struct_mutex);
1802         ret = drm_add_user_object(file_priv, &bo->base, shareable);
1803         if (ret)
1804                 goto out;
1805
1806         bo->base.remove = drm_bo_base_deref_locked;
1807         bo->base.type = drm_buffer_type;
1808         bo->base.ref_struct_locked = NULL;
1809         bo->base.unref = drm_buffer_user_object_unmap;
1810
1811 out:
1812         mutex_unlock(&dev->struct_mutex);
1813         return ret;
1814 }
1815
1816 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1817 {
1818         struct drm_bo_create_arg *arg = data;
1819         struct drm_bo_create_req *req = &arg->d.req;
1820         struct drm_bo_info_rep *rep = &arg->d.rep;
1821         struct drm_buffer_object *entry;
1822         enum drm_bo_type bo_type;
1823         int ret = 0;
1824
1825         DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1826             (int)(req->size / 1024), req->page_alignment * 4);
1827
1828         if (!dev->bm.initialized) {
1829                 DRM_ERROR("Buffer object manager is not initialized.\n");
1830                 return -EINVAL;
1831         }
1832
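             /*
              * A non-zero buffer_start means user-supplied backing storage
              * (drm_bo_type_user); such buffers may not be shared.
              */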
1833         bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
1834
1835         if (bo_type == drm_bo_type_user)
1836                 req->mask &= ~DRM_BO_FLAG_SHAREABLE;
1837
1838         ret = drm_buffer_object_create(file_priv->head->dev,
1839                                        req->size, bo_type, req->mask,
1840                                        req->hint, req->page_alignment,
1841                                        req->buffer_start, &entry);
1842         if (ret)
1843                 goto out;
1844
1845         ret = drm_bo_add_user_object(file_priv, entry,
1846                                      req->mask & DRM_BO_FLAG_SHAREABLE);
1847         if (ret) {
1848                 drm_bo_usage_deref_unlocked(&entry);
1849                 goto out;
1850         }
1851
1852         mutex_lock(&entry->mutex);
1853         drm_bo_fill_rep_arg(entry, rep);
1854         mutex_unlock(&entry->mutex);
1855
1856 out:
1857         return ret;
1858 }
1859
1860 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1861                            void *data, struct drm_file *file_priv)
1862 {
1863         struct drm_bo_map_wait_idle_arg *arg = data;
1864         struct drm_bo_info_req *req = &arg->d.req;
1865         struct drm_bo_info_rep *rep = &arg->d.rep;
1866         int ret;
1867
1868         if (!dev->bm.initialized) {
1869                 DRM_ERROR("Buffer object manager is not initialized.\n");
1870                 return -EINVAL;
1871         }
1872
1873         ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
1874         if (ret)
1875                 return ret;
1876
1877         ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
1878                                      req->flags,
1879                                      req->mask,
1880                                      req->hint | DRM_BO_HINT_DONT_FENCE,
1881                                      1,
1882                                      rep, NULL);
1883
1884         (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1885         if (ret)
1886                 return ret;
1887
1888         return 0;
1889 }
1890
1891 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1892 {
1893         struct drm_bo_map_wait_idle_arg *arg = data;
1894         struct drm_bo_info_req *req = &arg->d.req;
1895         struct drm_bo_info_rep *rep = &arg->d.rep;
1896         int ret;
1897         if (!dev->bm.initialized) {
1898                 DRM_ERROR("Buffer object manager is not initialized.\n");
1899                 return -EINVAL;
1900         }
1901
1902         ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1903                                     req->hint, rep);
1904         if (ret)
1905                 return ret;
1906
1907         return 0;
1908 }
1909
1910 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1911 {
1912         struct drm_bo_handle_arg *arg = data;
1913         int ret;
1914         if (!dev->bm.initialized) {
1915                 DRM_ERROR("Buffer object manager is not initialized.\n");
1916                 return -EINVAL;
1917         }
1918
1919         ret = drm_buffer_object_unmap(file_priv, arg->handle);
1920         return ret;
1921 }
1922
1923
1924 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1925 {
1926         struct drm_bo_reference_info_arg *arg = data;
1927         struct drm_bo_handle_arg *req = &arg->d.req;
1928         struct drm_bo_info_rep *rep = &arg->d.rep;
1929         struct drm_user_object *uo;
1930         int ret;
1931
1932         if (!dev->bm.initialized) {
1933                 DRM_ERROR("Buffer object manager is not initialized.\n");
1934                 return -EINVAL;
1935         }
1936
1937         ret = drm_user_object_ref(file_priv, req->handle,
1938                                   drm_buffer_type, &uo);
1939         if (ret)
1940                 return ret;
1941
1942         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1943         if (ret)
1944                 return ret;
1945
1946         return 0;
1947 }
1948
1949 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1950 {
1951         struct drm_bo_handle_arg *arg = data;
1952         int ret = 0;
1953
1954         if (!dev->bm.initialized) {
1955                 DRM_ERROR("Buffer object manager is not initialized.\n");
1956                 return -EINVAL;
1957         }
1958
1959         ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1960         return ret;
1961 }
1962
1963 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1964 {
1965         struct drm_bo_reference_info_arg *arg = data;
1966         struct drm_bo_handle_arg *req = &arg->d.req;
1967         struct drm_bo_info_rep *rep = &arg->d.rep;
1968         int ret;
1969
1970         if (!dev->bm.initialized) {
1971                 DRM_ERROR("Buffer object manager is not initialized.\n");
1972                 return -EINVAL;
1973         }
1974
1975         ret = drm_bo_handle_info(file_priv, req->handle, rep);
1976         if (ret)
1977                 return ret;
1978
1979         return 0;
1980 }
1981
1982 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1983 {
1984         struct drm_bo_map_wait_idle_arg *arg = data;
1985         struct drm_bo_info_req *req = &arg->d.req;
1986         struct drm_bo_info_rep *rep = &arg->d.rep;
1987         int ret;
1988         if (!dev->bm.initialized) {
1989                 DRM_ERROR("Buffer object manager is not initialized.\n");
1990                 return -EINVAL;
1991         }
1992
1993         ret = drm_bo_handle_wait(file_priv, req->handle,
1994                                  req->hint, rep);
1995         if (ret)
1996                 return ret;
1997
1998         return 0;
1999 }
2000
2001 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2002                              uint32_t mem_type,
2003                              int free_pinned,
2004                              int allow_errors)
2005 {
2006         struct drm_device *dev = bo->dev;
2007         int ret = 0;
2008
2009         mutex_lock(&bo->mutex);
2010
2011         ret = drm_bo_expire_fence(bo, allow_errors);
2012         if (ret)
2013                 goto out;
2014
2015         if (free_pinned) {
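                     /*
                      * Drop the pinned reservation: a node still backing the
                      * buffer is left to the normal eviction / destroy path,
                      * any other pinned node is returned to the manager here.
                      */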
2016                 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2017                 mutex_lock(&dev->struct_mutex);
2018                 list_del_init(&bo->pinned_lru);
2019                 if (bo->pinned_node == bo->mem.mm_node)
2020                         bo->pinned_node = NULL;
2021                 if (bo->pinned_node != NULL) {
2022                         drm_mm_put_block(bo->pinned_node);
2023                         bo->pinned_node = NULL;
2024                 }
2025                 mutex_unlock(&dev->struct_mutex);
2026         }
2027
2028         if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2029                 DRM_ERROR("A DRM_BO_FLAG_NO_EVICT buffer was present at "
2030                           "cleanup. Removing the flag and evicting.\n");
2031                 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2032                 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
2033         }
2034
2035         if (bo->mem.mem_type == mem_type)
2036                 ret = drm_bo_evict(bo, mem_type, 0);
2037
2038         if (ret) {
2039                 if (allow_errors) {
2040                         goto out;
2041                 } else {
2042                         ret = 0;
2043                         DRM_ERROR("Cleanup eviction failed\n");
2044                 }
2045         }
2046
2047 out:
2048         mutex_unlock(&bo->mutex);
2049         return ret;
2050 }
2051
2052
2053 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2054                                          int pinned_list)
2055 {
2056         if (pinned_list)
2057                 return list_entry(list, struct drm_buffer_object, pinned_lru);
2058         else
2059                 return list_entry(list, struct drm_buffer_object, lru);
2060 }
2061
2062 /*
2063  * dev->struct_mutex locked.
2064  */
2065
2066 static int drm_bo_force_list_clean(struct drm_device *dev,
2067                                    struct list_head *head,
2068                                    unsigned mem_type,
2069                                    int free_pinned,
2070                                    int allow_errors,
2071                                    int pinned_list)
2072 {
2073         struct list_head *list, *next, *prev;
2074         struct drm_buffer_object *entry, *nentry;
2075         int ret;
2076         int do_restart;
2077
2078         /*
2079          * The list traversal is a bit odd here, because an item may
2080          * disappear from the list when we release the struct_mutex or
2081          * when we decrease the usage count. Also we're not guaranteed
2082          * to drain pinned lists, so we can't always restart.
2083          */
2084
2085 restart:
2086         nentry = NULL;
2087         list_for_each_safe(list, next, head) {
2088                 prev = list->prev;
2089
2090                 entry = (nentry != NULL) ? nentry : drm_bo_entry(list, pinned_list);
2091                 atomic_inc(&entry->usage);
2092                 if (nentry) {
2093                         atomic_dec(&nentry->usage);
2094                         nentry = NULL;
2095                 }
2096
2097                 /*
2098                  * Protect the next item from destruction, so we can check
2099                  * its list pointers later on.
2100                  */
2101
2102                 if (next != head) {
2103                         nentry = drm_bo_entry(next, pinned_list);
2104                         atomic_inc(&nentry->usage);
2105                 }
2106                 mutex_unlock(&dev->struct_mutex);
2107
2108                 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2109                                         allow_errors);
2110                 mutex_lock(&dev->struct_mutex);
2111
2112                 drm_bo_usage_deref_locked(&entry);
2113                 if (ret)
2114                         return ret;
2115
2116                 /*
2117                  * Has the next item disappeared from the list?
2118                  */
2119
2120                 do_restart = ((next->prev != list) && (next->prev != prev));
2121
2122                 if (nentry != NULL && do_restart)
2123                         drm_bo_usage_deref_locked(&nentry);
2124
2125                 if (do_restart)
2126                         goto restart;
2127         }
2128         return 0;
2129 }
2130
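     /*
      * drm_bo_clean_mm:
      *
      * Take down memory manager type @mem_type: evict all buffers of that
      * type (freeing pinned reservations as well) and then tear down the
      * underlying drm_mm.  Returns -EBUSY if the space could not be fully
      * drained.
      */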
2131 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
2132 {
2133         struct drm_buffer_manager *bm = &dev->bm;
2134         struct drm_mem_type_manager *man = &bm->man[mem_type];
2135         int ret = -EINVAL;
2136
2137         if (mem_type >= DRM_BO_MEM_TYPES) {
2138                 DRM_ERROR("Illegal memory type %d\n", mem_type);
2139                 return ret;
2140         }
2141
2142         if (!man->has_type) {
2143                 DRM_ERROR("Trying to take down uninitialized "
2144                           "memory manager type %u\n", mem_type);
2145                 return ret;
2146         }
2147         man->use_type = 0;
2148         man->has_type = 0;
2149
2150         ret = 0;
2151         if (mem_type > 0) {
2152                 BUG_ON(!list_empty(&bm->unfenced));
2153                 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2154                 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2155
2156                 if (drm_mm_clean(&man->manager)) {
2157                         drm_mm_takedown(&man->manager);
2158                 } else {
2159                         ret = -EBUSY;
2160                 }
2161         }
2162
2163         return ret;
2164 }
2165 EXPORT_SYMBOL(drm_bo_clean_mm);
2166
2167 /**
2168  * Evict all buffers of a particular mem_type, but leave memory manager
2169  * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2170  * point since we have the hardware lock.
2171  */
2172
2173 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2174 {
2175         int ret;
2176         struct drm_buffer_manager *bm = &dev->bm;
2177         struct drm_mem_type_manager *man = &bm->man[mem_type];
2178
2179         if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2180                 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2181                 return -EINVAL;
2182         }
2183
2184         if (!man->has_type) {
2185                 DRM_ERROR("Memory type %u has not been initialized.\n",
2186                           mem_type);
2187                 return 0;
2188         }
2189
2190         ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2191         if (ret)
2192                 return ret;
2193         ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2194
2195         return ret;
2196 }
2197
2198 int drm_bo_init_mm(struct drm_device *dev,
2199                    unsigned type,
2200                    unsigned long p_offset, unsigned long p_size)
2201 {
2202         struct drm_buffer_manager *bm = &dev->bm;
2203         int ret = -EINVAL;
2204         struct drm_mem_type_manager *man;
2205
2206         if (type >= DRM_BO_MEM_TYPES) {
2207                 DRM_ERROR("Illegal memory type %d\n", type);
2208                 return ret;
2209         }
2210
2211         man = &bm->man[type];
2212         if (man->has_type) {
2213                 DRM_ERROR("Memory manager already initialized for type %d\n",
2214                           type);
2215                 return ret;
2216         }
2217
2218         ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2219         if (ret)
2220                 return ret;
2221
2222         ret = 0;
2223         if (type != DRM_BO_MEM_LOCAL) {
2224                 if (!p_size) {
2225                         DRM_ERROR("Zero size memory manager type %d\n", type);
2226                         return -EINVAL;
2227                 }
2228                 ret = drm_mm_init(&man->manager, p_offset, p_size);
2229                 if (ret)
2230                         return ret;
2231         }
2232         man->has_type = 1;
2233         man->use_type = 1;
2234
2235         INIT_LIST_HEAD(&man->lru);
2236         INIT_LIST_HEAD(&man->pinned);
2237
2238         return 0;
2239 }
2240 EXPORT_SYMBOL(drm_bo_init_mm);
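
     /*
      * Illustrative only: a driver typically initializes its non-local
      * memory types either from its own setup code or via the
      * drm_mm_init_ioctl() below, e.g. something along the lines of
      *
      *      ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_p_size);
      *
      * where tt_p_size is a driver-chosen number of pages.  The local
      * type (DRM_BO_MEM_LOCAL) is set up by drm_bo_driver_init() itself.
      */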
2241
2242 /*
2243  * This function is intended to be called on drm driver unload.
2244  * If you decide to call it from lastclose, you must protect the call
2245  * from a potentially racing drm_bo_driver_init in firstopen.
2246  * (This may happen on X server restart).
2247  */
2248
2249 int drm_bo_driver_finish(struct drm_device *dev)
2250 {
2251         struct drm_buffer_manager *bm = &dev->bm;
2252         int ret = 0;
2253         unsigned i = DRM_BO_MEM_TYPES;
2254         struct drm_mem_type_manager *man;
2255
2256         mutex_lock(&dev->struct_mutex);
2257
2258         if (!bm->initialized)
2259                 goto out;
2260         bm->initialized = 0;
2261
2262         while (i--) {
2263                 man = &bm->man[i];
2264                 if (man->has_type) {
2265                         man->use_type = 0;
2266                         if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2267                                 ret = -EBUSY;
2268                                 DRM_ERROR("DRM memory manager type %d "
2269                                           "is not clean.\n", i);
2270                         }
2271                         man->has_type = 0;
2272                 }
2273         }
2274         mutex_unlock(&dev->struct_mutex);
2275
2276         if (!cancel_delayed_work(&bm->wq))
2277                 flush_scheduled_work();
2278
2279         mutex_lock(&dev->struct_mutex);
2280         drm_bo_delayed_delete(dev, 1);
2281         if (list_empty(&bm->ddestroy))
2282                 DRM_DEBUG("Delayed destroy list was clean\n");
2283
2284         if (list_empty(&bm->man[0].lru))
2285                 DRM_DEBUG("Swap list was clean\n");
2286
2287         if (list_empty(&bm->man[0].pinned))
2288                 DRM_DEBUG("NO_MOVE list was clean\n");
2289
2290         if (list_empty(&bm->unfenced))
2291                 DRM_DEBUG("Unfenced list was clean\n");
2292
2293 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2294         ClearPageReserved(bm->dummy_read_page);
2295 #endif
2296         __free_page(bm->dummy_read_page);
2297
2298 out:
2299         mutex_unlock(&dev->struct_mutex);
2300         return ret;
2301 }
2302 EXPORT_SYMBOL(drm_bo_driver_finish);
2303
2304 /*
2305  * This function is intended to be called on drm driver load.
2306  * If you decide to call it from firstopen, you must protect the call
2307  * from a potentially racing drm_bo_driver_finish in lastclose.
2308  * (This may happen on X server restart).
2309  */
2310
2311 int drm_bo_driver_init(struct drm_device *dev)
2312 {
2313         struct drm_bo_driver *driver = dev->driver->bo_driver;
2314         struct drm_buffer_manager *bm = &dev->bm;
2315         int ret = -EINVAL;
2316
2317         bm->dummy_read_page = NULL;
2318         drm_bo_init_lock(&bm->bm_lock);
2319         mutex_lock(&dev->struct_mutex);
2320         if (!driver)
2321                 goto out_unlock;
2322
2323         bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2324         if (!bm->dummy_read_page) {
2325                 ret = -ENOMEM;
2326                 goto out_unlock;
2327         }
2328
2329 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2330         SetPageReserved(bm->dummy_read_page);
2331 #endif
2332
2333         /*
2334          * Initialize the system memory buffer type.
2335          * Other types need to be driver / IOCTL initialized.
2336          */
2337         ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2338         if (ret)
2339                 goto out_unlock;
2340
2341 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2342         INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2343 #else
2344         INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2345 #endif
2346         bm->initialized = 1;
2347         bm->nice_mode = 1;
2348         atomic_set(&bm->count, 0);
2349         bm->cur_pages = 0;
2350         INIT_LIST_HEAD(&bm->unfenced);
2351         INIT_LIST_HEAD(&bm->ddestroy);
2352 out_unlock:
2353         mutex_unlock(&dev->struct_mutex);
2354         return ret;
2355 }
2356 EXPORT_SYMBOL(drm_bo_driver_init);
2357
2358 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2359 {
2360         struct drm_mm_init_arg *arg = data;
2361         struct drm_buffer_manager *bm = &dev->bm;
2362         struct drm_bo_driver *driver = dev->driver->bo_driver;
2363         int ret;
2364
2365         if (!driver) {
2366                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2367                 return -EINVAL;
2368         }
2369
2370         ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
2371         if (ret)
2372                 return ret;
2373
2374         ret = -EINVAL;
2375         if (arg->magic != DRM_BO_INIT_MAGIC) {
2376                 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2377                           "\tthe kernel DRM module. Please upgrade your libdrm.\n");
                     (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2378                 return -EINVAL;
2379         }
2380         if (arg->major != DRM_BO_INIT_MAJOR) {
2381                 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2382                           "\tversion don't match. Got %d, expected %d.\n",
2383                           arg->major, DRM_BO_INIT_MAJOR);
                     (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2384                 return -EINVAL;
2385         }
2386
2387         mutex_lock(&dev->struct_mutex);
2388         if (!bm->initialized) {
2389                 DRM_ERROR("DRM memory manager was not initialized.\n");
2390                 goto out;
2391         }
2392         if (arg->mem_type == 0) {
2393                 DRM_ERROR("System memory buffers already initialized.\n");
2394                 goto out;
2395         }
2396         ret = drm_bo_init_mm(dev, arg->mem_type,
2397                              arg->p_offset, arg->p_size);
2398
2399 out:
2400         mutex_unlock(&dev->struct_mutex);
2401         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2402
2403         if (ret)
2404                 return ret;
2405
2406         return 0;
2407 }
2408
2409 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2410 {
2411         struct drm_mm_type_arg *arg = data;
2412         struct drm_buffer_manager *bm = &dev->bm;
2413         struct drm_bo_driver *driver = dev->driver->bo_driver;
2414         int ret;
2415
2416         if (!driver) {
2417                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2418                 return -EINVAL;
2419         }
2420
2421         ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
2422         if (ret)
2423                 return ret;
2424
2425         mutex_lock(&dev->struct_mutex);
2426         ret = -EINVAL;
2427         if (!bm->initialized) {
2428                 DRM_ERROR("DRM memory manager was not initialized\n");
2429                 goto out;
2430         }
2431         if (arg->mem_type == 0) {
2432                 DRM_ERROR("No takedown for System memory buffers.\n");
2433                 goto out;
2434         }
2435         ret = 0;
2436         if (drm_bo_clean_mm(dev, arg->mem_type)) {
2437                 DRM_ERROR("Memory manager type %d not clean. "
2438                           "Delaying takedown\n", arg->mem_type);
2439         }
2440 out:
2441         mutex_unlock(&dev->struct_mutex);
2442         (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2443
2444         if (ret)
2445                 return ret;
2446
2447         return 0;
2448 }
2449
2450 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2451 {
2452         struct drm_mm_type_arg *arg = data;
2453         struct drm_bo_driver *driver = dev->driver->bo_driver;
2454         int ret;
2455
2456         if (!driver) {
2457                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2458                 return -EINVAL;
2459         }
2460
2461         if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2462                 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2463                 return -EINVAL;
2464         }
2465
2466         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2467                 ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
2468                 if (ret)
2469                         return ret;
2470         }
2471
2472         mutex_lock(&dev->struct_mutex);
2473         ret = drm_bo_lock_mm(dev, arg->mem_type);
2474         mutex_unlock(&dev->struct_mutex);
2475         if (ret) {
2476                 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2477                 return ret;
2478         }
2479
2480         return 0;
2481 }
2482
2483 int drm_mm_unlock_ioctl(struct drm_device *dev,
2484                         void *data,
2485                         struct drm_file *file_priv)
2486 {
2487         struct drm_mm_type_arg *arg = data;
2488         struct drm_bo_driver *driver = dev->driver->bo_driver;
2489         int ret;
2490
2491         if (!driver) {
2492                 DRM_ERROR("Buffer objects are not supported by this driver\n");
2493                 return -EINVAL;
2494         }
2495
2496         if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2497                 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2498                 if (ret)
2499                         return ret;
2500         }
2501
2502         return 0;
2503 }
2504
2505 /*
2506  * buffer object vm functions.
2507  */
2508
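     /*
      * A memory region is considered "PCI" (accessed through an I/O
      * aperture) unless it is a non-fixed type backed by system pages:
      * local memory, CMA-backed types, or mappings kept cached.
      */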
2509 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2510 {
2511         struct drm_buffer_manager *bm = &dev->bm;
2512         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2513
2514         if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2515                 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2516                         return 0;
2517
2518                 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2519                         return 0;
2520
2521                 if (mem->flags & DRM_BO_FLAG_CACHED)
2522                         return 0;
2523         }
2524         return 1;
2525 }
2526 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2527
2528 /**
2529  * Get the PCI offset for the buffer object memory.
2530  *
2531  * \param dev The drm device.
      * \param mem The buffer object memory region.
2532  * \param bus_base On return the base of the PCI region
2533  * \param bus_offset On return the byte offset into the PCI region
2534  * \param bus_size On return the byte size of the buffer object or zero if
2535  *     the buffer object memory is not accessible through a PCI region.
2536  * \return Failure indication.
2537  *
2538  * Returns -EINVAL if the buffer object is currently not mappable.
2539  * Otherwise returns zero.
2540  */
2541
2542 int drm_bo_pci_offset(struct drm_device *dev,
2543                       struct drm_bo_mem_reg *mem,
2544                       unsigned long *bus_base,
2545                       unsigned long *bus_offset, unsigned long *bus_size)
2546 {
2547         struct drm_buffer_manager *bm = &dev->bm;
2548         struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2549
2550         *bus_size = 0;
2551         if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2552                 return -EINVAL;
2553
2554         if (drm_mem_reg_is_pci(dev, mem)) {
2555                 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2556                 *bus_size = mem->num_pages << PAGE_SHIFT;
2557                 *bus_base = man->io_offset;
2558         }
2559
2560         return 0;
2561 }
2562
2563 /**
2564  * Kill all user-space virtual mappings of this buffer object.
2565  *
2566  * \param bo The buffer object.
2567  *
2568  * Call bo->mutex locked.
2569  */
2570
2571 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2572 {
2573         struct drm_device *dev = bo->dev;
2574         loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2575         loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2576
2577         if (!dev->dev_mapping)
2578                 return;
2579
2580         unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2581 }
2582
2583 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2584 {
2585         struct drm_map_list *list;
2586         drm_local_map_t *map;
2587         struct drm_device *dev = bo->dev;
2588
2589         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2590         if (bo->type != drm_bo_type_dc)
2591                 return;
2592
2593         list = &bo->map_list;
2594         if (list->user_token) {
2595                 drm_ht_remove_item(&dev->map_hash, &list->hash);
2596                 list->user_token = 0;
2597         }
2598         if (list->file_offset_node) {
2599                 drm_mm_put_block(list->file_offset_node);
2600                 list->file_offset_node = NULL;
2601         }
2602
2603         map = list->map;
2604         if (!map)
2605                 return;
2606
2607         drm_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2608         list->map = NULL;
2609         list->user_token = 0ULL;
2610         drm_bo_usage_deref_locked(&bo);
2611 }
2612
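     /*
      * Set up the fake mmap offset for a buffer object: reserve a range in
      * dev->offset_manager, hash it into dev->map_hash and publish the
      * resulting token (hash key << PAGE_SHIFT) as the offset user space
      * passes to mmap().  Called with dev->struct_mutex held.
      */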
2613 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2614 {
2615         struct drm_map_list *list = &bo->map_list;
2616         drm_local_map_t *map;
2617         struct drm_device *dev = bo->dev;
2618
2619         DRM_ASSERT_LOCKED(&dev->struct_mutex);
2620         list->map = drm_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2621         if (!list->map)
2622                 return -ENOMEM;
2623
2624         map = list->map;
2625         map->offset = 0;
2626         map->type = _DRM_TTM;
2627         map->flags = _DRM_REMOVABLE;
2628         map->size = bo->mem.num_pages * PAGE_SIZE;
2629         atomic_inc(&bo->usage);
2630         map->handle = (void *)bo;
2631
2632         list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2633                                                     bo->mem.num_pages, 0, 0);
2634
2635         if (!list->file_offset_node) {
2636                 drm_bo_takedown_vm_locked(bo);
2637                 return -ENOMEM;
2638         }
2639
2640         list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2641                                                   bo->mem.num_pages, 0);
2642         if (!list->file_offset_node) {
2643                 drm_bo_takedown_vm_locked(bo);
2644                 return -ENOMEM;
2645         }
2646
2647         list->hash.key = list->file_offset_node->start;
2648         if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2649                 drm_bo_takedown_vm_locked(bo);
2650                 return -ENOMEM;
2651         }
2652
2653         list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2654
2655         return 0;
2656 }
2657
2658 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2659                          struct drm_file *file_priv)
2660 {
2661         struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2662
2663         arg->major = DRM_BO_INIT_MAJOR;
2664         arg->minor = DRM_BO_INIT_MINOR;
2665         arg->patchlevel = DRM_BO_INIT_PATCH;
2666
2667         return 0;
2668 }