1 /**************************************************************************
3 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sub license, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
14 * The above copyright notice and this permission notice (including the
15 * next paragraph) shall be included in all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **************************************************************************/
28 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
34 * Locking may look a bit complicated but isn't really:
36 * The buffer usage atomic_t needs to be protected by dev->struct_mutex
37 * when there is a chance that it can be zero before or after the operation.
39 * dev->struct_mutex also protects all lists and list heads,
40 * hash tables and hash heads.
42 * bo->mutex protects the buffer object itself excluding the usage field.
43 * bo->mutex also protects the buffer list heads, so to manipulate those,
44 * we need both the bo->mutex and the dev->struct_mutex.
46 * Locking order is bo->mutex, dev->struct_mutex. Therefore list traversal
47 * is a bit complicated. When dev->struct_mutex is released to grab bo->mutex,
48 * the list traversal will, in general, need to be restarted.
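/*
 * Illustrative sketch (not part of the original file) of the restart /
 * reference-count pattern described above, modelled on
 * drm_putback_buffer_objects() further down.  The list walked and the work
 * done per entry are hypothetical.
 */
#if 0
static void example_walk_bo_list(struct drm_device *dev, struct list_head *head)
{
	struct drm_buffer_object *entry, *next;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry_safe(entry, next, head, lru) {
		atomic_inc(&entry->usage);	/* keep the entry alive */
		mutex_unlock(&dev->struct_mutex);

		/* bo->mutex must be taken before dev->struct_mutex. */
		mutex_lock(&entry->mutex);
		mutex_lock(&dev->struct_mutex);

		/* ... manipulate entry->lru and other protected state ... */

		mutex_unlock(&entry->mutex);
		drm_bo_usage_deref_locked(&entry);
	}
	mutex_unlock(&dev->struct_mutex);
}
#endif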
52 static void drm_bo_destroy_locked(struct drm_buffer_object *bo);
53 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo);
54 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
55 static void drm_bo_unmap_virtual(struct drm_buffer_object *bo);
57 static inline uint64_t drm_bo_type_flags(unsigned type)
59 return (1ULL << (24 + type));
63 * bo locked. dev->struct_mutex locked.
66 void drm_bo_add_to_pinned_lru(struct drm_buffer_object *bo)
68 struct drm_mem_type_manager *man;
70 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
71 DRM_ASSERT_LOCKED(&bo->mutex);
73 man = &bo->dev->bm.man[bo->pinned_mem_type];
74 list_add_tail(&bo->pinned_lru, &man->pinned);
77 void drm_bo_add_to_lru(struct drm_buffer_object *bo)
79 struct drm_mem_type_manager *man;
81 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
83 if (!(bo->mem.mask & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT))
84 || bo->mem.mem_type != bo->pinned_mem_type) {
85 man = &bo->dev->bm.man[bo->mem.mem_type];
86 list_add_tail(&bo->lru, &man->lru);
88 INIT_LIST_HEAD(&bo->lru);
92 static int drm_bo_vm_pre_move(struct drm_buffer_object *bo, int old_is_pci)
94 #ifdef DRM_ODD_MM_COMPAT
97 if (!bo->map_list.map)
100 ret = drm_bo_lock_kmm(bo);
103 drm_bo_unmap_virtual(bo);
105 drm_bo_finish_unmap(bo);
107 if (!bo->map_list.map)
110 drm_bo_unmap_virtual(bo);
115 static void drm_bo_vm_post_move(struct drm_buffer_object *bo)
117 #ifdef DRM_ODD_MM_COMPAT
120 if (!bo->map_list.map)
123 ret = drm_bo_remap_bound(bo);
125 DRM_ERROR("Failed to remap a bound buffer object.\n"
126 "\tThis might cause a sigbus later.\n");
128 drm_bo_unlock_kmm(bo);
133 * Call bo->mutex locked.
136 static int drm_bo_add_ttm(struct drm_buffer_object *bo)
138 struct drm_device *dev = bo->dev;
141 DRM_ASSERT_LOCKED(&bo->mutex);
146 case drm_bo_type_kernel:
147 bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
151 case drm_bo_type_user:
152 bo->ttm = drm_ttm_init(dev, bo->num_pages << PAGE_SHIFT);
156 ret = drm_ttm_set_user(bo->ttm, current,
157 bo->mem.mask & DRM_BO_FLAG_WRITE,
160 dev->bm.dummy_read_page);
166 DRM_ERROR("Illegal buffer object type\n");
174 static int drm_bo_handle_move_mem(struct drm_buffer_object *bo,
175 struct drm_bo_mem_reg *mem,
176 int evict, int no_wait)
178 struct drm_device *dev = bo->dev;
179 struct drm_buffer_manager *bm = &dev->bm;
180 int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem);
181 int new_is_pci = drm_mem_reg_is_pci(dev, mem);
182 struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type];
183 struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type];
186 if (old_is_pci || new_is_pci ||
187 ((mem->flags ^ bo->mem.flags) & DRM_BO_FLAG_CACHED))
188 ret = drm_bo_vm_pre_move(bo, old_is_pci);
193 * Create and bind a ttm if required.
196 if (!(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm == NULL)) {
197 ret = drm_bo_add_ttm(bo);
201 if (mem->mem_type != DRM_BO_MEM_LOCAL) {
202 ret = drm_bind_ttm(bo->ttm, mem);
207 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL) {
209 struct drm_bo_mem_reg *old_mem = &bo->mem;
210 uint64_t save_flags = old_mem->flags;
211 uint64_t save_mask = old_mem->mask;
215 old_mem->mask = save_mask;
216 DRM_FLAG_MASKED(save_flags, mem->flags,
217 DRM_BO_MASK_MEMTYPE);
223 if (!(old_man->flags & _DRM_FLAG_MEMTYPE_FIXED) &&
224 !(new_man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
226 ret = drm_bo_move_ttm(bo, evict, no_wait, mem);
228 } else if (dev->driver->bo_driver->move) {
229 ret = dev->driver->bo_driver->move(bo, evict, no_wait, mem);
233 ret = drm_bo_move_memcpy(bo, evict, no_wait, mem);
241 if (old_is_pci || new_is_pci)
242 drm_bo_vm_post_move(bo);
244 if (bo->priv_flags & _DRM_BO_FLAG_EVICTED) {
246 dev->driver->bo_driver->invalidate_caches(dev,
249 DRM_ERROR("Can not flush read caches\n");
252 DRM_FLAG_MASKED(bo->priv_flags,
253 (evict) ? _DRM_BO_FLAG_EVICTED : 0,
254 _DRM_BO_FLAG_EVICTED);
257 bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
258 bm->man[bo->mem.mem_type].gpu_offset;
264 if (old_is_pci || new_is_pci)
265 drm_bo_vm_post_move(bo);
267 new_man = &bm->man[bo->mem.mem_type];
268 if ((new_man->flags & _DRM_FLAG_MEMTYPE_FIXED) && bo->ttm) {
269 drm_ttm_unbind(bo->ttm);
270 drm_destroy_ttm(bo->ttm);
278 * Call bo->mutex locked.
279 * Wait until the buffer is idle.
282 int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int ignore_signals,
287 DRM_ASSERT_LOCKED(&bo->mutex);
290 if (drm_fence_object_signaled(bo->fence, bo->fence_type)) {
291 drm_fence_usage_deref_unlocked(&bo->fence);
297 ret = drm_fence_object_wait(bo->fence, lazy, ignore_signals,
303 drm_fence_usage_deref_unlocked(&bo->fence);
307 EXPORT_SYMBOL(drm_bo_wait);
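/*
 * Illustrative sketch (not part of the original file): idling a buffer
 * before CPU access.  The hypothetical caller is assumed to already hold a
 * usage reference; bo->mutex is taken as required above.
 */
#if 0
static int example_wait_idle(struct drm_buffer_object *bo, int no_wait)
{
	int ret;

	mutex_lock(&bo->mutex);
	/* lazy = 0, ignore_signals = 0 */
	ret = drm_bo_wait(bo, 0, 0, no_wait);
	mutex_unlock(&bo->mutex);
	return ret;
}
#endif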
309 static int drm_bo_expire_fence(struct drm_buffer_object *bo, int allow_errors)
311 struct drm_device *dev = bo->dev;
312 struct drm_buffer_manager *bm = &dev->bm;
316 unsigned long _end = jiffies + 3 * DRM_HZ;
319 ret = drm_bo_wait(bo, 0, 1, 0);
320 if (ret && allow_errors)
323 } while (ret && !time_after_eq(jiffies, _end));
327 DRM_ERROR("Detected GPU lockup or "
328 "fence driver was taken down. "
329 "Evicting buffer.\n");
333 drm_fence_usage_deref_unlocked(&bo->fence);
339 * Call dev->struct_mutex locked.
340 * Attempts to remove all private references to a buffer by expiring its
341 * fence object and removing it from LRU lists and memory managers.
344 static void drm_bo_cleanup_refs(struct drm_buffer_object *bo, int remove_all)
346 struct drm_device *dev = bo->dev;
347 struct drm_buffer_manager *bm = &dev->bm;
349 DRM_ASSERT_LOCKED(&dev->struct_mutex);
351 atomic_inc(&bo->usage);
352 mutex_unlock(&dev->struct_mutex);
353 mutex_lock(&bo->mutex);
355 DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
357 if (bo->fence && drm_fence_object_signaled(bo->fence,
359 drm_fence_usage_deref_unlocked(&bo->fence);
361 if (bo->fence && remove_all)
362 (void)drm_bo_expire_fence(bo, 0);
364 mutex_lock(&dev->struct_mutex);
366 if (!atomic_dec_and_test(&bo->usage))
370 list_del_init(&bo->lru);
371 if (bo->mem.mm_node) {
372 drm_mm_put_block(bo->mem.mm_node);
373 if (bo->pinned_node == bo->mem.mm_node)
374 bo->pinned_node = NULL;
375 bo->mem.mm_node = NULL;
377 list_del_init(&bo->pinned_lru);
378 if (bo->pinned_node) {
379 drm_mm_put_block(bo->pinned_node);
380 bo->pinned_node = NULL;
382 list_del_init(&bo->ddestroy);
383 mutex_unlock(&bo->mutex);
384 drm_bo_destroy_locked(bo);
388 if (list_empty(&bo->ddestroy)) {
389 drm_fence_object_flush(bo->fence, bo->fence_type);
390 list_add_tail(&bo->ddestroy, &bm->ddestroy);
391 schedule_delayed_work(&bm->wq,
392 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
396 mutex_unlock(&bo->mutex);
400 static void drm_bo_unreserve_size(unsigned long size)
402 drm_free_memctl(size);
406 * Verify that refcount is 0 and that there are no internal references
407 * to the buffer object. Then destroy it.
410 static void drm_bo_destroy_locked(struct drm_buffer_object *bo)
412 struct drm_device *dev = bo->dev;
413 struct drm_buffer_manager *bm = &dev->bm;
414 unsigned long reserved_size;
416 DRM_ASSERT_LOCKED(&dev->struct_mutex);
418 if (list_empty(&bo->lru) && bo->mem.mm_node == NULL &&
419 list_empty(&bo->pinned_lru) && bo->pinned_node == NULL &&
420 list_empty(&bo->ddestroy) && atomic_read(&bo->usage) == 0) {
421 if (bo->fence != NULL) {
422 DRM_ERROR("Fence was non-zero.\n");
423 drm_bo_cleanup_refs(bo, 0);
427 #ifdef DRM_ODD_MM_COMPAT
428 BUG_ON(!list_empty(&bo->vma_list));
429 BUG_ON(!list_empty(&bo->p_mm_list));
433 drm_ttm_unbind(bo->ttm);
434 drm_destroy_ttm(bo->ttm);
438 atomic_dec(&bm->count);
440 reserved_size = bo->reserved_size;
442 drm_free(bo, sizeof(*bo), DRM_MEM_BUFOBJ);
443 drm_bo_unreserve_size(reserved_size);
449 * Some stuff is still trying to reference the buffer object.
450 * Get rid of those references.
453 drm_bo_cleanup_refs(bo, 0);
459 * Call dev->struct_mutex locked.
462 static void drm_bo_delayed_delete(struct drm_device *dev, int remove_all)
464 struct drm_buffer_manager *bm = &dev->bm;
466 struct drm_buffer_object *entry, *nentry;
467 struct list_head *list, *next;
469 list_for_each_safe(list, next, &bm->ddestroy) {
470 entry = list_entry(list, struct drm_buffer_object, ddestroy);
473 if (next != &bm->ddestroy) {
474 nentry = list_entry(next, struct drm_buffer_object,
476 atomic_inc(&nentry->usage);
479 drm_bo_cleanup_refs(entry, remove_all);
482 atomic_dec(&nentry->usage);
486 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
487 static void drm_bo_delayed_workqueue(void *data)
489 static void drm_bo_delayed_workqueue(struct work_struct *work)
492 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
493 struct drm_device *dev = (struct drm_device *) data;
494 struct drm_buffer_manager *bm = &dev->bm;
496 struct drm_buffer_manager *bm =
497 container_of(work, struct drm_buffer_manager, wq.work);
498 struct drm_device *dev = container_of(bm, struct drm_device, bm);
501 DRM_DEBUG("Delayed delete Worker\n");
503 mutex_lock(&dev->struct_mutex);
504 if (!bm->initialized) {
505 mutex_unlock(&dev->struct_mutex);
508 drm_bo_delayed_delete(dev, 0);
509 if (bm->initialized && !list_empty(&bm->ddestroy)) {
510 schedule_delayed_work(&bm->wq,
511 ((DRM_HZ / 100) < 1) ? 1 : DRM_HZ / 100);
513 mutex_unlock(&dev->struct_mutex);
516 void drm_bo_usage_deref_locked(struct drm_buffer_object **bo)
518 struct drm_buffer_object *tmp_bo = *bo;
521 DRM_ASSERT_LOCKED(&tmp_bo->dev->struct_mutex);
523 if (atomic_dec_and_test(&tmp_bo->usage))
524 drm_bo_destroy_locked(tmp_bo);
526 EXPORT_SYMBOL(drm_bo_usage_deref_locked);
528 static void drm_bo_base_deref_locked(struct drm_file *file_priv,
529 struct drm_user_object *uo)
531 struct drm_buffer_object *bo =
532 drm_user_object_entry(uo, struct drm_buffer_object, base);
534 DRM_ASSERT_LOCKED(&bo->dev->struct_mutex);
536 drm_bo_takedown_vm_locked(bo);
537 drm_bo_usage_deref_locked(&bo);
540 void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo)
542 struct drm_buffer_object *tmp_bo = *bo;
543 struct drm_device *dev = tmp_bo->dev;
546 if (atomic_dec_and_test(&tmp_bo->usage)) {
547 mutex_lock(&dev->struct_mutex);
548 if (atomic_read(&tmp_bo->usage) == 0)
549 drm_bo_destroy_locked(tmp_bo);
550 mutex_unlock(&dev->struct_mutex);
553 EXPORT_SYMBOL(drm_bo_usage_deref_unlocked);
555 void drm_putback_buffer_objects(struct drm_device *dev)
557 struct drm_buffer_manager *bm = &dev->bm;
558 struct list_head *list = &bm->unfenced;
559 struct drm_buffer_object *entry, *next;
561 mutex_lock(&dev->struct_mutex);
562 list_for_each_entry_safe(entry, next, list, lru) {
563 atomic_inc(&entry->usage);
564 mutex_unlock(&dev->struct_mutex);
566 mutex_lock(&entry->mutex);
567 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
568 mutex_lock(&dev->struct_mutex);
570 list_del_init(&entry->lru);
571 DRM_FLAG_MASKED(entry->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
572 wake_up_all(&entry->event_queue);
575 * FIXME: Might want to put back on head of list
576 * instead of tail here.
579 drm_bo_add_to_lru(entry);
580 mutex_unlock(&entry->mutex);
581 drm_bo_usage_deref_locked(&entry);
583 mutex_unlock(&dev->struct_mutex);
585 EXPORT_SYMBOL(drm_putback_buffer_objects);
589 * Note: The caller must register (if applicable)
590 * and deregister fence object usage.
593 int drm_fence_buffer_objects(struct drm_device *dev,
594 struct list_head *list,
595 uint32_t fence_flags,
596 struct drm_fence_object *fence,
597 struct drm_fence_object **used_fence)
599 struct drm_buffer_manager *bm = &dev->bm;
600 struct drm_buffer_object *entry;
601 uint32_t fence_type = 0;
602 uint32_t fence_class = ~0;
607 mutex_lock(&dev->struct_mutex);
610 list = &bm->unfenced;
613 fence_class = fence->fence_class;
615 list_for_each_entry(entry, list, lru) {
616 BUG_ON(!(entry->priv_flags & _DRM_BO_FLAG_UNFENCED));
617 fence_type |= entry->new_fence_type;
618 if (fence_class == ~0)
619 fence_class = entry->new_fence_class;
620 else if (entry->new_fence_class != fence_class) {
621 DRM_ERROR("Unmatching fence classes on unfenced list: "
624 entry->new_fence_class);
637 if ((fence_type & fence->type) != fence_type ||
638 (fence->fence_class != fence_class)) {
639 DRM_ERROR("Given fence doesn't match buffers "
640 "on unfenced list.\n");
645 mutex_unlock(&dev->struct_mutex);
646 ret = drm_fence_object_create(dev, fence_class, fence_type,
647 fence_flags | DRM_FENCE_FLAG_EMIT,
649 mutex_lock(&dev->struct_mutex);
658 entry = list_entry(l, struct drm_buffer_object, lru);
659 atomic_inc(&entry->usage);
660 mutex_unlock(&dev->struct_mutex);
661 mutex_lock(&entry->mutex);
662 mutex_lock(&dev->struct_mutex);
664 if (entry->priv_flags & _DRM_BO_FLAG_UNFENCED) {
667 drm_fence_usage_deref_locked(&entry->fence);
668 entry->fence = drm_fence_reference_locked(fence);
669 entry->fence_class = entry->new_fence_class;
670 entry->fence_type = entry->new_fence_type;
671 DRM_FLAG_MASKED(entry->priv_flags, 0,
672 _DRM_BO_FLAG_UNFENCED);
673 wake_up_all(&entry->event_queue);
674 drm_bo_add_to_lru(entry);
676 mutex_unlock(&entry->mutex);
677 drm_bo_usage_deref_locked(&entry);
680 DRM_DEBUG("Fenced %d buffers\n", count);
682 mutex_unlock(&dev->struct_mutex);
686 EXPORT_SYMBOL(drm_fence_buffer_objects);
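/*
 * Illustrative sketch (not part of the original file): how a driver's
 * command-submission path might fence the buffers it has just validated.
 * Passing list == NULL selects bm->unfenced and passing fence == NULL makes
 * the helper create and emit a new fence, as implemented above.  The zero
 * fence_flags and the error handling are assumptions of this sketch.
 */
#if 0
static int example_fence_after_submission(struct drm_device *dev)
{
	struct drm_fence_object *fence = NULL;
	int ret;

	/* ... emit the command stream referencing the unfenced buffers ... */

	ret = drm_fence_buffer_objects(dev, NULL, 0, NULL, &fence);
	if (ret) {
		/* Could not fence: put the buffers back on their LRU lists. */
		drm_putback_buffer_objects(dev);
		return ret;
	}

	/* The caller owns the returned fence reference. */
	drm_fence_usage_deref_unlocked(&fence);
	return 0;
}
#endif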
692 static int drm_bo_evict(struct drm_buffer_object *bo, unsigned mem_type,
696 struct drm_device *dev = bo->dev;
697 struct drm_bo_mem_reg evict_mem;
700 * Someone might have modified the buffer before we took the bo->mutex.
704 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED)
706 if (bo->mem.mem_type != mem_type)
709 ret = drm_bo_wait(bo, 0, 0, no_wait);
711 if (ret && ret != -EAGAIN) {
712 DRM_ERROR("Failed to expire fence before "
713 "buffer eviction.\n");
718 evict_mem.mm_node = NULL;
721 evict_mem.mask = dev->driver->bo_driver->evict_mask(bo);
722 ret = drm_bo_mem_space(bo, &evict_mem, no_wait);
726 DRM_ERROR("Failed to find memory space for "
727 "buffer 0x%p eviction.\n", bo);
731 ret = drm_bo_handle_move_mem(bo, &evict_mem, 1, no_wait);
735 DRM_ERROR("Buffer eviction failed\n");
739 mutex_lock(&dev->struct_mutex);
740 if (evict_mem.mm_node) {
741 if (evict_mem.mm_node != bo->pinned_node)
742 drm_mm_put_block(evict_mem.mm_node);
743 evict_mem.mm_node = NULL;
746 drm_bo_add_to_lru(bo);
747 mutex_unlock(&dev->struct_mutex);
749 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_EVICTED,
750 _DRM_BO_FLAG_EVICTED);
757 * Repeatedly evict memory from the LRU for @mem_type until we create enough
758 * space, or we've evicted everything and there isn't enough space.
760 static int drm_bo_mem_force_space(struct drm_device *dev,
761 struct drm_bo_mem_reg *mem,
762 uint32_t mem_type, int no_wait)
764 struct drm_mm_node *node;
765 struct drm_buffer_manager *bm = &dev->bm;
766 struct drm_buffer_object *entry;
767 struct drm_mem_type_manager *man = &bm->man[mem_type];
768 struct list_head *lru;
769 unsigned long num_pages = mem->num_pages;
772 mutex_lock(&dev->struct_mutex);
774 node = drm_mm_search_free(&man->manager, num_pages,
775 mem->page_alignment, 1);
780 if (lru->next == lru)
783 entry = list_entry(lru->next, struct drm_buffer_object, lru);
784 atomic_inc(&entry->usage);
785 mutex_unlock(&dev->struct_mutex);
786 mutex_lock(&entry->mutex);
787 BUG_ON(entry->mem.flags & (DRM_BO_FLAG_NO_MOVE | DRM_BO_FLAG_NO_EVICT));
789 ret = drm_bo_evict(entry, mem_type, no_wait);
790 mutex_unlock(&entry->mutex);
791 drm_bo_usage_deref_unlocked(&entry);
794 mutex_lock(&dev->struct_mutex);
798 mutex_unlock(&dev->struct_mutex);
802 node = drm_mm_get_block(node, num_pages, mem->page_alignment);
804 mutex_unlock(&dev->struct_mutex);
808 mutex_unlock(&dev->struct_mutex);
810 mem->mem_type = mem_type;
814 static int drm_bo_mt_compatible(struct drm_mem_type_manager *man,
817 uint64_t mask, uint32_t *res_mask)
819 uint64_t cur_flags = drm_bo_type_flags(mem_type);
822 if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && disallow_fixed)
824 if (man->flags & _DRM_FLAG_MEMTYPE_CACHED)
825 cur_flags |= DRM_BO_FLAG_CACHED;
826 if (man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)
827 cur_flags |= DRM_BO_FLAG_MAPPABLE;
828 if (man->flags & _DRM_FLAG_MEMTYPE_CSELECT)
829 DRM_FLAG_MASKED(cur_flags, mask, DRM_BO_FLAG_CACHED);
831 if ((cur_flags & mask & DRM_BO_MASK_MEM) == 0)
834 if (mem_type == DRM_BO_MEM_LOCAL) {
835 *res_mask = cur_flags;
839 flag_diff = (mask ^ cur_flags);
840 if (flag_diff & DRM_BO_FLAG_CACHED_MAPPED)
841 cur_flags |= DRM_BO_FLAG_CACHED_MAPPED;
843 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
844 (!(mask & DRM_BO_FLAG_CACHED) ||
845 (mask & DRM_BO_FLAG_FORCE_CACHING)))
848 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
849 ((mask & DRM_BO_FLAG_MAPPABLE) ||
850 (mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
853 *res_mask = cur_flags;
858 * Creates space for memory region @mem according to its type.
860 * This function first searches for free space in compatible memory types in
861 * the priority order defined by the driver. If free space isn't found, then
862 * drm_bo_mem_force_space is attempted in priority order to evict and find space.
865 int drm_bo_mem_space(struct drm_buffer_object *bo,
866 struct drm_bo_mem_reg *mem, int no_wait)
868 struct drm_device *dev = bo->dev;
869 struct drm_buffer_manager *bm = &dev->bm;
870 struct drm_mem_type_manager *man;
872 uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio;
873 const uint32_t *prios = dev->driver->bo_driver->mem_type_prio;
875 uint32_t mem_type = DRM_BO_MEM_LOCAL;
880 struct drm_mm_node *node = NULL;
884 for (i = 0; i < num_prios; ++i) {
886 man = &bm->man[mem_type];
888 type_ok = drm_bo_mt_compatible(man,
889 bo->type == drm_bo_type_user,
896 if (mem_type == DRM_BO_MEM_LOCAL)
899 if ((mem_type == bo->pinned_mem_type) &&
900 (bo->pinned_node != NULL)) {
901 node = bo->pinned_node;
905 mutex_lock(&dev->struct_mutex);
906 if (man->has_type && man->use_type) {
908 node = drm_mm_search_free(&man->manager, mem->num_pages,
909 mem->page_alignment, 1);
911 node = drm_mm_get_block(node, mem->num_pages,
912 mem->page_alignment);
914 mutex_unlock(&dev->struct_mutex);
919 if ((type_ok && (mem_type == DRM_BO_MEM_LOCAL)) || node) {
921 mem->mem_type = mem_type;
922 mem->flags = cur_flags;
929 num_prios = dev->driver->bo_driver->num_mem_busy_prio;
930 prios = dev->driver->bo_driver->mem_busy_prio;
932 for (i = 0; i < num_prios; ++i) {
934 man = &bm->man[mem_type];
939 if (!drm_bo_mt_compatible(man,
940 bo->type == drm_bo_type_user,
946 ret = drm_bo_mem_force_space(dev, mem, mem_type, no_wait);
948 if (ret == 0 && mem->mm_node) {
949 mem->flags = cur_flags;
957 ret = (has_eagain) ? -EAGAIN : -ENOMEM;
960 EXPORT_SYMBOL(drm_bo_mem_space);
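/*
 * Illustrative sketch (not part of the original file): building a placement
 * request for drm_bo_mem_space(), modelled on drm_bo_move_buffer() below.
 * The caller is assumed to hold bo->mutex and bm->evict_mutex, and
 * "desired_flags" would typically combine a memory-type flag such as
 * drm_bo_type_flags(DRM_BO_MEM_TT) with caching / mappability bits.
 */
#if 0
static int example_find_space(struct drm_buffer_object *bo,
			      uint64_t desired_flags, int no_wait)
{
	struct drm_bo_mem_reg mem;

	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.mask = desired_flags;
	mem.page_alignment = bo->mem.page_alignment;
	mem.mm_node = NULL;

	/*
	 * On success, mem.mem_type, mem.flags and mem.mm_node describe the
	 * space found (mm_node stays NULL for DRM_BO_MEM_LOCAL).
	 */
	return drm_bo_mem_space(bo, &mem, no_wait);
}
#endif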
962 static int drm_bo_new_mask(struct drm_buffer_object *bo,
963 uint64_t new_flags, uint64_t used_mask)
967 if (bo->type == drm_bo_type_user &&
968 ((new_flags & (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING)) !=
969 (DRM_BO_FLAG_CACHED | DRM_BO_FLAG_FORCE_CACHING))) {
970 DRM_ERROR("User buffers require cache-coherent memory.\n");
974 if ((used_mask & DRM_BO_FLAG_NO_EVICT) && !DRM_SUSER(DRM_CURPROC)) {
975 DRM_ERROR("DRM_BO_FLAG_NO_EVICT is only available to priviliged processes.\n");
979 if (likely(used_mask & DRM_BO_MASK_MEM) &&
980 (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) &&
981 !DRM_SUSER(DRM_CURPROC)) {
982 if (likely(bo->mem.flags & new_flags & used_mask &
984 new_flags = (new_flags & ~DRM_BO_MASK_MEM) |
985 (bo->mem.flags & DRM_BO_MASK_MEM);
987 DRM_ERROR("Incompatible memory type specification "
988 "for NO_EVICT buffer.\n");
993 if ((new_flags & DRM_BO_FLAG_NO_MOVE)) {
994 DRM_ERROR("DRM_BO_FLAG_NO_MOVE is not properly implemented yet.\n");
998 new_props = new_flags & (DRM_BO_FLAG_EXE | DRM_BO_FLAG_WRITE |
1002 DRM_ERROR("Invalid buffer object rwx properties\n");
1006 bo->mem.mask = new_flags;
1011 * Call dev->struct_mutex locked.
1014 struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
1015 uint32_t handle, int check_owner)
1017 struct drm_user_object *uo;
1018 struct drm_buffer_object *bo;
1020 uo = drm_lookup_user_object(file_priv, handle);
1022 if (!uo || (uo->type != drm_buffer_type)) {
1023 DRM_ERROR("Could not find buffer object 0x%08x\n", handle);
1027 if (check_owner && file_priv != uo->owner) {
1028 if (!drm_lookup_ref_object(file_priv, uo, _DRM_REF_USE))
1032 bo = drm_user_object_entry(uo, struct drm_buffer_object, base);
1033 atomic_inc(&bo->usage);
1036 EXPORT_SYMBOL(drm_lookup_buffer_object);
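/*
 * Illustrative sketch (not part of the original file): the handle-lookup
 * pattern used by the ioctl helpers below.  The lookup is done with
 * dev->struct_mutex held and returns a usage reference that the caller
 * drops with drm_bo_usage_deref_unlocked().
 */
#if 0
static int example_lookup(struct drm_device *dev, struct drm_file *file_priv,
			  uint32_t handle)
{
	struct drm_buffer_object *bo;

	mutex_lock(&dev->struct_mutex);
	bo = drm_lookup_buffer_object(file_priv, handle, 1);
	mutex_unlock(&dev->struct_mutex);
	if (!bo)
		return -EINVAL;

	/* ... use the buffer object ... */

	drm_bo_usage_deref_unlocked(&bo);
	return 0;
}
#endif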
1039 * Call bo->mutex locked.
1040 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1041 * Unlike drm_bo_busy(), this doesn't do any fence flushing.
1044 static int drm_bo_quick_busy(struct drm_buffer_object *bo)
1046 struct drm_fence_object *fence = bo->fence;
1048 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1050 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1051 drm_fence_usage_deref_unlocked(&bo->fence);
1060 * Call bo->mutex locked.
1061 * Returns 1 if the buffer is currently rendered to or from. 0 otherwise.
1064 static int drm_bo_busy(struct drm_buffer_object *bo)
1066 struct drm_fence_object *fence = bo->fence;
1068 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1070 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1071 drm_fence_usage_deref_unlocked(&bo->fence);
1074 drm_fence_object_flush(fence, DRM_FENCE_TYPE_EXE);
1075 if (drm_fence_object_signaled(fence, bo->fence_type)) {
1076 drm_fence_usage_deref_unlocked(&bo->fence);
1084 static int drm_bo_evict_cached(struct drm_buffer_object *bo)
1088 BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1089 if (bo->mem.mm_node)
1090 ret = drm_bo_evict(bo, DRM_BO_MEM_TT, 1);
1095 * Wait until a buffer is unmapped.
1098 static int drm_bo_wait_unmapped(struct drm_buffer_object *bo, int no_wait)
1102 if ((atomic_read(&bo->mapped) >= 0) && no_wait)
1105 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1106 atomic_read(&bo->mapped) == -1);
1114 static int drm_bo_check_unfenced(struct drm_buffer_object *bo)
1118 mutex_lock(&bo->mutex);
1119 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1120 mutex_unlock(&bo->mutex);
1125 * Wait until a buffer, scheduled to be fenced, moves off the unfenced list.
1126 * Until then, we cannot really do anything with it except delete it.
1129 static int drm_bo_wait_unfenced(struct drm_buffer_object *bo, int no_wait,
1132 int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1140 mutex_unlock(&bo->mutex);
1141 DRM_WAIT_ON(ret, bo->event_queue, 3 * DRM_HZ,
1142 !drm_bo_check_unfenced(bo));
1143 mutex_lock(&bo->mutex);
1146 ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED);
1148 DRM_ERROR("Timeout waiting for buffer to become fenced\n");
1158 * Fill in the ioctl reply argument with buffer info.
1162 void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
1163 struct drm_bo_info_rep *rep)
1168 rep->handle = bo->base.hash.key;
1169 rep->flags = bo->mem.flags;
1170 rep->size = bo->num_pages * PAGE_SIZE;
1171 rep->offset = bo->offset;
1173 if (bo->type == drm_bo_type_dc)
1174 rep->arg_handle = bo->map_list.user_token;
1176 rep->arg_handle = 0;
1178 rep->mask = bo->mem.mask;
1179 rep->buffer_start = bo->buffer_start;
1180 rep->fence_flags = bo->fence_type;
1182 rep->page_alignment = bo->mem.page_alignment;
1184 if ((bo->priv_flags & _DRM_BO_FLAG_UNFENCED) || drm_bo_quick_busy(bo)) {
1185 DRM_FLAG_MASKED(rep->rep_flags, DRM_BO_REP_BUSY,
1189 EXPORT_SYMBOL(drm_bo_fill_rep_arg);
1192 * Wait for buffer idle and register that we've mapped the buffer.
1193 * Mapping is registered as a drm_ref_object with type _DRM_REF_TYPE1,
1194 * so that if the client dies, the mapping is automatically removed.
1198 static int drm_buffer_object_map(struct drm_file *file_priv, uint32_t handle,
1199 uint32_t map_flags, unsigned hint,
1200 struct drm_bo_info_rep *rep)
1202 struct drm_buffer_object *bo;
1203 struct drm_device *dev = file_priv->head->dev;
1205 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1207 mutex_lock(&dev->struct_mutex);
1208 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1209 mutex_unlock(&dev->struct_mutex);
1214 mutex_lock(&bo->mutex);
1215 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1220 * If this returns true, we are currently unmapped.
1221 * We need to do this test, because unmapping can
1222 * be done without the bo->mutex held.
1226 if (atomic_inc_and_test(&bo->mapped)) {
1227 if (no_wait && drm_bo_busy(bo)) {
1228 atomic_dec(&bo->mapped);
1233 ret = drm_bo_wait(bo, 0, 0, no_wait);
1235 atomic_dec(&bo->mapped);
1239 if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED)
1240 drm_bo_evict_cached(bo);
1243 } else if (bo->mem.flags & DRM_BO_FLAG_CACHED_MAPPED) {
1246 * We are already mapped with different flags;
1247 * we need to wait for unmap.
1250 ret = drm_bo_wait_unmapped(bo, no_wait);
1259 mutex_lock(&dev->struct_mutex);
1260 ret = drm_add_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1261 mutex_unlock(&dev->struct_mutex);
1263 if (atomic_add_negative(-1, &bo->mapped))
1264 wake_up_all(&bo->event_queue);
1267 drm_bo_fill_rep_arg(bo, rep);
1269 mutex_unlock(&bo->mutex);
1270 drm_bo_usage_deref_unlocked(&bo);
1274 static int drm_buffer_object_unmap(struct drm_file *file_priv, uint32_t handle)
1276 struct drm_device *dev = file_priv->head->dev;
1277 struct drm_buffer_object *bo;
1278 struct drm_ref_object *ro;
1281 mutex_lock(&dev->struct_mutex);
1283 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1289 ro = drm_lookup_ref_object(file_priv, &bo->base, _DRM_REF_TYPE1);
1295 drm_remove_ref_object(file_priv, ro);
1296 drm_bo_usage_deref_locked(&bo);
1298 mutex_unlock(&dev->struct_mutex);
1303 * Call dev->struct_mutex locked.
1306 static void drm_buffer_user_object_unmap(struct drm_file *file_priv,
1307 struct drm_user_object *uo,
1308 enum drm_ref_type action)
1310 struct drm_buffer_object *bo =
1311 drm_user_object_entry(uo, struct drm_buffer_object, base);
1314 * We DON'T want to take the bo->lock here, because we want to
1315 * hold it when we wait for unmapped buffer.
1318 BUG_ON(action != _DRM_REF_TYPE1);
1320 if (atomic_add_negative(-1, &bo->mapped))
1321 wake_up_all(&bo->event_queue);
1326 * Note that new_mem_flags are NOT transferred to the bo->mem.mask.
1329 int drm_bo_move_buffer(struct drm_buffer_object *bo, uint64_t new_mem_flags,
1330 int no_wait, int move_unfenced)
1332 struct drm_device *dev = bo->dev;
1333 struct drm_buffer_manager *bm = &dev->bm;
1335 struct drm_bo_mem_reg mem;
1337 * Flush outstanding fences.
1343 * Wait for outstanding fences.
1346 ret = drm_bo_wait(bo, 0, 0, no_wait);
1350 mem.num_pages = bo->num_pages;
1351 mem.size = mem.num_pages << PAGE_SHIFT;
1352 mem.mask = new_mem_flags;
1353 mem.page_alignment = bo->mem.page_alignment;
1355 mutex_lock(&bm->evict_mutex);
1356 mutex_lock(&dev->struct_mutex);
1357 list_del_init(&bo->lru);
1358 mutex_unlock(&dev->struct_mutex);
1361 * Determine where to move the buffer.
1363 ret = drm_bo_mem_space(bo, &mem, no_wait);
1367 ret = drm_bo_handle_move_mem(bo, &mem, 0, no_wait);
1370 mutex_lock(&dev->struct_mutex);
1371 if (ret || !move_unfenced) {
1373 if (mem.mm_node != bo->pinned_node)
1374 drm_mm_put_block(mem.mm_node);
1377 drm_bo_add_to_lru(bo);
1378 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1379 wake_up_all(&bo->event_queue);
1380 DRM_FLAG_MASKED(bo->priv_flags, 0,
1381 _DRM_BO_FLAG_UNFENCED);
1384 list_add_tail(&bo->lru, &bm->unfenced);
1385 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1386 _DRM_BO_FLAG_UNFENCED);
1388 mutex_unlock(&dev->struct_mutex);
1389 mutex_unlock(&bm->evict_mutex);
1393 static int drm_bo_mem_compat(struct drm_bo_mem_reg *mem)
1395 uint32_t flag_diff = (mem->mask ^ mem->flags);
1397 if ((mem->mask & mem->flags & DRM_BO_MASK_MEM) == 0)
1399 if ((flag_diff & DRM_BO_FLAG_CACHED) &&
1400 (/* !(mem->mask & DRM_BO_FLAG_CACHED) ||*/
1401 (mem->mask & DRM_BO_FLAG_FORCE_CACHING)))
1404 if ((flag_diff & DRM_BO_FLAG_MAPPABLE) &&
1405 ((mem->mask & DRM_BO_FLAG_MAPPABLE) ||
1406 (mem->mask & DRM_BO_FLAG_FORCE_MAPPABLE)))
1415 static int drm_buffer_object_validate(struct drm_buffer_object *bo,
1416 uint32_t fence_class,
1417 int move_unfenced, int no_wait)
1419 struct drm_device *dev = bo->dev;
1420 struct drm_buffer_manager *bm = &dev->bm;
1421 struct drm_bo_driver *driver = dev->driver->bo_driver;
1425 DRM_DEBUG("New flags 0x%016llx, Old flags 0x%016llx\n",
1426 (unsigned long long) bo->mem.mask,
1427 (unsigned long long) bo->mem.flags);
1429 ret = driver->fence_type(bo, &fence_class, &ftype);
1432 DRM_ERROR("Driver did not support given buffer permissions\n");
1437 * We're switching command submission mechanism,
1438 * or cannot simply rely on the hardware serializing for us.
1440 * Insert a driver-dependent barrier or wait for buffer idle.
1443 if ((fence_class != bo->fence_class) ||
1444 ((ftype ^ bo->fence_type) & bo->fence_type)) {
1447 if (driver->command_stream_barrier) {
1448 ret = driver->command_stream_barrier(bo,
1454 ret = drm_bo_wait(bo, 0, 0, no_wait);
1461 bo->new_fence_class = fence_class;
1462 bo->new_fence_type = ftype;
1464 ret = drm_bo_wait_unmapped(bo, no_wait);
1466 DRM_ERROR("Timed out waiting for buffer unmap.\n");
1471 * Check whether we need to move buffer.
1474 if (!drm_bo_mem_compat(&bo->mem)) {
1475 ret = drm_bo_move_buffer(bo, bo->mem.mask, no_wait,
1479 DRM_ERROR("Failed moving buffer.\n");
1481 DRM_ERROR("Out of aperture space.\n");
1490 if (bo->mem.mask & (DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE)) {
1491 bo->pinned_mem_type = bo->mem.mem_type;
1492 mutex_lock(&dev->struct_mutex);
1493 list_del_init(&bo->pinned_lru);
1494 drm_bo_add_to_pinned_lru(bo);
1496 if (bo->pinned_node != bo->mem.mm_node) {
1497 if (bo->pinned_node != NULL)
1498 drm_mm_put_block(bo->pinned_node);
1499 bo->pinned_node = bo->mem.mm_node;
1502 mutex_unlock(&dev->struct_mutex);
1504 } else if (bo->pinned_node != NULL) {
1506 mutex_lock(&dev->struct_mutex);
1508 if (bo->pinned_node != bo->mem.mm_node)
1509 drm_mm_put_block(bo->pinned_node);
1511 list_del_init(&bo->pinned_lru);
1512 bo->pinned_node = NULL;
1513 mutex_unlock(&dev->struct_mutex);
1518 * We might need to add a TTM.
1521 if (bo->mem.mem_type == DRM_BO_MEM_LOCAL && bo->ttm == NULL) {
1522 ret = drm_bo_add_ttm(bo);
1526 DRM_FLAG_MASKED(bo->mem.flags, bo->mem.mask, ~DRM_BO_MASK_MEMTYPE);
1529 * Finally, adjust lru to be sure.
1532 mutex_lock(&dev->struct_mutex);
1534 if (move_unfenced) {
1535 list_add_tail(&bo->lru, &bm->unfenced);
1536 DRM_FLAG_MASKED(bo->priv_flags, _DRM_BO_FLAG_UNFENCED,
1537 _DRM_BO_FLAG_UNFENCED);
1539 drm_bo_add_to_lru(bo);
1540 if (bo->priv_flags & _DRM_BO_FLAG_UNFENCED) {
1541 wake_up_all(&bo->event_queue);
1542 DRM_FLAG_MASKED(bo->priv_flags, 0,
1543 _DRM_BO_FLAG_UNFENCED);
1546 mutex_unlock(&dev->struct_mutex);
1551 int drm_bo_do_validate(struct drm_buffer_object *bo,
1552 uint64_t flags, uint64_t mask, uint32_t hint,
1553 uint32_t fence_class,
1555 struct drm_bo_info_rep *rep)
1559 mutex_lock(&bo->mutex);
1560 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1565 DRM_FLAG_MASKED(flags, bo->mem.mask, ~mask);
1566 ret = drm_bo_new_mask(bo, flags, mask);
1570 ret = drm_buffer_object_validate(bo,
1572 !(hint & DRM_BO_HINT_DONT_FENCE),
1576 drm_bo_fill_rep_arg(bo, rep);
1578 mutex_unlock(&bo->mutex);
1581 EXPORT_SYMBOL(drm_bo_do_validate);
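/*
 * Illustrative sketch (not part of the original file): asking for a buffer
 * to be placed in a given memory type via drm_bo_do_validate().  The
 * position of the "no_wait" argument and the flag choice are assumptions of
 * this sketch; the caller must already hold a usage reference.
 */
#if 0
static int example_validate_to(struct drm_buffer_object *bo, unsigned mem_type)
{
	struct drm_bo_info_rep rep;

	return drm_bo_do_validate(bo, drm_bo_type_flags(mem_type),
				  DRM_BO_MASK_MEM, DRM_BO_HINT_DONT_FENCE,
				  bo->fence_class, 0 /* no_wait */, &rep);
}
#endif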
1584 int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
1585 uint32_t fence_class,
1586 uint64_t flags, uint64_t mask,
1588 int use_old_fence_class,
1589 struct drm_bo_info_rep *rep,
1590 struct drm_buffer_object **bo_rep)
1592 struct drm_device *dev = file_priv->head->dev;
1593 struct drm_buffer_object *bo;
1595 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1597 mutex_lock(&dev->struct_mutex);
1598 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1599 mutex_unlock(&dev->struct_mutex);
1604 if (use_old_fence_class)
1605 fence_class = bo->fence_class;
1608 * Only allow creator to change shared buffer mask.
1611 if (bo->base.owner != file_priv)
1612 mask &= ~(DRM_BO_FLAG_NO_EVICT | DRM_BO_FLAG_NO_MOVE);
1615 ret = drm_bo_do_validate(bo, flags, mask, hint, fence_class,
1621 drm_bo_usage_deref_unlocked(&bo);
1625 EXPORT_SYMBOL(drm_bo_handle_validate);
1627 static int drm_bo_handle_info(struct drm_file *file_priv, uint32_t handle,
1628 struct drm_bo_info_rep *rep)
1630 struct drm_device *dev = file_priv->head->dev;
1631 struct drm_buffer_object *bo;
1633 mutex_lock(&dev->struct_mutex);
1634 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1635 mutex_unlock(&dev->struct_mutex);
1640 mutex_lock(&bo->mutex);
1641 if (!(bo->priv_flags & _DRM_BO_FLAG_UNFENCED))
1642 (void)drm_bo_busy(bo);
1643 drm_bo_fill_rep_arg(bo, rep);
1644 mutex_unlock(&bo->mutex);
1645 drm_bo_usage_deref_unlocked(&bo);
1649 static int drm_bo_handle_wait(struct drm_file *file_priv, uint32_t handle,
1651 struct drm_bo_info_rep *rep)
1653 struct drm_device *dev = file_priv->head->dev;
1654 struct drm_buffer_object *bo;
1655 int no_wait = hint & DRM_BO_HINT_DONT_BLOCK;
1658 mutex_lock(&dev->struct_mutex);
1659 bo = drm_lookup_buffer_object(file_priv, handle, 1);
1660 mutex_unlock(&dev->struct_mutex);
1665 mutex_lock(&bo->mutex);
1666 ret = drm_bo_wait_unfenced(bo, no_wait, 0);
1669 ret = drm_bo_wait(bo, hint & DRM_BO_HINT_WAIT_LAZY, 0, no_wait);
1673 drm_bo_fill_rep_arg(bo, rep);
1676 mutex_unlock(&bo->mutex);
1677 drm_bo_usage_deref_unlocked(&bo);
1681 static int drm_bo_reserve_size(struct drm_device *dev,
1683 unsigned long num_pages,
1684 unsigned long *size)
1686 struct drm_bo_driver *driver = dev->driver->bo_driver;
1688 *size = drm_size_align(sizeof(struct drm_buffer_object)) +
1689 /* Always account for a TTM, even for fixed memory types */
1690 drm_ttm_size(dev, num_pages, user_bo) +
1691 /* user space mapping structure */
1692 drm_size_align(sizeof(drm_local_map_t)) +
1693 /* file offset space, aperture space, pinned space */
1694 3*drm_size_align(sizeof(struct drm_mm_node *)) +
1696 driver->backend_size(dev, num_pages);
1698 return drm_alloc_memctl(*size);
1701 int drm_buffer_object_create(struct drm_device *dev,
1703 enum drm_bo_type type,
1706 uint32_t page_alignment,
1707 unsigned long buffer_start,
1708 struct drm_buffer_object **buf_obj)
1710 struct drm_buffer_manager *bm = &dev->bm;
1711 struct drm_buffer_object *bo;
1713 unsigned long num_pages;
1714 unsigned long reserved_size;
1716 size += buffer_start & ~PAGE_MASK;
1717 num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1718 if (num_pages == 0) {
1719 DRM_ERROR("Illegal buffer object size.\n");
1723 ret = drm_bo_reserve_size(dev, type == drm_bo_type_user,
1724 num_pages, &reserved_size);
1727 DRM_DEBUG("Failed reserving space for buffer object.\n");
1731 bo = drm_calloc(1, sizeof(*bo), DRM_MEM_BUFOBJ);
1734 drm_bo_unreserve_size(reserved_size);
1738 mutex_init(&bo->mutex);
1739 mutex_lock(&bo->mutex);
1741 bo->reserved_size = reserved_size;
1742 atomic_set(&bo->usage, 1);
1743 atomic_set(&bo->mapped, -1);
1744 DRM_INIT_WAITQUEUE(&bo->event_queue);
1745 INIT_LIST_HEAD(&bo->lru);
1746 INIT_LIST_HEAD(&bo->pinned_lru);
1747 INIT_LIST_HEAD(&bo->ddestroy);
1748 #ifdef DRM_ODD_MM_COMPAT
1749 INIT_LIST_HEAD(&bo->p_mm_list);
1750 INIT_LIST_HEAD(&bo->vma_list);
1754 bo->num_pages = num_pages;
1755 bo->mem.mem_type = DRM_BO_MEM_LOCAL;
1756 bo->mem.num_pages = bo->num_pages;
1757 bo->mem.mm_node = NULL;
1758 bo->mem.page_alignment = page_alignment;
1759 bo->buffer_start = buffer_start & PAGE_MASK;
1761 bo->mem.flags = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1762 DRM_BO_FLAG_MAPPABLE;
1763 bo->mem.mask = DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_CACHED |
1764 DRM_BO_FLAG_MAPPABLE;
1765 atomic_inc(&bm->count);
1766 ret = drm_bo_new_mask(bo, mask, mask);
1770 if (bo->type == drm_bo_type_dc) {
1771 mutex_lock(&dev->struct_mutex);
1772 ret = drm_bo_setup_vm_locked(bo);
1773 mutex_unlock(&dev->struct_mutex);
1778 ret = drm_buffer_object_validate(bo, 0, 0, hint & DRM_BO_HINT_DONT_BLOCK);
1782 mutex_unlock(&bo->mutex);
1787 mutex_unlock(&bo->mutex);
1789 drm_bo_usage_deref_unlocked(&bo);
1792 EXPORT_SYMBOL(drm_buffer_object_create);
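/*
 * Illustrative sketch (not part of the original file): creating an
 * in-kernel buffer object, mirroring the argument order used by
 * drm_bo_create_ioctl() below.  The size, access flags (DRM_BO_FLAG_READ is
 * assumed to exist alongside DRM_BO_FLAG_WRITE) and alignment are
 * placeholder values.
 */
#if 0
static int example_create_bo(struct drm_device *dev,
			     struct drm_buffer_object **bo)
{
	return drm_buffer_object_create(dev, 4 * PAGE_SIZE,
					drm_bo_type_kernel,
					DRM_BO_FLAG_MEM_LOCAL |
					DRM_BO_FLAG_CACHED |
					DRM_BO_FLAG_MAPPABLE |
					DRM_BO_FLAG_READ |
					DRM_BO_FLAG_WRITE,
					0 /* hint */, 0 /* page_alignment */,
					0 /* buffer_start */, bo);
}
#endif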
1795 static int drm_bo_add_user_object(struct drm_file *file_priv,
1796 struct drm_buffer_object *bo, int shareable)
1798 struct drm_device *dev = file_priv->head->dev;
1801 mutex_lock(&dev->struct_mutex);
1802 ret = drm_add_user_object(file_priv, &bo->base, shareable);
1806 bo->base.remove = drm_bo_base_deref_locked;
1807 bo->base.type = drm_buffer_type;
1808 bo->base.ref_struct_locked = NULL;
1809 bo->base.unref = drm_buffer_user_object_unmap;
1812 mutex_unlock(&dev->struct_mutex);
1816 int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1818 struct drm_bo_create_arg *arg = data;
1819 struct drm_bo_create_req *req = &arg->d.req;
1820 struct drm_bo_info_rep *rep = &arg->d.rep;
1821 struct drm_buffer_object *entry;
1822 enum drm_bo_type bo_type;
1825 DRM_DEBUG("drm_bo_create_ioctl: %dkb, %dkb align\n",
1826 (int)(req->size / 1024), req->page_alignment * 4);
1828 if (!dev->bm.initialized) {
1829 DRM_ERROR("Buffer object manager is not initialized.\n");
1833 bo_type = (req->buffer_start) ? drm_bo_type_user : drm_bo_type_dc;
1835 if (bo_type == drm_bo_type_user)
1836 req->mask &= ~DRM_BO_FLAG_SHAREABLE;
1838 ret = drm_buffer_object_create(file_priv->head->dev,
1839 req->size, bo_type, req->mask,
1840 req->hint, req->page_alignment,
1841 req->buffer_start, &entry);
1845 ret = drm_bo_add_user_object(file_priv, entry,
1846 req->mask & DRM_BO_FLAG_SHAREABLE);
1848 drm_bo_usage_deref_unlocked(&entry);
1852 mutex_lock(&entry->mutex);
1853 drm_bo_fill_rep_arg(entry, rep);
1854 mutex_unlock(&entry->mutex);
1860 int drm_bo_setstatus_ioctl(struct drm_device *dev,
1861 void *data, struct drm_file *file_priv)
1863 struct drm_bo_map_wait_idle_arg *arg = data;
1864 struct drm_bo_info_req *req = &arg->d.req;
1865 struct drm_bo_info_rep *rep = &arg->d.rep;
1868 if (!dev->bm.initialized) {
1869 DRM_ERROR("Buffer object manager is not initialized.\n");
1873 ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
1877 ret = drm_bo_handle_validate(file_priv, req->handle, req->fence_class,
1880 req->hint | DRM_BO_HINT_DONT_FENCE,
1884 (void) drm_bo_read_unlock(&dev->bm.bm_lock);
1891 int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1893 struct drm_bo_map_wait_idle_arg *arg = data;
1894 struct drm_bo_info_req *req = &arg->d.req;
1895 struct drm_bo_info_rep *rep = &arg->d.rep;
1897 if (!dev->bm.initialized) {
1898 DRM_ERROR("Buffer object manager is not initialized.\n");
1902 ret = drm_buffer_object_map(file_priv, req->handle, req->mask,
1910 int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1912 struct drm_bo_handle_arg *arg = data;
1914 if (!dev->bm.initialized) {
1915 DRM_ERROR("Buffer object manager is not initialized.\n");
1919 ret = drm_buffer_object_unmap(file_priv, arg->handle);
1924 int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1926 struct drm_bo_reference_info_arg *arg = data;
1927 struct drm_bo_handle_arg *req = &arg->d.req;
1928 struct drm_bo_info_rep *rep = &arg->d.rep;
1929 struct drm_user_object *uo;
1932 if (!dev->bm.initialized) {
1933 DRM_ERROR("Buffer object manager is not initialized.\n");
1937 ret = drm_user_object_ref(file_priv, req->handle,
1938 drm_buffer_type, &uo);
1942 ret = drm_bo_handle_info(file_priv, req->handle, rep);
1949 int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1951 struct drm_bo_handle_arg *arg = data;
1954 if (!dev->bm.initialized) {
1955 DRM_ERROR("Buffer object manager is not initialized.\n");
1959 ret = drm_user_object_unref(file_priv, arg->handle, drm_buffer_type);
1963 int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1965 struct drm_bo_reference_info_arg *arg = data;
1966 struct drm_bo_handle_arg *req = &arg->d.req;
1967 struct drm_bo_info_rep *rep = &arg->d.rep;
1970 if (!dev->bm.initialized) {
1971 DRM_ERROR("Buffer object manager is not initialized.\n");
1975 ret = drm_bo_handle_info(file_priv, req->handle, rep);
1982 int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
1984 struct drm_bo_map_wait_idle_arg *arg = data;
1985 struct drm_bo_info_req *req = &arg->d.req;
1986 struct drm_bo_info_rep *rep = &arg->d.rep;
1988 if (!dev->bm.initialized) {
1989 DRM_ERROR("Buffer object manager is not initialized.\n");
1993 ret = drm_bo_handle_wait(file_priv, req->handle,
2001 static int drm_bo_leave_list(struct drm_buffer_object *bo,
2006 struct drm_device *dev = bo->dev;
2009 mutex_lock(&bo->mutex);
2011 ret = drm_bo_expire_fence(bo, allow_errors);
2016 DRM_FLAG_MASKED(bo->mem.flags, 0, DRM_BO_FLAG_NO_MOVE);
2017 mutex_lock(&dev->struct_mutex);
2018 list_del_init(&bo->pinned_lru);
2019 if (bo->pinned_node == bo->mem.mm_node)
2020 bo->pinned_node = NULL;
2021 if (bo->pinned_node != NULL) {
2022 drm_mm_put_block(bo->pinned_node);
2023 bo->pinned_node = NULL;
2025 mutex_unlock(&dev->struct_mutex);
2028 if (bo->mem.flags & DRM_BO_FLAG_NO_EVICT) {
2029 DRM_ERROR("A DRM_BO_NO_EVICT buffer present at "
2030 "cleanup. Removing flag and evicting.\n");
2031 bo->mem.flags &= ~DRM_BO_FLAG_NO_EVICT;
2032 bo->mem.mask &= ~DRM_BO_FLAG_NO_EVICT;
2035 if (bo->mem.mem_type == mem_type)
2036 ret = drm_bo_evict(bo, mem_type, 0);
2043 DRM_ERROR("Cleanup eviction failed\n");
2048 mutex_unlock(&bo->mutex);
2053 static struct drm_buffer_object *drm_bo_entry(struct list_head *list,
2057 return list_entry(list, struct drm_buffer_object, pinned_lru);
2059 return list_entry(list, struct drm_buffer_object, lru);
2063 * dev->struct_mutex locked.
2066 static int drm_bo_force_list_clean(struct drm_device *dev,
2067 struct list_head *head,
2073 struct list_head *list, *next, *prev;
2074 struct drm_buffer_object *entry, *nentry;
2079 * The list traversal is a bit odd here, because an item may
2080 * disappear from the list when we release the struct_mutex or
2081 * when we decrease the usage count. Also we're not guaranteed
2082 * to drain pinned lists, so we can't always restart.
2087 list_for_each_safe(list, next, head) {
2090 entry = (nentry != NULL) ? nentry: drm_bo_entry(list, pinned_list);
2091 atomic_inc(&entry->usage);
2093 atomic_dec(&nentry->usage);
2098 * Protect the next item from destruction, so we can check
2099 * its list pointers later on.
2103 nentry = drm_bo_entry(next, pinned_list);
2104 atomic_inc(&nentry->usage);
2106 mutex_unlock(&dev->struct_mutex);
2108 ret = drm_bo_leave_list(entry, mem_type, free_pinned,
2110 mutex_lock(&dev->struct_mutex);
2112 drm_bo_usage_deref_locked(&entry);
2117 * Has the next item disappeared from the list?
2120 do_restart = ((next->prev != list) && (next->prev != prev));
2122 if (nentry != NULL && do_restart)
2123 drm_bo_usage_deref_locked(&nentry);
2131 int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type)
2133 struct drm_buffer_manager *bm = &dev->bm;
2134 struct drm_mem_type_manager *man = &bm->man[mem_type];
2137 if (mem_type >= DRM_BO_MEM_TYPES) {
2138 DRM_ERROR("Illegal memory type %d\n", mem_type);
2142 if (!man->has_type) {
2143 DRM_ERROR("Trying to take down uninitialized "
2144 "memory manager type %u\n", mem_type);
2152 BUG_ON(!list_empty(&bm->unfenced));
2153 drm_bo_force_list_clean(dev, &man->lru, mem_type, 1, 0, 0);
2154 drm_bo_force_list_clean(dev, &man->pinned, mem_type, 1, 0, 1);
2156 if (drm_mm_clean(&man->manager)) {
2157 drm_mm_takedown(&man->manager);
2165 EXPORT_SYMBOL(drm_bo_clean_mm);
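/*
 * Illustrative sketch (not part of the original file): a driver unload path
 * taking down a driver-initialized memory type.  DRM_BO_MEM_VRAM is an
 * assumed memory-type index; dev->struct_mutex is held as in the callers of
 * drm_bo_clean_mm() in this file.
 */
#if 0
static void example_takedown_vram(struct drm_device *dev)
{
	mutex_lock(&dev->struct_mutex);
	if (drm_bo_clean_mm(dev, DRM_BO_MEM_VRAM))
		DRM_ERROR("VRAM memory manager was not clean at takedown.\n");
	mutex_unlock(&dev->struct_mutex);
}
#endif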
2168 * Evict all buffers of a particular mem_type, but leave memory manager
2169 * regions for NO_MOVE buffers intact. New buffers cannot be added at this
2170 * point since we have the hardware lock.
2173 static int drm_bo_lock_mm(struct drm_device *dev, unsigned mem_type)
2176 struct drm_buffer_manager *bm = &dev->bm;
2177 struct drm_mem_type_manager *man = &bm->man[mem_type];
2179 if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) {
2180 DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type);
2184 if (!man->has_type) {
2185 DRM_ERROR("Memory type %u has not been initialized.\n",
2190 ret = drm_bo_force_list_clean(dev, &man->lru, mem_type, 0, 1, 0);
2193 ret = drm_bo_force_list_clean(dev, &man->pinned, mem_type, 0, 1, 1);
2198 int drm_bo_init_mm(struct drm_device *dev,
2200 unsigned long p_offset, unsigned long p_size)
2202 struct drm_buffer_manager *bm = &dev->bm;
2204 struct drm_mem_type_manager *man;
2206 if (type >= DRM_BO_MEM_TYPES) {
2207 DRM_ERROR("Illegal memory type %d\n", type);
2211 man = &bm->man[type];
2212 if (man->has_type) {
2213 DRM_ERROR("Memory manager already initialized for type %d\n",
2218 ret = dev->driver->bo_driver->init_mem_type(dev, type, man);
2223 if (type != DRM_BO_MEM_LOCAL) {
2225 DRM_ERROR("Zero size memory manager type %d\n", type);
2228 ret = drm_mm_init(&man->manager, p_offset, p_size);
2235 INIT_LIST_HEAD(&man->lru);
2236 INIT_LIST_HEAD(&man->pinned);
2240 EXPORT_SYMBOL(drm_bo_init_mm);
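/*
 * Illustrative sketch (not part of the original file): a driver bringing up
 * the TT (GART) memory type after drm_bo_driver_init() has set up
 * DRM_BO_MEM_LOCAL.  The offset and size in pages are placeholders;
 * dev->struct_mutex is held as in the callers of drm_bo_init_mm() below.
 */
#if 0
static int example_init_tt(struct drm_device *dev, unsigned long tt_pages)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_bo_init_mm(dev, DRM_BO_MEM_TT, 0, tt_pages);
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
#endif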
2243 * This function is intended to be called on drm driver unload.
2244 * If you decide to call it from lastclose, you must protect the call
2245 * from a potentially racing drm_bo_driver_init in firstopen.
2246 * (This may happen on X server restart).
2249 int drm_bo_driver_finish(struct drm_device *dev)
2251 struct drm_buffer_manager *bm = &dev->bm;
2253 unsigned i = DRM_BO_MEM_TYPES;
2254 struct drm_mem_type_manager *man;
2256 mutex_lock(&dev->struct_mutex);
2258 if (!bm->initialized)
2260 bm->initialized = 0;
2264 if (man->has_type) {
2266 if ((i != DRM_BO_MEM_LOCAL) && drm_bo_clean_mm(dev, i)) {
2268 DRM_ERROR("DRM memory manager type %d "
2269 "is not clean.\n", i);
2274 mutex_unlock(&dev->struct_mutex);
2276 if (!cancel_delayed_work(&bm->wq))
2277 flush_scheduled_work();
2279 mutex_lock(&dev->struct_mutex);
2280 drm_bo_delayed_delete(dev, 1);
2281 if (list_empty(&bm->ddestroy))
2282 DRM_DEBUG("Delayed destroy list was clean\n");
2284 if (list_empty(&bm->man[0].lru))
2285 DRM_DEBUG("Swap list was clean\n");
2287 if (list_empty(&bm->man[0].pinned))
2288 DRM_DEBUG("NO_MOVE list was clean\n");
2290 if (list_empty(&bm->unfenced))
2291 DRM_DEBUG("Unfenced list was clean\n");
2293 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2294 ClearPageReserved(bm->dummy_read_page);
2296 __free_page(bm->dummy_read_page);
2299 mutex_unlock(&dev->struct_mutex);
2302 EXPORT_SYMBOL(drm_bo_driver_finish);
2305 * This function is intended to be called on drm driver load.
2306 * If you decide to call it from firstopen, you must protect the call
2307 * from a potentially racing drm_bo_driver_finish in lastclose.
2308 * (This may happen on X server restart).
2311 int drm_bo_driver_init(struct drm_device *dev)
2313 struct drm_bo_driver *driver = dev->driver->bo_driver;
2314 struct drm_buffer_manager *bm = &dev->bm;
2317 bm->dummy_read_page = NULL;
2318 drm_bo_init_lock(&bm->bm_lock);
2319 mutex_lock(&dev->struct_mutex);
2323 bm->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
2324 if (!bm->dummy_read_page) {
2329 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))
2330 SetPageReserved(bm->dummy_read_page);
2334 * Initialize the system memory buffer type.
2335 * Other types need to be initialized by the driver or via ioctl.
2337 ret = drm_bo_init_mm(dev, DRM_BO_MEM_LOCAL, 0, 0);
2341 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
2342 INIT_WORK(&bm->wq, &drm_bo_delayed_workqueue, dev);
2344 INIT_DELAYED_WORK(&bm->wq, drm_bo_delayed_workqueue);
2346 bm->initialized = 1;
2348 atomic_set(&bm->count, 0);
2350 INIT_LIST_HEAD(&bm->unfenced);
2351 INIT_LIST_HEAD(&bm->ddestroy);
2353 mutex_unlock(&dev->struct_mutex);
2356 EXPORT_SYMBOL(drm_bo_driver_init);
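/*
 * Illustrative sketch (not part of the original file): the intended pairing
 * of drm_bo_driver_init() on driver load with drm_bo_driver_finish() on
 * unload, per the comments above.  The hook names are hypothetical.
 */
#if 0
static int example_driver_load(struct drm_device *dev)
{
	int ret;

	ret = drm_bo_driver_init(dev);
	if (ret) {
		DRM_ERROR("Failed initializing buffer object manager.\n");
		return ret;
	}

	/*
	 * Driver-specific memory types (TT, VRAM, ...) would be added here
	 * with drm_bo_init_mm().
	 */
	return 0;
}

static void example_driver_unload(struct drm_device *dev)
{
	if (drm_bo_driver_finish(dev))
		DRM_ERROR("Buffer object manager was not clean at unload.\n");
}
#endif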
2358 int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2360 struct drm_mm_init_arg *arg = data;
2361 struct drm_buffer_manager *bm = &dev->bm;
2362 struct drm_bo_driver *driver = dev->driver->bo_driver;
2366 DRM_ERROR("Buffer objects are not supported by this driver\n");
2370 ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
2375 if (arg->magic != DRM_BO_INIT_MAGIC) {
2376 DRM_ERROR("You are using an old libdrm that is not compatible with\n"
2377 "\tthe kernel DRM module. Please upgrade your libdrm.\n");
2380 if (arg->major != DRM_BO_INIT_MAJOR) {
2381 DRM_ERROR("libdrm and kernel DRM buffer object interface major\n"
2382 "\tversion don't match. Got %d, expected %d.\n",
2383 arg->major, DRM_BO_INIT_MAJOR);
2387 mutex_lock(&dev->struct_mutex);
2388 if (!bm->initialized) {
2389 DRM_ERROR("DRM memory manager was not initialized.\n");
2392 if (arg->mem_type == 0) {
2393 DRM_ERROR("System memory buffers already initialized.\n");
2396 ret = drm_bo_init_mm(dev, arg->mem_type,
2397 arg->p_offset, arg->p_size);
2400 mutex_unlock(&dev->struct_mutex);
2401 (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2409 int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2411 struct drm_mm_type_arg *arg = data;
2412 struct drm_buffer_manager *bm = &dev->bm;
2413 struct drm_bo_driver *driver = dev->driver->bo_driver;
2417 DRM_ERROR("Buffer objects are not supported by this driver\n");
2421 ret = drm_bo_write_lock(&bm->bm_lock, 1, file_priv);
2425 mutex_lock(&dev->struct_mutex);
2427 if (!bm->initialized) {
2428 DRM_ERROR("DRM memory manager was not initialized\n");
2431 if (arg->mem_type == 0) {
2432 DRM_ERROR("No takedown for System memory buffers.\n");
2436 if (drm_bo_clean_mm(dev, arg->mem_type)) {
2437 DRM_ERROR("Memory manager type %d not clean. "
2438 "Delaying takedown\n", arg->mem_type);
2441 mutex_unlock(&dev->struct_mutex);
2442 (void) drm_bo_write_unlock(&bm->bm_lock, file_priv);
2450 int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
2452 struct drm_mm_type_arg *arg = data;
2453 struct drm_bo_driver *driver = dev->driver->bo_driver;
2457 DRM_ERROR("Buffer objects are not supported by this driver\n");
2461 if (arg->lock_flags & DRM_BO_LOCK_IGNORE_NO_EVICT) {
2462 DRM_ERROR("Lock flag DRM_BO_LOCK_IGNORE_NO_EVICT not supported yet.\n");
2466 if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2467 ret = drm_bo_write_lock(&dev->bm.bm_lock, 1, file_priv);
2472 mutex_lock(&dev->struct_mutex);
2473 ret = drm_bo_lock_mm(dev, arg->mem_type);
2474 mutex_unlock(&dev->struct_mutex);
2476 (void) drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2483 int drm_mm_unlock_ioctl(struct drm_device *dev,
2485 struct drm_file *file_priv)
2487 struct drm_mm_type_arg *arg = data;
2488 struct drm_bo_driver *driver = dev->driver->bo_driver;
2492 DRM_ERROR("Buffer objects are not supported by this driver\n");
2496 if (arg->lock_flags & DRM_BO_LOCK_UNLOCK_BM) {
2497 ret = drm_bo_write_unlock(&dev->bm.bm_lock, file_priv);
2506 * buffer object vm functions.
2509 int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem)
2511 struct drm_buffer_manager *bm = &dev->bm;
2512 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2514 if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) {
2515 if (mem->mem_type == DRM_BO_MEM_LOCAL)
2518 if (man->flags & _DRM_FLAG_MEMTYPE_CMA)
2521 if (mem->flags & DRM_BO_FLAG_CACHED)
2526 EXPORT_SYMBOL(drm_mem_reg_is_pci);
2529 * \c Get the PCI offset for the buffer object memory.
2531 * \param bo The buffer object.
2532 * \param bus_base On return the base of the PCI region
2533 * \param bus_offset On return the byte offset into the PCI region
2534 * \param bus_size On return the byte size of the buffer object or zero if
2535 * the buffer object memory is not accessible through a PCI region.
2536 * \return Failure indication.
2538 * Returns -EINVAL if the buffer object is currently not mappable.
2539 * Otherwise returns zero.
2542 int drm_bo_pci_offset(struct drm_device *dev,
2543 struct drm_bo_mem_reg *mem,
2544 unsigned long *bus_base,
2545 unsigned long *bus_offset, unsigned long *bus_size)
2547 struct drm_buffer_manager *bm = &dev->bm;
2548 struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
2551 if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE))
2554 if (drm_mem_reg_is_pci(dev, mem)) {
2555 *bus_offset = mem->mm_node->start << PAGE_SHIFT;
2556 *bus_size = mem->num_pages << PAGE_SHIFT;
2557 *bus_base = man->io_offset;
2564 * \c Kill all user-space virtual mappings of this buffer object.
2566 * \param bo The buffer object.
2568 * Call bo->mutex locked.
2571 void drm_bo_unmap_virtual(struct drm_buffer_object *bo)
2573 struct drm_device *dev = bo->dev;
2574 loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT;
2575 loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
2577 if (!dev->dev_mapping)
2580 unmap_mapping_range(dev->dev_mapping, offset, holelen, 1);
2583 static void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo)
2585 struct drm_map_list *list;
2586 drm_local_map_t *map;
2587 struct drm_device *dev = bo->dev;
2589 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2590 if (bo->type != drm_bo_type_dc)
2593 list = &bo->map_list;
2594 if (list->user_token) {
2595 drm_ht_remove_item(&dev->map_hash, &list->hash);
2596 list->user_token = 0;
2598 if (list->file_offset_node) {
2599 drm_mm_put_block(list->file_offset_node);
2600 list->file_offset_node = NULL;
2607 drm_free(map, sizeof(*map), DRM_MEM_BUFOBJ);
2609 list->user_token = 0ULL;
2610 drm_bo_usage_deref_locked(&bo);
2613 static int drm_bo_setup_vm_locked(struct drm_buffer_object *bo)
2615 struct drm_map_list *list = &bo->map_list;
2616 drm_local_map_t *map;
2617 struct drm_device *dev = bo->dev;
2619 DRM_ASSERT_LOCKED(&dev->struct_mutex);
2620 list->map = drm_calloc(1, sizeof(*map), DRM_MEM_BUFOBJ);
2626 map->type = _DRM_TTM;
2627 map->flags = _DRM_REMOVABLE;
2628 map->size = bo->mem.num_pages * PAGE_SIZE;
2629 atomic_inc(&bo->usage);
2630 map->handle = (void *)bo;
2632 list->file_offset_node = drm_mm_search_free(&dev->offset_manager,
2633 bo->mem.num_pages, 0, 0);
2635 if (!list->file_offset_node) {
2636 drm_bo_takedown_vm_locked(bo);
2640 list->file_offset_node = drm_mm_get_block(list->file_offset_node,
2641 bo->mem.num_pages, 0);
2642 if (!list->file_offset_node) {
2643 drm_bo_takedown_vm_locked(bo);
2647 list->hash.key = list->file_offset_node->start;
2648 if (drm_ht_insert_item(&dev->map_hash, &list->hash)) {
2649 drm_bo_takedown_vm_locked(bo);
2653 list->user_token = ((uint64_t) list->hash.key) << PAGE_SHIFT;
2658 int drm_bo_version_ioctl(struct drm_device *dev, void *data,
2659 struct drm_file *file_priv)
2661 struct drm_bo_version_arg *arg = (struct drm_bo_version_arg *)data;
2663 arg->major = DRM_BO_INIT_MAJOR;
2664 arg->minor = DRM_BO_INIT_MINOR;
2665 arg->patchlevel = DRM_BO_INIT_PATCH;