/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#ifndef _DRM_OBJECTS_H
#define _DRM_OBJECTS_H

struct drm_device;
struct drm_bo_mem_reg;
/***************************************************
 * User space objects. (drm_object.c)
 */

#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
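/*
 * Example: given a struct that embeds a drm_user_object (such as
 * struct drm_fence_object below, whose member is named "base"),
 * drm_user_object_entry(uo, struct drm_fence_object, base) recovers the
 * containing fence object from a pointer to its embedded user object.
 */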
enum drm_object_type {
	drm_fence_type,
	drm_buffer_type,
	drm_lock_type,
	/*
	 * Add other user space object types here.
	 */
	drm_driver_type0 = 256,
	drm_driver_type1,
	drm_driver_type2,
	drm_driver_type3,
	drm_driver_type4
};
/*
 * A user object is a structure that helps the drm give out user handles
 * to kernel internal objects and to keep track of these objects so that
 * they can be destroyed, for example when the user space process exits.
 * Designed to be accessible using a user space 32-bit handle.
 */
struct drm_user_object {
	struct drm_hash_item hash;
	struct list_head list;
	enum drm_object_type type;
	atomic_t refcount;
	int shareable;
	struct drm_file *owner;
	void (*ref_struct_locked) (struct drm_file *priv,
				   struct drm_user_object *obj,
				   enum drm_ref_type ref_action);
	void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
		       enum drm_ref_type unref_action);
	void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
};
/*
 * A ref object is a structure used to keep track of references to user
 * objects so that they can be destroyed, for example when the user space
 * process exits. Designed to be accessible using a pointer to the _user_ object.
 */
struct drm_ref_object {
	struct drm_hash_item hash;
	struct list_head list;
	atomic_t refcount;
	enum drm_ref_type unref_action;
};
/*
 * Must be called with the struct_mutex held.
 */

extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
			       int shareable);
/*
 * Must be called with the struct_mutex held.
 */

extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
						      uint32_t key);
/*
 * Must be called with the struct_mutex held. May temporarily release it.
 */

extern int drm_add_ref_object(struct drm_file *priv,
			      struct drm_user_object *referenced_object,
			      enum drm_ref_type ref_action);
/*
 * Must be called with the struct_mutex held.
 */

struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
					     struct drm_user_object *referenced_object,
					     enum drm_ref_type ref_action);
/*
 * Must be called with the struct_mutex held.
 * If "item" has been obtained by a call to drm_lookup_ref_object, you may not
 * release the struct_mutex before calling drm_remove_ref_object.
 * This function may temporarily release the struct_mutex.
 */
extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
			       enum drm_object_type type,
			       struct drm_user_object **object);
extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
				 enum drm_object_type type);
/***************************************************
 * Fence objects. (drm_fence.c)
 */
struct drm_fence_object {
	struct drm_user_object base;
	struct drm_device *dev;
	atomic_t usage;

	/* The fields below are protected by the fence manager spinlock. */
	struct list_head ring;
	int fence_class;
	uint32_t native_types;
	uint32_t type;
	uint32_t signaled_types;
	uint32_t sequence;
	uint32_t waiting_types;
	uint32_t error;
};
#define _DRM_FENCE_CLASSES 8

struct drm_fence_class_manager {
	struct list_head ring;
	uint32_t pending_flush;
	uint32_t waiting_types;
	wait_queue_head_t fence_queue;
	uint32_t highest_waiting_sequence;
	uint32_t latest_queued_sequence;
};
struct drm_fence_manager {
	int initialized;
	rwlock_t lock;
	struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
	uint32_t num_classes;
	atomic_t count;
};
struct drm_fence_driver {
	unsigned long *waiting_jiffies;
	uint32_t num_classes;
	uint32_t wrap_diff;
	uint32_t flush_diff;
	uint32_t sequence_mask;

	/*
	 * Driver implemented functions:
	 *
	 * has_irq() : 1 if the hardware can update the indicated type_flags using an
	 * irq handler. 0 if polling is required.
	 *
	 * emit() : Emit a sequence number to the command stream.
	 * Return the sequence number.
	 *
	 * flush() : Make sure the flags indicated in fc->pending_flush will eventually
	 * signal for fc->highest_received_sequence and all preceding sequences.
	 * Acknowledge by clearing the flags fc->pending_flush.
	 *
	 * poll() : Call drm_fence_handler with any new information.
	 *
	 * needed_flush() : Given the current state of the fence->type flags and previously
	 * executed or queued flushes, return the type_flags that need flushing.
	 *
	 * wait(): Wait for the "mask" flags to signal on a given fence, performing
	 * whatever's necessary to make this happen.
	 */
	int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
			uint32_t flags);
	int (*emit) (struct drm_device *dev, uint32_t fence_class,
		     uint32_t flags, uint32_t *breadcrumb,
		     uint32_t *native_type);
	void (*flush) (struct drm_device *dev, uint32_t fence_class);
	void (*poll) (struct drm_device *dev, uint32_t fence_class,
		      uint32_t types);
	uint32_t (*needed_flush) (struct drm_fence_object *fence);
	int (*wait) (struct drm_fence_object *fence, int lazy,
		     int interruptible, uint32_t mask);
};
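/*
 * Example (hedged sketch): a minimal emit() hook for a driver with a single
 * fence class might just write the next breadcrumb to the ring; "my_dev"
 * and my_write_breadcrumb() are hypothetical driver internals.
 *
 *	static int my_fence_emit(struct drm_device *dev, uint32_t fence_class,
 *				 uint32_t flags, uint32_t *breadcrumb,
 *				 uint32_t *native_type)
 *	{
 *		struct my_dev *mdev = dev->dev_private;
 *
 *		*breadcrumb = ++mdev->fence_sequence;
 *		*native_type = 0;
 *		my_write_breadcrumb(mdev, *breadcrumb);
 *		return 0;
 *	}
 */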
extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
				  int interruptible, uint32_t mask,
				  unsigned long end_jiffies);
extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
			      uint32_t sequence, uint32_t type,
			      uint32_t error);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
				uint32_t sequence);
extern int drm_fence_object_flush(struct drm_fence_object *fence,
				  uint32_t type);
extern int drm_fence_object_signaled(struct drm_fence_object *fence,
				     uint32_t type);
extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
					 struct drm_fence_object *src);
extern int drm_fence_object_wait(struct drm_fence_object *fence,
				 int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
				   uint32_t fence_flags, uint32_t fence_class,
				   struct drm_fence_object **c_fence);
extern int drm_fence_object_emit(struct drm_fence_object *fence,
				 uint32_t fence_flags, uint32_t class,
				 uint32_t type);
extern void drm_fence_fill_arg(struct drm_fence_object *fence,
			       struct drm_fence_arg *arg);

extern int drm_fence_add_user_object(struct drm_file *priv,
				     struct drm_fence_object *fence,
				     int shareable);
extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
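/*
 * Example (hedged sketch): typical in-kernel fence use pairs create (with
 * an emit flag) and wait. DRM_FENCE_TYPE_EXE and DRM_FENCE_FLAG_EMIT are
 * the kind of constants the shared drm header provides; error handling is
 * abbreviated.
 *
 *	struct drm_fence_object *fence;
 *	int ret;
 *
 *	ret = drm_fence_object_create(dev, DRM_FENCE_TYPE_EXE,
 *				      DRM_FENCE_FLAG_EMIT, fence_class,
 *				      &fence);
 *	if (ret)
 *		return ret;
 *	ret = drm_fence_object_wait(fence, 0, 0, DRM_FENCE_TYPE_EXE);
 *	drm_fence_usage_deref_unlocked(&fence);
 */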
/**************************************************
 * TTMs
 */

/*
 * The ttm backend GTT interface. (In our case AGP.)
 * Any similar type of device (PCIE?) needs only to implement these
 * functions to be usable with the TTM interface.
 * The AGP backend implementation lives in drm_agpsupport.c and basically
 * maps these calls to the available functions in agpgart.
 * Each drm device driver gets an additional function pointer that creates
 * these types, so that the device can choose the correct aperture.
 * (Multiple AGP apertures, etc.)
 * Most device drivers will let this point to the standard AGP implementation.
 */
#define DRM_BE_FLAG_NEEDS_FREE     0x00000001
#define DRM_BE_FLAG_BOUND_CACHED   0x00000002

struct drm_ttm_backend;
struct drm_ttm_backend_func {
	int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
	int (*populate) (struct drm_ttm_backend *backend,
			 unsigned long num_pages, struct page **pages,
			 struct page *dummy_read_page);
	void (*clear) (struct drm_ttm_backend *backend);
	int (*bind) (struct drm_ttm_backend *backend,
		     struct drm_bo_mem_reg *bo_mem);
	int (*unbind) (struct drm_ttm_backend *backend);
	void (*destroy) (struct drm_ttm_backend *backend);
};
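/*
 * Example (hedged sketch): a device-specific backend fills in this table
 * with its own aperture operations; the my_* functions are hypothetical.
 *
 *	static struct drm_ttm_backend_func my_backend_func = {
 *		.needs_ub_cache_adjust = my_needs_ub_cache_adjust,
 *		.populate = my_populate,
 *		.clear = my_clear,
 *		.bind = my_bind,
 *		.unbind = my_unbind,
 *		.destroy = my_destroy,
 *	};
 */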
/**
 * This structure associates a set of flags and methods with a drm_ttm
 * object, and will also be subclassed by the particular backend.
 *
 * \sa #drm_agp_ttm_backend
 */
struct drm_ttm_backend {
	struct drm_device *dev;
	uint32_t flags;
	struct drm_ttm_backend_func *func;
};
struct drm_ttm {
	struct page *dummy_read_page;
	struct page **pages;
	long first_himem_page;
	long last_lomem_page;
	uint32_t page_flags;
	unsigned long num_pages;
	atomic_t vma_count;
	struct drm_device *dev;
	int destroy;
	uint32_t mapping_offset;
	struct drm_ttm_backend *be;
	unsigned long highest_lomem_entry;
	unsigned long lowest_himem_entry;
	enum {
		ttm_bound, ttm_evicted, ttm_unbound, ttm_unpopulated,
	} state;
};
extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
				      uint32_t page_flags,
				      struct page *dummy_read_page);
extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
extern void drm_ttm_unbind(struct drm_ttm *ttm);
extern void drm_ttm_evict(struct drm_ttm *ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages);
extern int drm_ttm_populate(struct drm_ttm *ttm);
extern int drm_ttm_set_user(struct drm_ttm *ttm,
			    struct task_struct *tsk,
			    unsigned long start,
			    unsigned long num_pages);
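/*
 * Example (hedged sketch): the usual ttm lifecycle is create, bind into a
 * memory region, and destroy on failure or teardown; dev->bm is assumed to
 * be the device's drm_buffer_manager and bo_mem a prepared memory region.
 *
 *	struct drm_ttm *ttm;
 *	int ret;
 *
 *	ttm = drm_ttm_create(dev, bo_mem->num_pages << PAGE_SHIFT, 0,
 *			     dev->bm.dummy_read_page);
 *	if (!ttm)
 *		return -ENOMEM;
 *	ret = drm_ttm_bind(ttm, bo_mem);
 *	if (ret)
 *		drm_ttm_destroy(ttm);
 */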
/*
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
 * this, which calls this function iff there are no vmas referencing it anymore.
 * Otherwise it is called when the last vma exits.
 */

extern int drm_ttm_destroy(struct drm_ttm *ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
	(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}

#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
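/*
 * DRM_FLAG_MASKED replaces exactly the bits of _old selected by _mask with
 * the corresponding bits of _new: for _old = 0x0a, _new = 0x05 and
 * _mask = 0x03, _old becomes 0x09 (only the low two bits are taken
 * from _new).
 */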
/* Page flags. */

/* This ttm should not be cached by the CPU */
#define DRM_TTM_PAGE_UNCACHED   (1 << 0)
/* This flag is not used at this time; I don't know what the intent was. */
#define DRM_TTM_PAGE_USED       (1 << 1)
/* This flag is not used at this time; I don't know what the intent was. */
#define DRM_TTM_PAGE_BOUND      (1 << 2)
/* This flag is not used at this time; I don't know what the intent was. */
#define DRM_TTM_PAGE_PRESENT    (1 << 3)
/* The array of page pointers was allocated with vmalloc instead of drm_calloc. */
#define DRM_TTM_PAGEDIR_VMALLOC (1 << 4)
/* This ttm is mapped from user space */
#define DRM_TTM_PAGE_USER       (1 << 5)
/* This ttm will be written to by the GPU */
#define DRM_TTM_PAGE_WRITE      (1 << 6)
/* This ttm was mapped to the GPU, and so the contents may have been modified. */
#define DRM_TTM_PAGE_USER_DIRTY (1 << 7)
/* This flag is not used at this time; I don't know what the intent was. */
#define DRM_TTM_PAGE_USER_DMA   (1 << 8)
/***************************************************
 * Buffer objects. (drm_bo.c, drm_bo_move.c)
 */
struct drm_bo_mem_reg {
	struct drm_mm_node *mm_node;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	/*
	 * Current buffer status flags, indicating
	 * where the buffer is located and which
	 * access modes are in effect
	 */
	uint64_t flags;
	/*
	 * These are the flags proposed for
	 * a validate operation. If the
	 * validate succeeds, they'll get moved
	 * into the flags field
	 */
	uint64_t proposed_flags;

	uint32_t desired_tile_stride;
	uint32_t hw_tile_stride;
};
enum drm_bo_type {
	/*
	 * drm_bo_type_device are 'normal' drm allocations,
	 * pages are allocated from within the kernel automatically
	 * and the objects can be mmap'd from the drm device. Each
	 * drm_bo_type_device object has a unique name which can be
	 * used by other processes to share access to the underlying
	 * buffer.
	 */
	drm_bo_type_device,
	/*
	 * drm_bo_type_user are buffers of pages that already exist
	 * in the process address space. They are more limited than
	 * drm_bo_type_device buffers in that they must always
	 * remain cached (as we assume the user pages are mapped cached),
	 * and they are not sharable to other processes through DRM
	 * (although, regular shared memory should still work fine).
	 */
	drm_bo_type_user,
	/*
	 * drm_bo_type_kernel are buffers that exist solely for use
	 * within the kernel. The pages cannot be mapped into the
	 * process. One obvious use would be for the ring
	 * buffer where user access would not (ideally) be required.
	 */
	drm_bo_type_kernel,
};
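/*
 * Example (hedged sketch): a kernel-only ring buffer would be created as
 * drm_bo_type_kernel via drm_buffer_object_create(), declared further
 * below; the DRM_BO_* flag values are illustrative placeholders from the
 * shared drm header.
 *
 *	struct drm_buffer_object *ring_bo;
 *	int ret;
 *
 *	ret = drm_buffer_object_create(dev, ring_size, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_MEM_VRAM |
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
 *				       0, 0, 0, &ring_bo);
 */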
struct drm_buffer_object {
	struct drm_device *dev;
	struct drm_user_object base;

	/*
	 * If there is a possibility that the usage variable is zero,
	 * then dev->struct_mutex should be locked before incrementing it.
	 */

	atomic_t usage;
	unsigned long buffer_start;
	enum drm_bo_type type;
	unsigned long offset;
	atomic_t mapped;
	struct drm_bo_mem_reg mem;

	struct list_head lru;
	struct list_head ddestroy;

	uint32_t fence_type;
	uint32_t fence_class;
	uint32_t new_fence_type;
	uint32_t new_fence_class;
	struct drm_fence_object *fence;
	uint32_t priv_flags;
	wait_queue_head_t event_queue;
	struct mutex mutex;
	unsigned long num_pages;

	/* For pinned buffers */
	struct drm_mm_node *pinned_node;
	uint32_t pinned_mem_type;
	struct list_head pinned_lru;

	/* For vm */
	struct drm_ttm *ttm;
	struct drm_map_list map_list;
	uint32_t memory_type;
	unsigned long bus_offset;

#ifdef DRM_ODD_MM_COMPAT
	/* dev->struct_mutex only protected. */
	struct list_head vma_list;
	struct list_head p_mm_list;
#endif
};
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED  0x00000002

/*
 * This flag indicates that a function called with bo->mutex held has
 * temporarily released the buffer object mutex (usually to wait for something),
 * and thus any post-lock validation needs to be rerun.
 */

#define _DRM_BO_FLAG_UNLOCKED 0x00000004
struct drm_mem_type_manager {
	int has_type;
	int use_type;
	int kern_init_type;
	struct drm_mm manager;
	struct list_head lru;
	struct list_head pinned;
	uint32_t flags;
	uint32_t drm_bus_maptype;
	unsigned long gpu_offset;
	unsigned long io_offset;
	unsigned long io_size;
	void *io_addr;
	uint64_t size;		/* size of managed area for reporting to userspace */
};
struct drm_bo_lock {
	struct drm_user_object base;
	wait_queue_head_t queue;
	atomic_t write_lock_pending;
	atomic_t readers;
};
#define _DRM_FLAG_MEMTYPE_FIXED     0x00000001	/* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE  0x00000002	/* Memory mappable */
#define _DRM_FLAG_MEMTYPE_CACHED    0x00000004	/* Cached binding */
#define _DRM_FLAG_NEEDS_IOREMAP     0x00000008	/* Fixed memory needs ioremap
						   before kernel access. */
#define _DRM_FLAG_MEMTYPE_CMA       0x00000010	/* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020	/* Select caching */
struct drm_buffer_manager {
	struct drm_bo_lock bm_lock;
	struct mutex evict_mutex;
	int nice_mode;
	int initialized;
	struct drm_file *last_to_validate;
	struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
	struct list_head unfenced;
	struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct work_struct wq;
#else
	struct delayed_work wq;
#endif
	unsigned long cur_pages;
	atomic_t count;
	struct page *dummy_read_page;
};
struct drm_bo_driver {
	const uint32_t *mem_type_prio;
	const uint32_t *mem_busy_prio;
	uint32_t num_mem_type_prio;
	uint32_t num_mem_busy_prio;
	struct drm_ttm_backend *(*create_ttm_backend_entry)
	 (struct drm_device *dev);
	int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
			   uint32_t *type);
	int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
	int (*init_mem_type) (struct drm_device *dev, uint32_t type,
			      struct drm_mem_type_manager *man);

	/*
	 * evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */
	uint64_t (*evict_flags) (struct drm_buffer_object *bo);

	/*
	 * move:
	 *
	 * @bo: the buffer to move
	 *
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 *
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 *
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct drm_buffer_object *bo,
		     int evict, int no_wait, struct drm_bo_mem_reg *new_mem);

	/*
	 * ttm_cache_flush
	 */
	void (*ttm_cache_flush)(struct drm_ttm *ttm);

	/*
	 * command_stream_barrier
	 *
	 * @dev: The drm device.
	 *
	 * @bo: The buffer object to validate.
	 *
	 * @new_fence_class: The new fence class for the buffer object.
	 *
	 * @new_fence_type: The new fence type for the buffer object.
	 *
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this operation would require sleeping
	 *
	 * Insert a command stream barrier that makes sure that the
	 * buffer is idle once the commands associated with the
	 * current validation are starting to execute. If an error
	 * condition is returned, or the function pointer is NULL,
	 * the drm core will force buffer idle
	 * during validation.
	 */
	int (*command_stream_barrier) (struct drm_buffer_object *bo,
				       uint32_t new_fence_class,
				       uint32_t new_fence_type,
				       int no_wait);
};
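/*
 * Example (hedged sketch): a driver's move() hook commonly attempts an
 * accelerated copy and falls back to the generic helpers declared further
 * below; my_move_blit() is a hypothetical driver function.
 *
 *	static int my_bo_move(struct drm_buffer_object *bo, int evict,
 *			      int no_wait, struct drm_bo_mem_reg *new_mem)
 *	{
 *		if (my_move_blit(bo, evict, no_wait, new_mem) == 0)
 *			return 0;
 *		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 *	}
 */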
/*
 * buffer objects (drm_bo.c)
 */

extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_set_pin(struct drm_device *dev, struct drm_buffer_object *bo, int pin);
extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
			     struct drm_bo_mem_reg *mem,
			     unsigned long *bus_base,
			     unsigned long *bus_offset,
			     unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);
extern int drm_bo_add_user_object(struct drm_file *file_priv,
				  struct drm_buffer_object *bo, int shareable);
extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
extern void drm_putback_buffer_objects(struct drm_device *dev);
extern int drm_fence_buffer_objects(struct drm_device *dev,
				    struct list_head *list,
				    uint32_t fence_flags,
				    struct drm_fence_object *fence,
				    struct drm_fence_object **used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
				    enum drm_bo_type type, uint64_t flags,
				    uint32_t hint, uint32_t page_alignment,
				    unsigned long buffer_start,
				    struct drm_buffer_object **bo);
extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
		       int no_wait, int check_unfenced);
extern int drm_bo_mem_space(struct drm_buffer_object *bo,
			    struct drm_bo_mem_reg *mem, int no_wait);
extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
			      uint64_t new_mem_flags,
			      int no_wait, int move_unfenced);
extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean);
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
			  unsigned long p_offset, unsigned long p_size,
			  int kern_init);
extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
				  uint64_t flags, uint64_t mask, uint32_t hint,
				  uint32_t fence_class,
				  struct drm_bo_info_rep *rep,
				  struct drm_buffer_object **bo_rep);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
							   uint32_t handle,
							   int check_owner);
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
			      uint64_t flags, uint64_t mask, uint32_t hint,
			      uint32_t fence_class,
			      struct drm_bo_info_rep *rep);
extern int drm_bo_evict_cached(struct drm_buffer_object *bo);

extern void drm_bo_takedown_vm_locked(struct drm_buffer_object *bo);
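/*
 * Example (hedged sketch): pinning an already-referenced buffer into VRAM
 * can be phrased as a validate call; the DRM_BO_* flag, mask and hint
 * values are the kind provided by the shared drm header and are used here
 * purely for illustration.
 *
 *	ret = drm_bo_do_validate(bo, DRM_BO_FLAG_MEM_VRAM, DRM_BO_MASK_MEM,
 *				 DRM_BO_HINT_DONT_BLOCK, fence_class, NULL);
 */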
/*
 * Buffer object memory move- and map helpers.
 * (drm_bo_move.c)
 */

extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
			   int evict, int no_wait,
			   struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
			      int evict,
			      int no_wait, struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
				     int evict, int no_wait,
				     uint32_t fence_class, uint32_t fence_type,
				     uint32_t fence_flags,
				     struct drm_bo_mem_reg *new_mem);
extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
extern unsigned long drm_bo_offset_end(unsigned long offset,
				       unsigned long end);
struct drm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		bo_map_iomap, bo_map_vmap, bo_map_kmap, bo_map_premapped,
	} bo_kmap_type;
};

static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
{
	*is_iomem = (map->bo_kmap_type == bo_map_iomap ||
		     map->bo_kmap_type == bo_map_premapped);
	return map->virtual;
}

extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
		       unsigned long num_pages, struct drm_bo_kmap_obj *map);
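/*
 * Example (hedged sketch): mapping the first page of a buffer for CPU
 * access and distinguishing io memory from system memory.
 *
 *	struct drm_bo_kmap_obj kmap;
 *	int is_iomem, ret;
 *	void *virtual;
 *
 *	ret = drm_bo_kmap(bo, 0, 1, &kmap);
 *	if (ret)
 *		return ret;
 *	virtual = drm_bmo_virtual(&kmap, &is_iomem);
 *	... access the page, using the io variants of memcpy when
 *	    is_iomem is set ...
 *	drm_bo_kunmap(&kmap);
 */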
extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
			   unsigned long dst_offset,
			   unsigned long *pfn,
			   pgprot_t *prot);
extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
				struct drm_bo_info_rep *rep);
/*
 * drm_regman.c
 */

struct drm_reg {
	struct list_head head;
	struct drm_fence_object *fence;
	uint32_t fence_type;
	uint32_t new_fence_type;
};

struct drm_reg_manager {
	struct list_head free;
	struct list_head lru;
	struct list_head unfenced;
	int (*reg_reusable)(const struct drm_reg *reg, const void *data);
	void (*reg_destroy)(struct drm_reg *reg);
};
extern int drm_regs_alloc(struct drm_reg_manager *manager,
			  const void *data,
			  uint32_t fence_class,
			  uint32_t fence_type,
			  int interruptible,
			  int no_wait,
			  struct drm_reg **reg);
extern void drm_regs_fence(struct drm_reg_manager *regs,
			   struct drm_fence_object *fence);
extern void drm_regs_free(struct drm_reg_manager *manager);
extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
extern void drm_regs_init(struct drm_reg_manager *manager,
			  int (*reg_reusable)(const struct drm_reg *,
					      const void *),
			  void (*reg_destroy)(struct drm_reg *));
extern int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
			       void **virtual);
extern void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
				void *virtual);
/*
 * drm_bo_lock.c
 * Simple replacement for the hardware lock on buffer manager init and clean.
 */

extern void drm_bo_init_lock(struct drm_bo_lock *lock);
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
extern int drm_bo_read_lock(struct drm_bo_lock *lock,
			    int interruptible);
extern int drm_bo_write_lock(struct drm_bo_lock *lock,
			     int interruptible,
			     struct drm_file *file_priv);
extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
			       struct drm_file *file_priv);
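/*
 * Example (hedged sketch): paths that must exclude buffer manager init and
 * takedown take the read side of this lock; dev->bm is assumed to be the
 * device's drm_buffer_manager.
 *
 *	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
 *	if (ret)
 *		return ret;
 *	... validate or map buffers ...
 *	drm_bo_read_unlock(&dev->bm.bm_lock);
 */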
#ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex)					\
	BUG_ON(!mutex_is_locked(_mutex) ||				\
	       ((_mutex)->owner != current_thread_info()))
#else
#define DRM_ASSERT_LOCKED(_mutex)
#endif

#endif /* _DRM_OBJECTS_H */