/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */
#ifndef _DRM_OBJECTS_H
#define _DRM_OBJECTS_H

struct drm_device;
struct drm_bo_mem_reg;
/***************************************************
 * User space objects. (drm_object.c)
 */

#define drm_user_object_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
enum drm_object_type {
	drm_fence_type,
	drm_buffer_type,
	/*
	 * Add other user space object types here.
	 */
	drm_driver_type0 = 256,
	drm_driver_type1,
	drm_driver_type2,
	drm_driver_type3,
	drm_driver_type4
};
/*
 * A user object is a structure that helps the drm give out user handles
 * to kernel internal objects and to keep track of these objects so that
 * they can be destroyed, for example when the user space process exits.
 * Designed to be accessible using a user space 32-bit handle.
 */
struct drm_user_object {
	struct drm_hash_item hash;
	struct list_head list;
	enum drm_object_type type;
	atomic_t refcount;
	int shareable;
	struct drm_file *owner;
	void (*ref_struct_locked) (struct drm_file *priv,
				   struct drm_user_object *obj,
				   enum drm_ref_type ref_action);
	void (*unref) (struct drm_file *priv, struct drm_user_object *obj,
		       enum drm_ref_type unref_action);
	void (*remove) (struct drm_file *priv, struct drm_user_object *obj);
};
/*
 * A ref object is a structure used to keep track of references to user
 * objects, so that the references can be destroyed, for example when the
 * user space process exits. Designed to be accessible using a pointer
 * to the _user_ object.
 */
struct drm_ref_object {
	struct drm_hash_item hash;
	struct list_head list;
	atomic_t refcount;
	enum drm_ref_type unref_action;
};
/*
 * Must be called with the struct_mutex held.
 */

extern int drm_add_user_object(struct drm_file *priv, struct drm_user_object *item,
			       int shareable);
/*
 * Must be called with the struct_mutex held.
 */

extern struct drm_user_object *drm_lookup_user_object(struct drm_file *priv,
						      uint32_t key);
/*
 * Must be called with the struct_mutex held. May temporarily release it.
 */

extern int drm_add_ref_object(struct drm_file *priv,
			      struct drm_user_object *referenced_object,
			      enum drm_ref_type ref_action);
/*
 * Must be called with the struct_mutex held.
 */

extern struct drm_ref_object *drm_lookup_ref_object(struct drm_file *priv,
						    struct drm_user_object *referenced_object,
						    enum drm_ref_type ref_action);
/*
 * Must be called with the struct_mutex held.
 * If "item" has been obtained by a call to drm_lookup_ref_object, you may not
 * release the struct_mutex before calling drm_remove_ref_object.
 * This function may temporarily release the struct_mutex.
 */

extern void drm_remove_ref_object(struct drm_file *priv, struct drm_ref_object *item);
extern int drm_user_object_ref(struct drm_file *priv, uint32_t user_token,
			       enum drm_object_type type,
			       struct drm_user_object **object);
extern int drm_user_object_unref(struct drm_file *priv, uint32_t user_token,
				 enum drm_object_type type);
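/*
 * Example (illustrative sketch, not part of the original header): taking
 * and dropping a reference on a user object from an ioctl handler, given
 * a user space handle. "arg->handle" is a hypothetical ioctl argument;
 * error handling is abbreviated.
 *
 *	struct drm_user_object *uo;
 *	int ret;
 *
 *	ret = drm_user_object_ref(file_priv, arg->handle, drm_fence_type, &uo);
 *	if (ret)
 *		return ret;
 *	...use the object...
 *	ret = drm_user_object_unref(file_priv, arg->handle, drm_fence_type);
 */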
/***************************************************
 * Fence objects. (drm_fence.c)
 */
struct drm_fence_object {
	struct drm_user_object base;
	struct drm_device *dev;
	atomic_t usage;

	/*
	 * The below fields are protected by the fence manager spinlock.
	 */

	struct list_head ring;
	int fence_class;
	uint32_t native_types;
	uint32_t type;
	uint32_t signaled_types;
	uint32_t sequence;
	uint32_t waiting_types;
	uint32_t error;
};
#define _DRM_FENCE_CLASSES 8
#define _DRM_FENCE_TYPE_EXE 0x00
struct drm_fence_class_manager {
	struct list_head ring;
	uint32_t pending_flush;
	uint32_t waiting_types;
	wait_queue_head_t fence_queue;
	uint32_t highest_waiting_sequence;
	uint32_t latest_queued_sequence;
};
struct drm_fence_manager {
	int initialized;
	rwlock_t lock;
	struct drm_fence_class_manager fence_class[_DRM_FENCE_CLASSES];
	uint32_t num_classes;
	atomic_t count;
};
struct drm_fence_driver {
	unsigned long *waiting_jiffies;
	uint32_t num_classes;
	uint32_t wrap_diff;
	uint32_t flush_diff;
	uint32_t sequence_mask;
	/*
	 * Driver implemented functions:
	 * has_irq() : 1 if the hardware can update the indicated type_flags using an
	 * irq handler. 0 if polling is required.
	 *
	 * emit() : Emit a sequence number to the command stream.
	 * Return the sequence number.
	 *
	 * flush() : Make sure the flags indicated in fc->pending_flush will eventually
	 * signal for fc->highest_received_sequence and all preceding sequences.
	 * Acknowledge by clearing the flags fc->pending_flush.
	 *
	 * poll() : Call drm_fence_handler with any new information.
	 *
	 * needed_flush() : Given the current state of the fence->type flags and previously
	 * executed or queued flushes, return the type_flags that need flushing.
	 *
	 * wait(): Wait for the "mask" flags to signal on a given fence, performing
	 * whatever's necessary to make this happen.
	 */
	int (*has_irq) (struct drm_device *dev, uint32_t fence_class,
			uint32_t flags);
	int (*emit) (struct drm_device *dev, uint32_t fence_class,
		     uint32_t flags, uint32_t *breadcrumb,
		     uint32_t *native_type);
	void (*flush) (struct drm_device *dev, uint32_t fence_class);
	void (*poll) (struct drm_device *dev, uint32_t fence_class,
		      uint32_t types);
	uint32_t (*needed_flush) (struct drm_fence_object *fence);
	int (*wait) (struct drm_fence_object *fence, int lazy,
		     int interruptible, uint32_t mask);
};
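/*
 * Example (hedged sketch, not from the original header): how a driver
 * might instantiate drm_fence_driver. All xxx_* names are hypothetical
 * driver functions; the numeric fields assume a 32-bit hardware
 * breadcrumb. Callbacks a driver does not need may be left NULL.
 *
 *	static struct drm_fence_driver xxx_fence_driver = {
 *		.num_classes = 1,
 *		.wrap_diff = (1U << 30),
 *		.flush_diff = (1U << 29),
 *		.sequence_mask = 0xffffffffU,
 *		.has_irq = xxx_fence_has_irq,
 *		.emit = xxx_fence_emit_sequence,
 *		.flush = NULL,
 *		.poll = xxx_fence_poll,
 *		.needed_flush = NULL,
 *		.wait = NULL,
 *	};
 */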
extern int drm_fence_wait_polling(struct drm_fence_object *fence, int lazy,
				  int interruptible, uint32_t mask,
				  unsigned long end_jiffies);
extern void drm_fence_handler(struct drm_device *dev, uint32_t fence_class,
			      uint32_t sequence, uint32_t type,
			      uint32_t error);
extern void drm_fence_manager_init(struct drm_device *dev);
extern void drm_fence_manager_takedown(struct drm_device *dev);
extern void drm_fence_flush_old(struct drm_device *dev, uint32_t fence_class,
				uint32_t sequence);
extern int drm_fence_object_flush(struct drm_fence_object *fence,
				  uint32_t type);
extern int drm_fence_object_signaled(struct drm_fence_object *fence,
				     uint32_t type);
extern void drm_fence_usage_deref_locked(struct drm_fence_object **fence);
extern void drm_fence_usage_deref_unlocked(struct drm_fence_object **fence);
extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src);
extern void drm_fence_reference_unlocked(struct drm_fence_object **dst,
					 struct drm_fence_object *src);
extern int drm_fence_object_wait(struct drm_fence_object *fence,
				 int lazy, int ignore_signals, uint32_t mask);
extern int drm_fence_object_create(struct drm_device *dev, uint32_t type,
				   uint32_t fence_flags, uint32_t fence_class,
				   struct drm_fence_object **c_fence);
extern int drm_fence_object_emit(struct drm_fence_object *fence,
				 uint32_t fence_flags, uint32_t class,
				 uint32_t type);
extern void drm_fence_fill_arg(struct drm_fence_object *fence,
			       struct drm_fence_arg *arg);
extern int drm_fence_add_user_object(struct drm_file *priv,
				     struct drm_fence_object *fence,
				     int shareable);
extern int drm_fence_create_ioctl(struct drm_device *dev, void *data,
				  struct drm_file *file_priv);
extern int drm_fence_destroy_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
extern int drm_fence_reference_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv);
extern int drm_fence_unreference_ioctl(struct drm_device *dev, void *data,
				       struct drm_file *file_priv);
extern int drm_fence_signaled_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv);
extern int drm_fence_flush_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv);
extern int drm_fence_wait_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int drm_fence_emit_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv);
extern int drm_fence_buffers_ioctl(struct drm_device *dev, void *data,
				   struct drm_file *file_priv);
/**************************************************
 * TTM system.
 */

/*
 * The ttm backend GTT interface. (In our case AGP).
 * Any similar type of device (PCIE?)
 * needs only to implement these functions to be usable with the TTM
 * interface. The AGP backend implementation lives in drm_agpsupport.c
 * and basically maps these calls to available functions in agpgart.
 * Each drm device driver gets an additional function pointer that
 * creates these types, so that the device can choose the correct
 * aperture. (Multiple AGP apertures, etc.)
 * Most device drivers will let this point to the standard AGP
 * implementation.
 */
#define DRM_BE_FLAG_NEEDS_FREE 0x00000001
#define DRM_BE_FLAG_BOUND_CACHED 0x00000002
struct drm_ttm_backend;
struct drm_ttm_backend_func {
	int (*needs_ub_cache_adjust) (struct drm_ttm_backend *backend);
	int (*populate) (struct drm_ttm_backend *backend,
			 unsigned long num_pages, struct page **pages,
			 struct page *dummy_read_page);
	void (*clear) (struct drm_ttm_backend *backend);
	int (*bind) (struct drm_ttm_backend *backend,
		     struct drm_bo_mem_reg *bo_mem);
	int (*unbind) (struct drm_ttm_backend *backend);
	void (*destroy) (struct drm_ttm_backend *backend);
};
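/*
 * Example (hedged sketch, not from the original header): a driver that
 * provides its own backend would fill in a function table like the one
 * below and hand it out from its create_ttm_backend_entry hook. The
 * xxx_* functions are hypothetical driver implementations.
 *
 *	static struct drm_ttm_backend_func xxx_ttm_backend_func = {
 *		.needs_ub_cache_adjust = xxx_needs_ub_cache_adjust,
 *		.populate = xxx_populate,
 *		.clear = xxx_clear,
 *		.bind = xxx_bind,
 *		.unbind = xxx_unbind,
 *		.destroy = xxx_destroy,
 *	};
 */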
/*
 * This structure associates a set of flags and methods with a drm_ttm
 * object, and will also be subclassed by the particular backend.
 *
 * \sa #drm_agp_ttm_backend
 */

struct drm_ttm_backend {
	struct drm_device *dev;
	uint32_t flags;
	struct drm_ttm_backend_func *func;
};
struct drm_ttm {
	struct page *dummy_read_page;
	struct page **pages;
	long first_himem_page;
	long last_lomem_page;
	uint32_t page_flags;
	unsigned long num_pages;
	atomic_t vma_count;
	struct drm_device *dev;
	int destroy;
	uint32_t mapping_offset;
	struct drm_ttm_backend *be;
	unsigned long highest_lomem_entry;
	unsigned long lowest_himem_entry;
	enum {
		ttm_bound,
		ttm_evicted,
		ttm_unbound,
		ttm_unpopulated,
	} state;
};
extern struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
				      uint32_t page_flags,
				      struct page *dummy_read_page);
extern int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem);
extern void drm_ttm_unbind(struct drm_ttm *ttm);
extern void drm_ttm_evict(struct drm_ttm *ttm);
extern void drm_ttm_fixup_caching(struct drm_ttm *ttm);
extern struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index);
extern void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages);
extern int drm_ttm_populate(struct drm_ttm *ttm);
extern int drm_ttm_set_user(struct drm_ttm *ttm,
			    struct task_struct *tsk,
			    unsigned long start,
			    unsigned long num_pages);
/*
 * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do
 * this, which calls this function iff there are no vmas referencing it
 * anymore. Otherwise it is called when the last vma exits.
 */

extern int drm_ttm_destroy(struct drm_ttm *ttm);
#define DRM_FLAG_MASKED(_old, _new, _mask) {\
(_old) ^= (((_old) ^ (_new)) & (_mask)); \
}
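/*
 * Example (illustrative, not from the original header): DRM_FLAG_MASKED
 * merges only the bits selected by _mask from _new into _old, leaving the
 * remaining bits of _old untouched.
 *
 *	uint64_t flags = 0xf0;
 *	DRM_FLAG_MASKED(flags, 0x0f, 0x03);
 *	// flags is now 0xf3: bits 0-1 came from 0x0f, the rest were kept.
 */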
#define DRM_TTM_MASK_FLAGS ((1 << PAGE_SHIFT) - 1)
#define DRM_TTM_MASK_PFN (0xFFFFFFFFU - DRM_TTM_MASK_FLAGS)
/*
 * This ttm should not be cached by the CPU
 */
#define DRM_TTM_PAGE_UNCACHED	(1 << 0)
/*
 * This flag is not used at this time; I don't know what the
 * intended semantics are.
 */
#define DRM_TTM_PAGE_USED	(1 << 1)
/*
 * This flag is not used at this time; I don't know what the
 * intended semantics are.
 */
#define DRM_TTM_PAGE_BOUND	(1 << 2)
/*
 * This flag is not used at this time; I don't know what the
 * intended semantics are.
 */
#define DRM_TTM_PAGE_PRESENT	(1 << 3)
/*
 * The array of page pointers was allocated with vmalloc
 * instead of drm_calloc.
 */
#define DRM_TTM_PAGEDIR_VMALLOC	(1 << 4)
/*
 * This ttm is mapped from user space
 */
#define DRM_TTM_PAGE_USER	(1 << 5)
/*
 * This ttm will be written to by the GPU
 */
#define DRM_TTM_PAGE_WRITE	(1 << 6)
/*
 * This ttm was mapped to the GPU, and so the contents may have
 * been modified.
 */
#define DRM_TTM_PAGE_USER_DIRTY	(1 << 7)
/*
 * This flag is not used at this time; I don't know what the
 * intended semantics are.
 */
#define DRM_TTM_PAGE_USER_DMA	(1 << 8)
/***************************************************
 * Buffer objects. (drm_bo.c, drm_bo_move.c)
 */
struct drm_bo_mem_reg {
	struct drm_memrange_node *mm_node;
	unsigned long size;
	unsigned long num_pages;
	uint32_t page_alignment;
	uint32_t mem_type;
	/*
	 * Current buffer status flags, indicating
	 * where the buffer is located and which
	 * access modes are in effect
	 */
	uint64_t flags;
	/*
	 * These are the flags proposed for
	 * a validate operation. If the
	 * validate succeeds, they'll get moved
	 * into the flags field
	 */
	uint64_t proposed_flags;

	uint32_t desired_tile_stride;
	uint32_t hw_tile_stride;
};
enum drm_bo_type {
	/*
	 * drm_bo_type_device are 'normal' drm allocations,
	 * pages are allocated from within the kernel automatically
	 * and the objects can be mmap'd from the drm device. Each
	 * drm_bo_type_device object has a unique name which can be
	 * used by other processes to share access to the underlying
	 * buffer.
	 */
	drm_bo_type_device,
	/*
	 * drm_bo_type_user are buffers of pages that already exist
	 * in the process address space. They are more limited than
	 * drm_bo_type_device buffers in that they must always
	 * remain cached (as we assume the user pages are mapped cached),
	 * and they are not sharable to other processes through DRM
	 * (although, regular shared memory should still work fine).
	 */
	drm_bo_type_user,
	/*
	 * drm_bo_type_kernel are buffers that exist solely for use
	 * within the kernel. The pages cannot be mapped into the
	 * process. One obvious use would be for the ring
	 * buffer where user access would not (ideally) be required.
	 */
	drm_bo_type_kernel,
};
struct drm_buffer_object {
	struct drm_device *dev;
	struct drm_user_object base;

	/*
	 * If there is a possibility that the usage variable is zero,
	 * then dev->struct_mutex should be locked before incrementing it.
	 */

	atomic_t usage;
	unsigned long buffer_start;
	enum drm_bo_type type;
	unsigned long offset;
	atomic_t mapped;
	struct drm_bo_mem_reg mem;

	struct list_head lru;
	struct list_head ddestroy;

	uint32_t fence_type;
	uint32_t fence_class;
	uint32_t new_fence_type;
	uint32_t new_fence_class;
	struct drm_fence_object *fence;
	uint32_t priv_flags;
	wait_queue_head_t event_queue;
	struct mutex mutex;
	unsigned long num_pages;

	/* For pinned buffers */
	struct drm_memrange_node *pinned_node;
	uint32_t pinned_mem_type;
	struct list_head pinned_lru;

	/* For vm */
	struct drm_ttm *ttm;
	struct drm_map_list map_list;
	uint32_t memory_type;
	unsigned long bus_offset;

#ifdef DRM_ODD_MM_COMPAT
	/* dev->struct_mutex only protected. */
	struct list_head vma_list;
	struct list_head p_mm_list;
#endif

};
#define _DRM_BO_FLAG_UNFENCED 0x00000001
#define _DRM_BO_FLAG_EVICTED  0x00000002

/*
 * This flag indicates that a function called with bo->mutex held has
 * temporarily released the buffer object mutex (usually to wait for
 * something), and thus any post-lock validation needs to be rerun.
 */
#define _DRM_BO_FLAG_UNLOCKED 0x00000004
struct drm_mem_type_manager {
	int has_type;
	int use_type;
	int kern_init_type;
	struct drm_memrange manager;
	struct list_head lru;
	struct list_head pinned;
	uint32_t flags;
	uint32_t drm_bus_maptype;
	unsigned long gpu_offset;
	unsigned long io_offset;
	unsigned long io_size;
	void *io_addr;
	uint64_t size; /* size of managed area for reporting to userspace */
};
struct drm_bo_lock {
	struct drm_user_object base;
	wait_queue_head_t queue;
	atomic_t write_lock_pending;
	atomic_t readers;
};
#define _DRM_FLAG_MEMTYPE_FIXED     0x00000001	/* Fixed (on-card) PCI memory */
#define _DRM_FLAG_MEMTYPE_MAPPABLE  0x00000002	/* Memory mappable */
#define _DRM_FLAG_MEMTYPE_CACHED    0x00000004	/* Cached binding */
#define _DRM_FLAG_NEEDS_IOREMAP     0x00000008	/* Fixed memory needs ioremap
						   before kernel access. */
#define _DRM_FLAG_MEMTYPE_CMA       0x00000010	/* Can't map aperture */
#define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020	/* Select caching */
struct drm_buffer_manager {
	struct drm_bo_lock bm_lock;
	struct mutex evict_mutex;
	int nice_mode;
	int initialized;
	struct drm_file *last_to_validate;
	struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
	struct list_head unfenced;
	struct list_head ddestroy;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	struct work_struct wq;
#else
	struct delayed_work wq;
#endif
	uint32_t fence_type;
	unsigned long cur_pages;
	atomic_t count;
	struct page *dummy_read_page;
};
struct drm_bo_driver {
	const uint32_t *mem_type_prio;
	const uint32_t *mem_busy_prio;
	uint32_t num_mem_type_prio;
	uint32_t num_mem_busy_prio;
	struct drm_ttm_backend *(*create_ttm_backend_entry)
		(struct drm_device *dev);
	int (*fence_type) (struct drm_buffer_object *bo, uint32_t *fclass,
			   uint32_t *type);
	int (*invalidate_caches) (struct drm_device *dev, uint64_t flags);
	int (*init_mem_type) (struct drm_device *dev, uint32_t type,
			      struct drm_mem_type_manager *man);
	/*
	 * evict_flags:
	 *
	 * @bo: the buffer object to be evicted
	 *
	 * Return the bo flags for a buffer which is not mapped to the hardware.
	 * These will be placed in proposed_flags so that when the move is
	 * finished, they'll end up in bo->mem.flags
	 */
	uint64_t (*evict_flags) (struct drm_buffer_object *bo);
	/*
	 * move:
	 *
	 * @bo: the buffer to move
	 *
	 * @evict: whether this motion is evicting the buffer from
	 * the graphics address space
	 *
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this move would require sleeping
	 *
	 * @new_mem: the new memory region receiving the buffer
	 *
	 * Move a buffer between two memory regions.
	 */
	int (*move) (struct drm_buffer_object *bo,
		     int evict, int no_wait, struct drm_bo_mem_reg *new_mem);
	/*
	 * ttm_cache_flush
	 */
	void (*ttm_cache_flush)(struct drm_ttm *ttm);

	/*
	 * command_stream_barrier
	 *
	 * @dev: The drm device.
	 *
	 * @bo: The buffer object to validate.
	 *
	 * @new_fence_class: The new fence class for the buffer object.
	 *
	 * @new_fence_type: The new fence type for the buffer object.
	 *
	 * @no_wait: whether this should give up and return -EBUSY
	 * if this operation would require sleeping
	 *
	 * Insert a command stream barrier that makes sure that the
	 * buffer is idle once the commands associated with the
	 * current validation are starting to execute. If an error
	 * condition is returned, or the function pointer is NULL,
	 * the drm core will force the buffer idle
	 * during validation.
	 */
	int (*command_stream_barrier) (struct drm_buffer_object *bo,
				       uint32_t new_fence_class,
				       uint32_t new_fence_type,
				       int no_wait);
};
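/*
 * Example (hedged sketch, not from the original header): a driver wiring
 * up its buffer object driver. The xxx_* callbacks and the memory type
 * priority array are hypothetical; the DRM_BO_MEM_* values come from the
 * drm_bo user interface. Optional hooks may be left NULL.
 *
 *	static uint32_t xxx_mem_prios[] = {
 *		DRM_BO_MEM_VRAM, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL
 *	};
 *
 *	static struct drm_bo_driver xxx_bo_driver = {
 *		.mem_type_prio = xxx_mem_prios,
 *		.mem_busy_prio = xxx_mem_prios,
 *		.num_mem_type_prio = ARRAY_SIZE(xxx_mem_prios),
 *		.num_mem_busy_prio = ARRAY_SIZE(xxx_mem_prios),
 *		.create_ttm_backend_entry = xxx_create_ttm_backend,
 *		.fence_type = xxx_fence_type,
 *		.invalidate_caches = xxx_invalidate_caches,
 *		.init_mem_type = xxx_init_mem_type,
 *		.evict_flags = xxx_evict_flags,
 *		.move = xxx_move,
 *	};
 */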
/*
 * buffer objects (drm_bo.c)
 */

extern int drm_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_destroy_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_unmap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_reference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_unreference_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_setstatus_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_init_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_takedown_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_lock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_unlock_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_mm_info_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_version_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv);
extern int drm_bo_driver_finish(struct drm_device *dev);
extern int drm_bo_driver_init(struct drm_device *dev);
extern int drm_bo_pci_offset(struct drm_device *dev,
			     struct drm_bo_mem_reg *mem,
			     unsigned long *bus_base,
			     unsigned long *bus_offset,
			     unsigned long *bus_size);
extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg *mem);

extern void drm_bo_usage_deref_locked(struct drm_buffer_object **bo);
extern void drm_bo_usage_deref_unlocked(struct drm_buffer_object **bo);
extern void drm_putback_buffer_objects(struct drm_device *dev);
extern int drm_fence_buffer_objects(struct drm_device *dev,
				    struct list_head *list,
				    uint32_t fence_flags,
				    struct drm_fence_object *fence,
				    struct drm_fence_object **used_fence);
extern void drm_bo_add_to_lru(struct drm_buffer_object *bo);
extern int drm_buffer_object_create(struct drm_device *dev, unsigned long size,
				    enum drm_bo_type type, uint64_t flags,
				    uint32_t hint, uint32_t page_alignment,
				    unsigned long buffer_start,
				    struct drm_buffer_object **bo);
extern int drm_bo_wait(struct drm_buffer_object *bo, int lazy, int interruptible,
		       int no_wait, int check_unfenced);
extern int drm_bo_mem_space(struct drm_buffer_object *bo,
			    struct drm_bo_mem_reg *mem, int no_wait);
extern int drm_bo_move_buffer(struct drm_buffer_object *bo,
			      uint64_t new_mem_flags,
			      int no_wait, int move_unfenced);
extern int drm_bo_clean_mm(struct drm_device *dev, unsigned mem_type, int kern_clean);
extern int drm_bo_init_mm(struct drm_device *dev, unsigned type,
			  unsigned long p_offset, unsigned long p_size,
			  int kern_init);
extern int drm_bo_handle_validate(struct drm_file *file_priv, uint32_t handle,
				  uint64_t flags, uint64_t mask, uint32_t hint,
				  uint32_t fence_class,
				  struct drm_bo_info_rep *rep,
				  struct drm_buffer_object **bo_rep);
extern struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file *file_priv,
							  uint32_t handle,
							  int check_owner);
extern int drm_bo_do_validate(struct drm_buffer_object *bo,
			      uint64_t flags, uint64_t mask, uint32_t hint,
			      uint32_t fence_class,
			      struct drm_bo_info_rep *rep);
extern int drm_bo_evict_cached(struct drm_buffer_object *bo);
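/*
 * Example (illustrative sketch, not from the original header): creating a
 * pinned, kernel-only buffer, e.g. for a ring buffer. The DRM_BO_FLAG_*
 * values come from the drm_bo user interface; error handling is
 * abbreviated.
 *
 *	struct drm_buffer_object *bo;
 *	int ret;
 *
 *	ret = drm_buffer_object_create(dev, size, drm_bo_type_kernel,
 *				       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
 *				       DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_EVICT,
 *				       0, 0, 0, &bo);
 *	if (ret)
 *		return ret;
 */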
/*
 * Buffer object memory move- and map helpers.
 * (drm_bo_move.c)
 */

extern int drm_bo_move_ttm(struct drm_buffer_object *bo,
			   int evict, int no_wait,
			   struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_memcpy(struct drm_buffer_object *bo,
			      int evict,
			      int no_wait, struct drm_bo_mem_reg *new_mem);
extern int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
				     int evict, int no_wait,
				     uint32_t fence_class, uint32_t fence_type,
				     uint32_t fence_flags,
				     struct drm_bo_mem_reg *new_mem);
extern int drm_bo_same_page(unsigned long offset, unsigned long offset2);
extern unsigned long drm_bo_offset_end(unsigned long offset,
				       unsigned long end);
struct drm_bo_kmap_obj {
	void *virtual;
	struct page *page;
	enum {
		bo_map_iomap,
		bo_map_vmap,
		bo_map_kmap,
		bo_map_premapped,
	} bo_kmap_type;
};

static inline void *drm_bmo_virtual(struct drm_bo_kmap_obj *map, int *is_iomem)
{
	*is_iomem = (map->bo_kmap_type == bo_map_iomap ||
		     map->bo_kmap_type == bo_map_premapped);
	return map->virtual;
}
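/*
 * Example (illustrative, not from the original header): mapping the first
 * page of a buffer for CPU access using the kmap helpers declared below.
 *
 *	struct drm_bo_kmap_obj kmap;
 *	int is_iomem;
 *	void *virtual;
 *	int ret;
 *
 *	ret = drm_bo_kmap(bo, 0, 1, &kmap);
 *	if (ret)
 *		return ret;
 *	virtual = drm_bmo_virtual(&kmap, &is_iomem);
 *	...access the page through "virtual"...
 *	drm_bo_kunmap(&kmap);
 */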
extern void drm_bo_kunmap(struct drm_bo_kmap_obj *map);
extern int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
		       unsigned long num_pages, struct drm_bo_kmap_obj *map);
extern int drm_bo_pfn_prot(struct drm_buffer_object *bo,
			   unsigned long dst_offset,
			   unsigned long *pfn,
			   pgprot_t *prot);
extern void drm_bo_fill_rep_arg(struct drm_buffer_object *bo,
				struct drm_bo_info_rep *rep);
struct drm_reg {
	struct list_head head;
	struct drm_fence_object *fence;
	uint32_t fence_type;
	uint32_t new_fence_type;
};
struct drm_reg_manager {
	struct list_head free;
	struct list_head lru;
	struct list_head unfenced;

	int (*reg_reusable)(const struct drm_reg *reg, const void *data);
	void (*reg_destroy)(struct drm_reg *reg);
};
extern int drm_regs_alloc(struct drm_reg_manager *manager,
			  const void *data,
			  uint32_t fence_class,
			  uint32_t fence_type,
			  int interruptible,
			  int no_wait,
			  struct drm_reg **reg);
extern void drm_regs_fence(struct drm_reg_manager *regs,
			   struct drm_fence_object *fence);
extern void drm_regs_free(struct drm_reg_manager *manager);
extern void drm_regs_add(struct drm_reg_manager *manager, struct drm_reg *reg);
extern void drm_regs_init(struct drm_reg_manager *manager,
			  int (*reg_reusable)(const struct drm_reg *,
					      const void *),
			  void (*reg_destroy)(struct drm_reg *));
/*
 * Simple replacement for the hardware lock on buffer manager init and clean.
 */

extern void drm_bo_init_lock(struct drm_bo_lock *lock);
extern void drm_bo_read_unlock(struct drm_bo_lock *lock);
extern int drm_bo_read_lock(struct drm_bo_lock *lock,
			    int interruptible);
extern int drm_bo_write_lock(struct drm_bo_lock *lock,
			     int interruptible,
			     struct drm_file *file_priv);

extern int drm_bo_write_unlock(struct drm_bo_lock *lock,
			       struct drm_file *file_priv);
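/*
 * Example (illustrative sketch): pairing the read lock around a
 * validate-style operation, assuming the device's buffer manager is
 * reachable as dev->bm (an assumption in this context).
 *
 *	ret = drm_bo_read_lock(&dev->bm.bm_lock, 1);
 *	if (ret)
 *		return ret;
 *	...validate buffers...
 *	drm_bo_read_unlock(&dev->bm.bm_lock);
 */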
#ifdef CONFIG_DEBUG_MUTEXES
#define DRM_ASSERT_LOCKED(_mutex)					\
	BUG_ON(!mutex_is_locked(_mutex) ||				\
	       ((_mutex)->owner != current_thread_info()))
#else
#define DRM_ASSERT_LOCKED(_mutex)
#endif
#endif /* _DRM_OBJECTS_H */