From be85ad0333b0c28129c2e4635f92780816308aa6 Mon Sep 17 00:00:00 2001
From: Dave Airlie
Date: Mon, 16 Jul 2007 13:37:02 +1000
Subject: [PATCH] drm: detypedef ttm/bo/fence code

---
 linux-core/drmP.h | 8 +-
 linux-core/drm_agpsupport.c | 12 +--
 linux-core/drm_bo.c | 242 ++++++++++++++++++++++----------------------
 linux-core/drm_bo_move.c | 58 +++++------
 linux-core/drm_compat.c | 18 ++--
 linux-core/drm_fence.c | 124 +++++++++++------------
 linux-core/drm_objects.h | 126 +++++++++++------------
 linux-core/drm_proc.c | 4 +-
 linux-core/drm_ttm.c | 40 ++++----
 linux-core/drm_vm.c | 12 +--
 linux-core/i915_buffer.c | 26 ++---
 linux-core/i915_drv.c | 4 +-
 linux-core/i915_fence.c | 10 +-
 linux-core/via_buffer.c | 8 +-
 linux-core/via_fence.c | 8 +-
 shared-core/i915_drv.h | 12 +--
 shared-core/via_drv.c | 4 +-
 shared-core/via_drv.h | 12 +--
 18 files changed, 364 insertions(+), 364 deletions(-)

diff --git a/linux-core/drmP.h b/linux-core/drmP.h
index 87a194af..142a04a1 100644
--- a/linux-core/drmP.h
+++ b/linux-core/drmP.h
@@ -806,8 +806,8 @@ struct drm_device {
         unsigned int agp_buffer_token;
         struct drm_head primary;  /**< primary screen head */
 
-        drm_fence_manager_t fm;
-        drm_buffer_manager_t bm;
+        struct drm_fence_manager fm;
+        struct drm_buffer_manager bm;
 
         /** \name Drawable information */
         /*@{ */
@@ -818,7 +818,7 @@ struct drm_device {
 
 #if __OS_HAS_AGP
 struct drm_agp_ttm_backend {
-        drm_ttm_backend_t backend;
+        struct drm_ttm_backend backend;
         DRM_AGP_MEM *mem;
         struct agp_bridge_data *bridge;
         int populated;
@@ -1103,7 +1103,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size
 extern int drm_agp_free_memory(DRM_AGP_MEM * handle);
 extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start);
 extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle);
-extern drm_ttm_backend_t *drm_agp_init_ttm(struct drm_device *dev);
+extern struct drm_ttm_backend *drm_agp_init_ttm(struct drm_device *dev);
 
 /* Stub support (drm_stub.h) */
 extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, struct drm_driver *driver);
diff --git a/linux-core/drm_agpsupport.c b/linux-core/drm_agpsupport.c
index 541d95cd..57c88638 100644
--- a/linux-core/drm_agpsupport.c
+++ b/linux-core/drm_agpsupport.c
@@ -554,7 +554,7 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
 #define AGP_REQUIRED_MAJOR 0
 #define AGP_REQUIRED_MINOR 102
 
-static int drm_agp_needs_unbind_cache_adjust(drm_ttm_backend_t *backend) {
+static int drm_agp_needs_unbind_cache_adjust(struct drm_ttm_backend *backend) {
         return ((backend->flags & DRM_BE_FLAG_BOUND_CACHED) ?
0 : 1); } @@ -590,7 +590,7 @@ static int drm_agp_populate(struct drm_ttm_backend *backend, unsigned long num_p return 0; } -static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, +static int drm_agp_bind_ttm(struct drm_ttm_backend *backend, unsigned long offset, int cached) { @@ -612,7 +612,7 @@ static int drm_agp_bind_ttm(drm_ttm_backend_t *backend, return ret; } -static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { +static int drm_agp_unbind_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); @@ -624,7 +624,7 @@ static int drm_agp_unbind_ttm(drm_ttm_backend_t *backend) { return 0; } -static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_clear_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be = container_of(backend, struct drm_agp_ttm_backend, backend); @@ -640,7 +640,7 @@ static void drm_agp_clear_ttm(drm_ttm_backend_t *backend) { agp_be->mem = NULL; } -static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { +static void drm_agp_destroy_ttm(struct drm_ttm_backend *backend) { struct drm_agp_ttm_backend *agp_be; @@ -656,7 +656,7 @@ static void drm_agp_destroy_ttm(drm_ttm_backend_t *backend) { } } -static drm_ttm_backend_func_t agp_ttm_backend = +static struct drm_ttm_backend_func agp_ttm_backend = { .needs_ub_cache_adjust = drm_agp_needs_unbind_cache_adjust, .populate = drm_agp_populate, diff --git a/linux-core/drm_bo.c b/linux-core/drm_bo.c index 30664632..a81dfbde 100644 --- a/linux-core/drm_bo.c +++ b/linux-core/drm_bo.c @@ -49,10 +49,10 @@ * */ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo); -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo); -static void drm_bo_unmap_virtual(drm_buffer_object_t * bo); +static void drm_bo_destroy_locked(struct drm_buffer_object * bo); +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo); +static void drm_bo_unmap_virtual(struct drm_buffer_object * bo); static inline uint32_t drm_bo_type_flags(unsigned type) { @@ -63,9 +63,9 @@ static inline uint32_t drm_bo_type_flags(unsigned type) * bo locked. dev->struct_mutex locked. */ -void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_pinned_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); DRM_ASSERT_LOCKED(&bo->mutex); @@ -74,9 +74,9 @@ void drm_bo_add_to_pinned_lru(drm_buffer_object_t * bo) list_add_tail(&bo->pinned_lru, &man->pinned); } -void drm_bo_add_to_lru(drm_buffer_object_t * bo) +void drm_bo_add_to_lru(struct drm_buffer_object * bo) { - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -89,7 +89,7 @@ void drm_bo_add_to_lru(drm_buffer_object_t * bo) } } -static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) +static int drm_bo_vm_pre_move(struct drm_buffer_object * bo, int old_is_pci) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -112,7 +112,7 @@ static int drm_bo_vm_pre_move(drm_buffer_object_t * bo, int old_is_pci) return 0; } -static void drm_bo_vm_post_move(drm_buffer_object_t * bo) +static void drm_bo_vm_post_move(struct drm_buffer_object * bo) { #ifdef DRM_ODD_MM_COMPAT int ret; @@ -133,7 +133,7 @@ static void drm_bo_vm_post_move(drm_buffer_object_t * bo) * Call bo->mutex locked. 
*/ -static int drm_bo_add_ttm(drm_buffer_object_t * bo) +static int drm_bo_add_ttm(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; int ret = 0; @@ -164,16 +164,16 @@ static int drm_bo_add_ttm(drm_buffer_object_t * bo) return ret; } -static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, +static int drm_bo_handle_move_mem(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int evict, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int old_is_pci = drm_mem_reg_is_pci(dev, &bo->mem); int new_is_pci = drm_mem_reg_is_pci(dev, mem); - drm_mem_type_manager_t *old_man = &bm->man[bo->mem.mem_type]; - drm_mem_type_manager_t *new_man = &bm->man[mem->mem_type]; + struct drm_mem_type_manager *old_man = &bm->man[bo->mem.mem_type]; + struct drm_mem_type_manager *new_man = &bm->man[mem->mem_type]; int ret = 0; if (old_is_pci || new_is_pci) @@ -201,7 +201,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, if ((bo->mem.mem_type == DRM_BO_MEM_LOCAL) && bo->ttm == NULL) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; uint64_t save_flags = old_mem->flags; uint64_t save_mask = old_mem->mask; @@ -266,7 +266,7 @@ static int drm_bo_handle_move_mem(drm_buffer_object_t * bo, * Wait until the buffer is idle. */ -int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, +int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals, int no_wait) { int ret; @@ -292,10 +292,10 @@ int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals, return 0; } -static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) +static int drm_bo_expire_fence(struct drm_buffer_object * bo, int allow_errors) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; if (bo->fence) { if (bm->nice_mode) { @@ -327,10 +327,10 @@ static int drm_bo_expire_fence(drm_buffer_object_t * bo, int allow_errors) * fence object and removing from lru lists and memory managers. */ -static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) +static void drm_bo_cleanup_refs(struct drm_buffer_object * bo, int remove_all) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -389,10 +389,10 @@ static void drm_bo_cleanup_refs(drm_buffer_object_t * bo, int remove_all) * to the buffer object. Then destroy it. 
*/ -static void drm_bo_destroy_locked(drm_buffer_object_t * bo) +static void drm_bo_destroy_locked(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; DRM_ASSERT_LOCKED(&dev->struct_mutex); @@ -440,17 +440,17 @@ static void drm_bo_destroy_locked(drm_buffer_object_t * bo) static void drm_bo_delayed_delete(struct drm_device * dev, int remove_all) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; struct list_head *list, *next; list_for_each_safe(list, next, &bm->ddestroy) { - entry = list_entry(list, drm_buffer_object_t, ddestroy); + entry = list_entry(list, struct drm_buffer_object, ddestroy); nentry = NULL; if (next != &bm->ddestroy) { - nentry = list_entry(next, drm_buffer_object_t, + nentry = list_entry(next, struct drm_buffer_object, ddestroy); atomic_inc(&nentry->usage); } @@ -471,10 +471,10 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) { #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) struct drm_device *dev = (struct drm_device *) data; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; #else - drm_buffer_manager_t *bm = - container_of(work, drm_buffer_manager_t, wq.work); + struct drm_buffer_manager *bm = + container_of(work, struct drm_buffer_manager, wq.work); struct drm_device *dev = container_of(bm, struct drm_device, bm); #endif @@ -493,7 +493,7 @@ static void drm_bo_delayed_workqueue(struct work_struct *work) mutex_unlock(&dev->struct_mutex); } -void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) +void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; bo = NULL; @@ -507,8 +507,8 @@ void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo) static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_object * uo) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); DRM_ASSERT_LOCKED(&bo->dev->struct_mutex); @@ -516,7 +516,7 @@ static void drm_bo_base_deref_locked(struct drm_file * priv, struct drm_user_obj drm_bo_usage_deref_locked(&bo); } -static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) +static void drm_bo_usage_deref_unlocked(struct drm_buffer_object ** bo) { struct drm_buffer_object *tmp_bo = *bo; struct drm_device *dev = tmp_bo->dev; @@ -538,13 +538,13 @@ static void drm_bo_usage_deref_unlocked(drm_buffer_object_t ** bo) int drm_fence_buffer_objects(struct drm_file * priv, struct list_head *list, uint32_t fence_flags, - drm_fence_object_t * fence, - drm_fence_object_t ** used_fence) + struct drm_fence_object * fence, + struct drm_fence_object ** used_fence) { struct drm_device *dev = priv->head->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; uint32_t fence_type = 0; int count = 0; int ret = 0; @@ -602,7 +602,7 @@ int drm_fence_buffer_objects(struct drm_file * priv, l = f_list.next; while (l != &f_list) { prefetch(l->next); - entry = list_entry(l, drm_buffer_object_t, lru); + entry = list_entry(l, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -635,12 +635,12 @@ EXPORT_SYMBOL(drm_fence_buffer_objects); * bo->mutex locked 
*/ -static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, +static int drm_bo_evict(struct drm_buffer_object * bo, unsigned mem_type, int no_wait) { int ret = 0; struct drm_device *dev = bo->dev; - drm_bo_mem_reg_t evict_mem; + struct drm_bo_mem_reg evict_mem; /* * Someone might have modified the buffer before we took the buffer mutex. @@ -706,13 +706,13 @@ static int drm_bo_evict(drm_buffer_object_t * bo, unsigned mem_type, } static int drm_bo_mem_force_space(struct drm_device * dev, - drm_bo_mem_reg_t * mem, + struct drm_bo_mem_reg * mem, uint32_t mem_type, int no_wait) { struct drm_mm_node *node; - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *entry; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *entry; + struct drm_mem_type_manager *man = &bm->man[mem_type]; struct list_head *lru; unsigned long num_pages = mem->num_pages; int ret; @@ -728,7 +728,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, if (lru->next == lru) break; - entry = list_entry(lru->next, drm_buffer_object_t, lru); + entry = list_entry(lru->next, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); mutex_lock(&entry->mutex); @@ -754,7 +754,7 @@ static int drm_bo_mem_force_space(struct drm_device * dev, return 0; } -static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, +static int drm_bo_mt_compatible(struct drm_mem_type_manager * man, uint32_t mem_type, uint32_t mask, uint32_t * res_mask) { @@ -791,12 +791,12 @@ static int drm_bo_mt_compatible(drm_mem_type_manager_t * man, return 1; } -int drm_bo_mem_space(drm_buffer_object_t * bo, - drm_bo_mem_reg_t * mem, int no_wait) +int drm_bo_mem_space(struct drm_buffer_object * bo, + struct drm_bo_mem_reg * mem, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; @@ -883,7 +883,7 @@ int drm_bo_mem_space(drm_buffer_object_t * bo, EXPORT_SYMBOL(drm_bo_mem_space); -static int drm_bo_new_mask(drm_buffer_object_t * bo, +static int drm_bo_new_mask(struct drm_buffer_object * bo, uint64_t new_mask, uint32_t hint) { uint32_t new_props; @@ -921,11 +921,11 @@ static int drm_bo_new_mask(drm_buffer_object_t * bo, * Call dev->struct_mutex locked. */ -drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, +struct drm_buffer_object *drm_lookup_buffer_object(struct drm_file * priv, uint32_t handle, int check_owner) { struct drm_user_object *uo; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; uo = drm_lookup_user_object(priv, handle); @@ -939,7 +939,7 @@ drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, return NULL; } - bo = drm_user_object_entry(uo, drm_buffer_object_t, base); + bo = drm_user_object_entry(uo, struct drm_buffer_object, base); atomic_inc(&bo->usage); return bo; } @@ -950,9 +950,9 @@ drm_buffer_object_t *drm_lookup_buffer_object(struct drm_file * priv, * Doesn't do any fence flushing as opposed to the drm_bo_busy function. 
*/ -static int drm_bo_quick_busy(drm_buffer_object_t * bo) +static int drm_bo_quick_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -970,9 +970,9 @@ static int drm_bo_quick_busy(drm_buffer_object_t * bo) * Returns 1 if the buffer is currently rendered to or from. 0 otherwise. */ -static int drm_bo_busy(drm_buffer_object_t * bo) +static int drm_bo_busy(struct drm_buffer_object * bo) { - drm_fence_object_t *fence = bo->fence; + struct drm_fence_object *fence = bo->fence; BUG_ON(bo->priv_flags & _DRM_BO_FLAG_UNFENCED); if (fence) { @@ -990,7 +990,7 @@ static int drm_bo_busy(drm_buffer_object_t * bo) return 0; } -static int drm_bo_read_cached(drm_buffer_object_t * bo) +static int drm_bo_read_cached(struct drm_buffer_object * bo) { int ret = 0; @@ -1004,7 +1004,7 @@ static int drm_bo_read_cached(drm_buffer_object_t * bo) * Wait until a buffer is unmapped. */ -static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) +static int drm_bo_wait_unmapped(struct drm_buffer_object * bo, int no_wait) { int ret = 0; @@ -1020,7 +1020,7 @@ static int drm_bo_wait_unmapped(drm_buffer_object_t * bo, int no_wait) return ret; } -static int drm_bo_check_unfenced(drm_buffer_object_t * bo) +static int drm_bo_check_unfenced(struct drm_buffer_object * bo) { int ret; @@ -1042,7 +1042,7 @@ static int drm_bo_check_unfenced(drm_buffer_object_t * bo) * the buffer "unfenced" after validating, but before fencing. */ -static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, +static int drm_bo_wait_unfenced(struct drm_buffer_object * bo, int no_wait, int eagain_if_wait) { int ret = (bo->priv_flags & _DRM_BO_FLAG_UNFENCED); @@ -1075,7 +1075,7 @@ static int drm_bo_wait_unfenced(drm_buffer_object_t * bo, int no_wait, * Bo locked. */ -static void drm_bo_fill_rep_arg(drm_buffer_object_t * bo, +static void drm_bo_fill_rep_arg(struct drm_buffer_object * bo, struct drm_bo_info_rep *rep) { rep->handle = bo->base.hash.key; @@ -1106,7 +1106,7 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, uint32_t map_flags, unsigned hint, struct drm_bo_info_rep *rep) { - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; struct drm_device *dev = priv->head->dev; int ret = 0; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1186,7 +1186,7 @@ static int drm_buffer_object_map(struct drm_file * priv, uint32_t handle, static int drm_buffer_object_unmap(struct drm_file * priv, uint32_t handle) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; struct drm_ref_object *ro; int ret = 0; @@ -1219,8 +1219,8 @@ static void drm_buffer_user_object_unmap(struct drm_file * priv, struct drm_user_object * uo, drm_ref_t action) { - drm_buffer_object_t *bo = - drm_user_object_entry(uo, drm_buffer_object_t, base); + struct drm_buffer_object *bo = + drm_user_object_entry(uo, struct drm_buffer_object, base); /* * We DON'T want to take the bo->lock here, because we want to @@ -1238,13 +1238,13 @@ static void drm_buffer_user_object_unmap(struct drm_file * priv, * Note that new_mem_flags are NOT transferred to the bo->mem.mask. 
*/ -int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, +int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags, int no_wait, int move_unfenced) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; - drm_bo_mem_reg_t mem; + struct drm_bo_mem_reg mem; /* * Flush outstanding fences. */ @@ -1300,7 +1300,7 @@ int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags, return ret; } -static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) +static int drm_bo_mem_compat(struct drm_bo_mem_reg * mem) { uint32_t flag_diff = (mem->mask ^ mem->flags); @@ -1318,10 +1318,10 @@ static int drm_bo_mem_compat(drm_bo_mem_reg_t * mem) return 1; } -static int drm_bo_check_fake(struct drm_device * dev, drm_bo_mem_reg_t * mem) +static int drm_bo_check_fake(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man; uint32_t num_prios = dev->driver->bo_driver->num_mem_type_prio; const uint32_t *prios = dev->driver->bo_driver->mem_type_prio; uint32_t i; @@ -1360,13 +1360,13 @@ static int drm_bo_check_fake(struct drm_device * dev, drm_bo_mem_reg_t * mem) * bo locked. */ -static int drm_buffer_object_validate(drm_buffer_object_t * bo, +static int drm_buffer_object_validate(struct drm_buffer_object * bo, uint32_t fence_class, int move_unfenced, int no_wait) { struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; uint32_t ftype; int ret; @@ -1496,7 +1496,7 @@ static int drm_bo_handle_validate(struct drm_file * priv, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; int ret; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; @@ -1536,7 +1536,7 @@ static int drm_bo_handle_info(struct drm_file *priv, uint32_t handle, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; mutex_lock(&dev->struct_mutex); bo = drm_lookup_buffer_object(priv, handle, 1); @@ -1559,7 +1559,7 @@ static int drm_bo_handle_wait(struct drm_file *priv, uint32_t handle, struct drm_bo_info_rep *rep) { struct drm_device *dev = priv->head->dev; - drm_buffer_object_t *bo; + struct drm_buffer_object *bo; int no_wait = hint & DRM_BO_HINT_DONT_BLOCK; int ret; @@ -1594,10 +1594,10 @@ int drm_buffer_object_create(struct drm_device *dev, uint32_t hint, uint32_t page_alignment, unsigned long buffer_start, - drm_buffer_object_t ** buf_obj) + struct drm_buffer_object ** buf_obj) { - drm_buffer_manager_t *bm = &dev->bm; - drm_buffer_object_t *bo; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_buffer_object *bo; int ret = 0; unsigned long num_pages; @@ -1672,7 +1672,7 @@ int drm_buffer_object_create(struct drm_device *dev, return ret; } -static int drm_bo_add_user_object(struct drm_file * priv, drm_buffer_object_t * bo, +static int drm_bo_add_user_object(struct drm_file * priv, struct drm_buffer_object * bo, int shareable) { struct drm_device *dev = priv->head->dev; @@ -1769,7 +1769,7 @@ int drm_bo_create_ioctl(DRM_IOCTL_ARGS) struct drm_bo_create_arg arg; struct drm_bo_create_req *req = &arg.d.req; struct drm_bo_info_rep *rep = &arg.d.rep; - drm_buffer_object_t 
*entry; + struct drm_buffer_object *entry; int ret = 0; if (!dev->bm.initialized) { @@ -1975,16 +1975,16 @@ int drm_bo_wait_idle_ioctl(DRM_IOCTL_ARGS) static void drm_bo_clean_unfenced(struct drm_device *dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; struct list_head *head, *list; - drm_buffer_object_t *entry; + struct drm_buffer_object *entry; head = &bm->unfenced; list = head->next; while(list != head) { prefetch(list->next); - entry = list_entry(list, drm_buffer_object_t, lru); + entry = list_entry(list, struct drm_buffer_object, lru); atomic_inc(&entry->usage); mutex_unlock(&dev->struct_mutex); @@ -1999,7 +1999,7 @@ static void drm_bo_clean_unfenced(struct drm_device *dev) } } -static int drm_bo_leave_list(drm_buffer_object_t * bo, +static int drm_bo_leave_list(struct drm_buffer_object * bo, uint32_t mem_type, int free_pinned, int allow_errors) { @@ -2050,13 +2050,13 @@ static int drm_bo_leave_list(drm_buffer_object_t * bo, } -static drm_buffer_object_t *drm_bo_entry(struct list_head *list, +static struct drm_buffer_object *drm_bo_entry(struct list_head *list, int pinned_list) { if (pinned_list) - return list_entry(list, drm_buffer_object_t, pinned_lru); + return list_entry(list, struct drm_buffer_object, pinned_lru); else - return list_entry(list, drm_buffer_object_t, lru); + return list_entry(list, struct drm_buffer_object, lru); } /* @@ -2071,7 +2071,7 @@ static int drm_bo_force_list_clean(struct drm_device * dev, int pinned_list) { struct list_head *list, *next, *prev; - drm_buffer_object_t *entry, *nentry; + struct drm_buffer_object *entry, *nentry; int ret; int do_restart; @@ -2130,8 +2130,8 @@ restart: int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; int ret = -EINVAL; if (mem_type >= DRM_BO_MEM_TYPES) { @@ -2173,8 +2173,8 @@ int drm_bo_clean_mm(struct drm_device * dev, unsigned mem_type) static int drm_bo_lock_mm(struct drm_device * dev, unsigned mem_type) { int ret; - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem_type]; if (mem_type == 0 || mem_type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory manager memory type %u.\n", mem_type); @@ -2200,9 +2200,9 @@ int drm_bo_init_mm(struct drm_device * dev, unsigned type, unsigned long p_offset, unsigned long p_size) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; if (type >= DRM_BO_MEM_TYPES) { DRM_ERROR("Illegal memory type %d\n", type); @@ -2247,10 +2247,10 @@ EXPORT_SYMBOL(drm_bo_init_mm); int drm_bo_driver_finish(struct drm_device * dev) { - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; int ret = 0; unsigned i = DRM_BO_MEM_TYPES; - drm_mem_type_manager_t *man; + struct drm_mem_type_manager *man; mutex_lock(&dev->bm.init_mutex); mutex_lock(&dev->struct_mutex); @@ -2298,8 +2298,8 @@ int drm_bo_driver_finish(struct drm_device * dev) int drm_bo_driver_init(struct drm_device * dev) { - drm_bo_driver_t *driver = dev->driver->bo_driver; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; int ret = -EINVAL; 
mutex_lock(&dev->bm.init_mutex); @@ -2339,8 +2339,8 @@ int drm_mm_init_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_init_arg arg; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2396,8 +2396,8 @@ int drm_mm_takedown_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_buffer_manager_t *bm = &dev->bm; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2438,7 +2438,7 @@ int drm_mm_lock_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2465,7 +2465,7 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; struct drm_mm_type_arg arg; - drm_bo_driver_t *driver = dev->driver->bo_driver; + struct drm_bo_driver *driver = dev->driver->bo_driver; int ret; if (!driver) { @@ -2492,10 +2492,10 @@ int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS) * buffer object vm functions. */ -int drm_mem_reg_is_pci(struct drm_device * dev, drm_bo_mem_reg_t * mem) +int drm_mem_reg_is_pci(struct drm_device * dev, struct drm_bo_mem_reg * mem) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED)) { if (mem->mem_type == DRM_BO_MEM_LOCAL) @@ -2526,13 +2526,13 @@ EXPORT_SYMBOL(drm_mem_reg_is_pci); * Otherwise returns zero. */ -int drm_bo_pci_offset(struct drm_device * dev, - drm_bo_mem_reg_t * mem, +int drm_bo_pci_offset(struct drm_device *dev, + struct drm_bo_mem_reg *mem, unsigned long *bus_base, unsigned long *bus_offset, unsigned long *bus_size) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; *bus_size = 0; if (!(man->flags & _DRM_FLAG_MEMTYPE_MAPPABLE)) @@ -2555,7 +2555,7 @@ int drm_bo_pci_offset(struct drm_device * dev, * Call bo->mutex locked. */ -void drm_bo_unmap_virtual(drm_buffer_object_t * bo) +void drm_bo_unmap_virtual(struct drm_buffer_object * bo) { struct drm_device *dev = bo->dev; loff_t offset = ((loff_t) bo->map_list.hash.key) << PAGE_SHIFT; @@ -2567,7 +2567,7 @@ void drm_bo_unmap_virtual(drm_buffer_object_t * bo) unmap_mapping_range(dev->dev_mapping, offset, holelen, 1); } -static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) +static void drm_bo_takedown_vm_locked(struct drm_buffer_object * bo) { struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; @@ -2593,7 +2593,7 @@ static void drm_bo_takedown_vm_locked(drm_buffer_object_t * bo) drm_bo_usage_deref_locked(&bo); } -static int drm_bo_setup_vm_locked(drm_buffer_object_t * bo) +static int drm_bo_setup_vm_locked(struct drm_buffer_object * bo) { struct drm_map_list *list = &bo->map_list; drm_local_map_t *map; diff --git a/linux-core/drm_bo_move.c b/linux-core/drm_bo_move.c index 1e0d26ce..5e21173c 100644 --- a/linux-core/drm_bo_move.c +++ b/linux-core/drm_bo_move.c @@ -35,9 +35,9 @@ * have not been requested to free also pinned regions. 
*/ -static void drm_bo_free_old_node(drm_buffer_object_t * bo) +static void drm_bo_free_old_node(struct drm_buffer_object * bo) { - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_bo_mem_reg *old_mem = &bo->mem; if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) { mutex_lock(&bo->dev->struct_mutex); @@ -48,11 +48,11 @@ static void drm_bo_free_old_node(drm_buffer_object_t * bo) old_mem->mm_node = NULL; } -int drm_bo_move_ttm(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_ttm(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; int ret; @@ -102,11 +102,11 @@ EXPORT_SYMBOL(drm_bo_move_ttm); * Call bo->mutex locked. */ -int drm_mem_reg_ioremap(struct drm_device * dev, drm_bo_mem_reg_t * mem, +int drm_mem_reg_ioremap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void **virtual) { - drm_buffer_manager_t *bm = &dev->bm; - drm_mem_type_manager_t *man = &bm->man[mem->mem_type]; + struct drm_buffer_manager *bm = &dev->bm; + struct drm_mem_type_manager *man = &bm->man[mem->mem_type]; unsigned long bus_offset; unsigned long bus_size; unsigned long bus_base; @@ -137,11 +137,11 @@ int drm_mem_reg_ioremap(struct drm_device * dev, drm_bo_mem_reg_t * mem, * Call bo->mutex locked. */ -void drm_mem_reg_iounmap(struct drm_device * dev, drm_bo_mem_reg_t * mem, +void drm_mem_reg_iounmap(struct drm_device * dev, struct drm_bo_mem_reg * mem, void *virtual) { - drm_buffer_manager_t *bm; - drm_mem_type_manager_t *man; + struct drm_buffer_manager *bm; + struct drm_mem_type_manager *man; bm = &dev->bm; man = &bm->man[mem->mem_type]; @@ -164,7 +164,7 @@ static int drm_copy_io_page(void *dst, void *src, unsigned long page) return 0; } -static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) +static int drm_copy_io_ttm_page(struct drm_ttm * ttm, void *src, unsigned long page) { struct page *d = drm_ttm_get_page(ttm, page); void *dst; @@ -182,7 +182,7 @@ static int drm_copy_io_ttm_page(drm_ttm_t * ttm, void *src, unsigned long page) return 0; } -static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) +static int drm_copy_ttm_io_page(struct drm_ttm * ttm, void *dst, unsigned long page) { struct page *s = drm_ttm_get_page(ttm, page); void *src; @@ -200,14 +200,14 @@ static int drm_copy_ttm_io_page(drm_ttm_t * ttm, void *dst, unsigned long page) return 0; } -int drm_bo_move_memcpy(drm_buffer_object_t * bo, - int evict, int no_wait, drm_bo_mem_reg_t * new_mem) +int drm_bo_move_memcpy(struct drm_buffer_object * bo, + int evict, int no_wait, struct drm_bo_mem_reg * new_mem) { struct drm_device *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_ttm_t *ttm = bo->ttm; - drm_bo_mem_reg_t *old_mem = &bo->mem; - drm_bo_mem_reg_t old_copy = *old_mem; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_ttm *ttm = bo->ttm; + struct drm_bo_mem_reg *old_mem = &bo->mem; + struct drm_bo_mem_reg old_copy = *old_mem; void *old_iomap; void *new_iomap; int ret; @@ -281,12 +281,12 @@ EXPORT_SYMBOL(drm_bo_move_memcpy); * object. Call bo->mutex locked. 
*/ -int drm_buffer_object_transfer(drm_buffer_object_t * bo, - drm_buffer_object_t ** new_obj) +int drm_buffer_object_transfer(struct drm_buffer_object * bo, + struct drm_buffer_object ** new_obj) { - drm_buffer_object_t *fbo; + struct drm_buffer_object *fbo; struct drm_device *dev = bo->dev; - drm_buffer_manager_t *bm = &dev->bm; + struct drm_buffer_manager *bm = &dev->bm; fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ); if (!fbo) @@ -323,20 +323,20 @@ int drm_buffer_object_transfer(drm_buffer_object_t * bo, * We cannot restart until it has finished. */ -int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo, +int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo, int evict, int no_wait, uint32_t fence_class, uint32_t fence_type, - uint32_t fence_flags, drm_bo_mem_reg_t * new_mem) + uint32_t fence_flags, struct drm_bo_mem_reg * new_mem) { struct drm_device *dev = bo->dev; - drm_mem_type_manager_t *man = &dev->bm.man[new_mem->mem_type]; - drm_bo_mem_reg_t *old_mem = &bo->mem; + struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type]; + struct drm_bo_mem_reg *old_mem = &bo->mem; int ret; uint32_t save_flags = old_mem->flags; uint32_t save_mask = old_mem->mask; - drm_buffer_object_t *old_obj; + struct drm_buffer_object *old_obj; if (bo->fence) drm_fence_usage_deref_unlocked(&bo->fence); diff --git a/linux-core/drm_compat.c b/linux-core/drm_compat.c index 5d1d62fa..38ca497f 100644 --- a/linux-core/drm_compat.c +++ b/linux-core/drm_compat.c @@ -201,7 +201,7 @@ static struct page *drm_bo_vm_fault(struct vm_area_struct *vma, struct fault_data *data) { unsigned long address = data->address; - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page = NULL; drm_ttm_t *ttm; @@ -351,7 +351,7 @@ struct page *drm_bo_vm_nopage(struct vm_area_struct *vma, unsigned long address, int *type) { - drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data; unsigned long page_offset; struct page *page; drm_ttm_t *ttm; @@ -395,7 +395,7 @@ out_unlock: int drm_bo_map_bound(struct vm_area_struct *vma) { - drm_buffer_object_t *bo = (drm_buffer_object_t *)vma->vm_private_data; + struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data; int ret = 0; unsigned long bus_base; unsigned long bus_offset; @@ -418,7 +418,7 @@ int drm_bo_map_bound(struct vm_area_struct *vma) } -int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n_entry; vma_entry_t *v_entry; @@ -454,7 +454,7 @@ int drm_bo_add_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) return 0; } -void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) +void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma) { p_mm_entry_t *entry, *n; vma_entry_t *v_entry, *v_n; @@ -486,7 +486,7 @@ void drm_bo_delete_vma(drm_buffer_object_t * bo, struct vm_area_struct *vma) -int drm_bo_lock_kmm(drm_buffer_object_t * bo) +int drm_bo_lock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; int lock_ok = 1; @@ -518,7 +518,7 @@ int drm_bo_lock_kmm(drm_buffer_object_t * bo) return -EAGAIN; } -void drm_bo_unlock_kmm(drm_buffer_object_t * bo) +void drm_bo_unlock_kmm(struct drm_buffer_object * bo) { p_mm_entry_t *entry; @@ -529,7 
+529,7 @@ void drm_bo_unlock_kmm(drm_buffer_object_t * bo) } } -int drm_bo_remap_bound(drm_buffer_object_t *bo) +int drm_bo_remap_bound(struct drm_buffer_object *bo) { vma_entry_t *v_entry; int ret = 0; @@ -545,7 +545,7 @@ int drm_bo_remap_bound(drm_buffer_object_t *bo) return ret; } -void drm_bo_finish_unmap(drm_buffer_object_t *bo) +void drm_bo_finish_unmap(struct drm_buffer_object *bo) { vma_entry_t *v_entry; diff --git a/linux-core/drm_fence.c b/linux-core/drm_fence.c index f925621a..9b2fa405 100644 --- a/linux-core/drm_fence.c +++ b/linux-core/drm_fence.c @@ -40,11 +40,11 @@ void drm_fence_handler(struct drm_device * dev, uint32_t class, int wake = 0; uint32_t diff; uint32_t relevant; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; struct list_head *head; - drm_fence_object_t *fence, *next; + struct drm_fence_object *fence, *next; int found = 0; int is_exe = (type & DRM_FENCE_TYPE_EXE); int ge_last_exe; @@ -116,7 +116,7 @@ EXPORT_SYMBOL(drm_fence_handler); static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) { - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; unsigned long flags; write_lock_irqsave(&fm->lock, flags); @@ -124,11 +124,11 @@ static void drm_fence_unring(struct drm_device * dev, struct list_head *ring) write_unlock_irqrestore(&fm->lock, flags); } -void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_locked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; DRM_ASSERT_LOCKED(&dev->struct_mutex); *fence = NULL; @@ -142,11 +142,11 @@ void drm_fence_usage_deref_locked(drm_fence_object_t ** fence) } } -void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence) +void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence) { struct drm_fence_object *tmp_fence = *fence; struct drm_device *dev = tmp_fence->dev; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; *fence = NULL; if (atomic_dec_and_test(&tmp_fence->usage)) { @@ -182,20 +182,20 @@ void drm_fence_reference_unlocked(struct drm_fence_object **dst, static void drm_fence_object_destroy(struct drm_file *priv, struct drm_user_object * base) { - drm_fence_object_t *fence = - drm_user_object_entry(base, drm_fence_object_t, base); + struct drm_fence_object *fence = + drm_user_object_entry(base, struct drm_fence_object, base); drm_fence_usage_deref_locked(&fence); } -int drm_fence_object_signaled(drm_fence_object_t * fence, +int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t mask, int poke_flush) { unsigned long flags; int signaled; struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; if (poke_flush) driver->poke_flush(dev, fence->class); @@ -207,8 +207,8 @@ int drm_fence_object_signaled(drm_fence_object_t * fence, return signaled; } -static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, - drm_fence_driver_t * driver, uint32_t sequence) +static void 
drm_fence_flush_exe(struct drm_fence_class_manager * fc, + struct drm_fence_driver * driver, uint32_t sequence) { uint32_t diff; @@ -224,13 +224,13 @@ static void drm_fence_flush_exe(drm_fence_class_manager_t * fc, } } -int drm_fence_object_flush(drm_fence_object_t * fence, +int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; unsigned long flags; if (type & ~fence->type) { @@ -264,12 +264,12 @@ int drm_fence_object_flush(drm_fence_object_t * fence, void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t sequence) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[class]; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[class]; + struct drm_fence_driver *driver = dev->driver->fence_driver; uint32_t old_sequence; unsigned long flags; - drm_fence_object_t *fence; + struct drm_fence_object *fence; uint32_t diff; write_lock_irqsave(&fm->lock, flags); @@ -290,7 +290,7 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t seque mutex_unlock(&dev->struct_mutex); return; } - fence = drm_fence_reference_locked(list_entry(fc->ring.next, drm_fence_object_t, ring)); + fence = drm_fence_reference_locked(list_entry(fc->ring.next, struct drm_fence_object, ring)); mutex_unlock(&dev->struct_mutex); diff = (old_sequence - fence->sequence) & driver->sequence_mask; read_unlock_irqrestore(&fm->lock, flags); @@ -302,13 +302,13 @@ void drm_fence_flush_old(struct drm_device * dev, uint32_t class, uint32_t seque EXPORT_SYMBOL(drm_fence_flush_old); -static int drm_fence_lazy_wait(drm_fence_object_t *fence, +static int drm_fence_lazy_wait(struct drm_fence_object *fence, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *fc = &fm->class[fence->class]; int signaled; unsigned long _end = jiffies + 3*DRM_HZ; int ret = 0; @@ -336,11 +336,11 @@ static int drm_fence_lazy_wait(drm_fence_object_t *fence, return 0; } -int drm_fence_object_wait(drm_fence_object_t * fence, +int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask) { struct drm_device *dev = fence->dev; - drm_fence_driver_t *driver = dev->driver->fence_driver; + struct drm_fence_driver *driver = dev->driver->fence_driver; int ret = 0; unsigned long _end; int signaled; @@ -403,13 +403,13 @@ int drm_fence_object_wait(drm_fence_object_t * fence, return 0; } -int drm_fence_object_emit(drm_fence_object_t * fence, +int drm_fence_object_emit(struct drm_fence_object * fence, uint32_t fence_flags, uint32_t class, uint32_t type) { struct drm_device *dev = fence->dev; - drm_fence_manager_t *fm = &dev->fm; - drm_fence_driver_t *driver = dev->driver->fence_driver; - drm_fence_class_manager_t *fc = &fm->class[fence->class]; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_driver *driver = dev->driver->fence_driver; + struct drm_fence_class_manager *fc = 
&fm->class[fence->class]; unsigned long flags; uint32_t sequence; uint32_t native_type; @@ -438,11 +438,11 @@ int drm_fence_object_emit(drm_fence_object_t * fence, static int drm_fence_object_init(struct drm_device * dev, uint32_t class, uint32_t type, uint32_t fence_flags, - drm_fence_object_t * fence) + struct drm_fence_object * fence) { int ret = 0; unsigned long flags; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; mutex_lock(&dev->struct_mutex); atomic_set(&fence->usage, 1); @@ -471,7 +471,7 @@ static int drm_fence_object_init(struct drm_device * dev, uint32_t class, return ret; } -int drm_fence_add_user_object(struct drm_file * priv, drm_fence_object_t * fence, +int drm_fence_add_user_object(struct drm_file * priv, struct drm_fence_object * fence, int shareable) { struct drm_device *dev = priv->head->dev; @@ -492,11 +492,11 @@ out: EXPORT_SYMBOL(drm_fence_add_user_object); int drm_fence_object_create(struct drm_device * dev, uint32_t class, uint32_t type, - unsigned flags, drm_fence_object_t ** c_fence) + unsigned flags, struct drm_fence_object ** c_fence) { - drm_fence_object_t *fence; + struct drm_fence_object *fence; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; fence = drm_ctl_calloc(1, sizeof(*fence), DRM_MEM_FENCE); if (!fence) @@ -516,9 +516,9 @@ EXPORT_SYMBOL(drm_fence_object_create); void drm_fence_manager_init(struct drm_device * dev) { - drm_fence_manager_t *fm = &dev->fm; - drm_fence_class_manager_t *class; - drm_fence_driver_t *fed = dev->driver->fence_driver; + struct drm_fence_manager *fm = &dev->fm; + struct drm_fence_class_manager *class; + struct drm_fence_driver *fed = dev->driver->fence_driver; int i; rwlock_init(&fm->lock); @@ -548,11 +548,11 @@ void drm_fence_manager_takedown(struct drm_device * dev) { } -drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) +struct drm_fence_object *drm_lookup_fence_object(struct drm_file * priv, uint32_t handle) { struct drm_device *dev = priv->head->dev; struct drm_user_object *uo; - drm_fence_object_t *fence; + struct drm_fence_object *fence; mutex_lock(&dev->struct_mutex); uo = drm_lookup_user_object(priv, handle); @@ -560,7 +560,7 @@ drm_fence_object_t *drm_lookup_fence_object(struct drm_file * priv, uint32_t han mutex_unlock(&dev->struct_mutex); return NULL; } - fence = drm_fence_reference_locked(drm_user_object_entry(uo, drm_fence_object_t, base)); + fence = drm_fence_reference_locked(drm_user_object_entry(uo, struct drm_fence_object, base)); mutex_unlock(&dev->struct_mutex); return fence; } @@ -569,9 +569,9 @@ int drm_fence_create_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -617,7 +617,7 @@ int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; struct drm_user_object *uo; ret = 0; @@ -645,9 +645,9 @@ int drm_fence_reference_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; struct drm_user_object *uo; unsigned long flags; ret = 0; @@ -679,7 +679,7 @@ int drm_fence_unreference_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - 
drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; ret = 0; @@ -696,9 +696,9 @@ int drm_fence_signaled_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -728,9 +728,9 @@ int drm_fence_flush_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -762,9 +762,9 @@ int drm_fence_wait_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -798,9 +798,9 @@ int drm_fence_emit_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; @@ -833,9 +833,9 @@ int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS) { DRM_DEVICE; int ret; - drm_fence_manager_t *fm = &dev->fm; + struct drm_fence_manager *fm = &dev->fm; struct drm_fence_arg arg; - drm_fence_object_t *fence; + struct drm_fence_object *fence; unsigned long flags; ret = 0; diff --git a/linux-core/drm_objects.h b/linux-core/drm_objects.h index c4428a7b..441c19f2 100644 --- a/linux-core/drm_objects.h +++ b/linux-core/drm_objects.h @@ -139,7 +139,7 @@ extern int drm_user_object_unref(struct drm_file * priv, uint32_t user_token, * Fence objects. 
(drm_fence.c) */ -typedef struct drm_fence_object { +struct drm_fence_object { struct drm_user_object base; struct drm_device *dev; atomic_t usage; @@ -156,29 +156,29 @@ typedef struct drm_fence_object { uint32_t sequence; uint32_t flush_mask; uint32_t submitted_flush; -} drm_fence_object_t; +}; #define _DRM_FENCE_CLASSES 8 #define _DRM_FENCE_TYPE_EXE 0x00 -typedef struct drm_fence_class_manager { +struct drm_fence_class_manager { struct list_head ring; uint32_t pending_flush; wait_queue_head_t fence_queue; int pending_exe_flush; uint32_t last_exe_flush; uint32_t exe_flush_sequence; -} drm_fence_class_manager_t; +}; -typedef struct drm_fence_manager { +struct drm_fence_manager { int initialized; rwlock_t lock; - drm_fence_class_manager_t class[_DRM_FENCE_CLASSES]; + struct drm_fence_class_manager class[_DRM_FENCE_CLASSES]; uint32_t num_classes; atomic_t count; -} drm_fence_manager_t; +}; -typedef struct drm_fence_driver { +struct drm_fence_driver { uint32_t num_classes; uint32_t wrap_diff; uint32_t flush_diff; @@ -189,7 +189,7 @@ typedef struct drm_fence_driver { int (*emit) (struct drm_device * dev, uint32_t class, uint32_t flags, uint32_t * breadcrumb, uint32_t * native_type); void (*poke_flush) (struct drm_device * dev, uint32_t class); -} drm_fence_driver_t; +}; extern void drm_fence_handler(struct drm_device *dev, uint32_t class, uint32_t sequence, uint32_t type); @@ -197,21 +197,21 @@ extern void drm_fence_manager_init(struct drm_device *dev); extern void drm_fence_manager_takedown(struct drm_device *dev); extern void drm_fence_flush_old(struct drm_device *dev, uint32_t class, uint32_t sequence); -extern int drm_fence_object_flush(drm_fence_object_t * fence, uint32_t type); -extern int drm_fence_object_signaled(drm_fence_object_t * fence, +extern int drm_fence_object_flush(struct drm_fence_object * fence, uint32_t type); +extern int drm_fence_object_signaled(struct drm_fence_object * fence, uint32_t type, int flush); -extern void drm_fence_usage_deref_locked(drm_fence_object_t ** fence); -extern void drm_fence_usage_deref_unlocked(drm_fence_object_t ** fence); +extern void drm_fence_usage_deref_locked(struct drm_fence_object ** fence); +extern void drm_fence_usage_deref_unlocked(struct drm_fence_object ** fence); extern struct drm_fence_object *drm_fence_reference_locked(struct drm_fence_object *src); extern void drm_fence_reference_unlocked(struct drm_fence_object **dst, struct drm_fence_object *src); -extern int drm_fence_object_wait(drm_fence_object_t * fence, +extern int drm_fence_object_wait(struct drm_fence_object * fence, int lazy, int ignore_signals, uint32_t mask); extern int drm_fence_object_create(struct drm_device *dev, uint32_t type, uint32_t fence_flags, uint32_t class, - drm_fence_object_t ** c_fence); + struct drm_fence_object ** c_fence); extern int drm_fence_add_user_object(struct drm_file * priv, - drm_fence_object_t * fence, int shareable); + struct drm_fence_object * fence, int shareable); extern int drm_fence_create_ioctl(DRM_IOCTL_ARGS); extern int drm_fence_destroy_ioctl(DRM_IOCTL_ARGS); @@ -243,7 +243,7 @@ extern int drm_fence_buffers_ioctl(DRM_IOCTL_ARGS); #define DRM_BE_FLAG_BOUND_CACHED 0x00000002 struct drm_ttm_backend; -typedef struct drm_ttm_backend_func { +struct drm_ttm_backend_func { int (*needs_ub_cache_adjust) (struct drm_ttm_backend * backend); int (*populate) (struct drm_ttm_backend * backend, unsigned long num_pages, struct page ** pages); @@ -252,16 +252,16 @@ typedef struct drm_ttm_backend_func { unsigned long offset, int cached); int 
(*unbind) (struct drm_ttm_backend * backend);
	void (*destroy) (struct drm_ttm_backend * backend);
-} drm_ttm_backend_func_t;
+};
 
-typedef struct drm_ttm_backend {
+struct drm_ttm_backend {
 	uint32_t flags;
 	int mem_type;
-	drm_ttm_backend_func_t *func;
-} drm_ttm_backend_t;
+	struct drm_ttm_backend_func *func;
+};
 
-typedef struct drm_ttm {
+struct drm_ttm {
 	struct page **pages;
 	uint32_t page_flags;
 	unsigned long num_pages;
@@ -270,7 +270,7 @@ typedef struct drm_ttm {
 	struct drm_device *dev;
 	int destroy;
 	uint32_t mapping_offset;
-	drm_ttm_backend_t *be;
+	struct drm_ttm_backend *be;
 	enum {
 		ttm_bound,
 		ttm_evicted,
@@ -278,14 +278,14 @@ typedef struct drm_ttm {
 		ttm_unpopulated,
 	} state;
 
-} drm_ttm_t;
+};
 
-extern drm_ttm_t *drm_ttm_init(struct drm_device *dev, unsigned long size);
-extern int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset);
-extern void drm_ttm_unbind(drm_ttm_t * ttm);
-extern void drm_ttm_evict(drm_ttm_t * ttm);
-extern void drm_ttm_fixup_caching(drm_ttm_t * ttm);
-extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
+extern struct drm_ttm *drm_ttm_init(struct drm_device *dev, unsigned long size);
+extern int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset);
+extern void drm_ttm_unbind(struct drm_ttm * ttm);
+extern void drm_ttm_evict(struct drm_ttm * ttm);
+extern void drm_ttm_fixup_caching(struct drm_ttm * ttm);
+extern struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index);
 
 /*
  * Destroy a ttm. The user normally calls drmRmMap or a similar IOCTL to do this,
@@ -293,7 +293,7 @@ extern struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index);
  * when the last vma exits.
  */
 
-extern int drm_destroy_ttm(drm_ttm_t * ttm);
+extern int drm_destroy_ttm(struct drm_ttm * ttm);
 
 #define DRM_FLAG_MASKED(_old, _new, _mask) {\
 (_old) ^= (((_old) ^ (_new)) & (_mask)); \
@@ -316,7 +316,7 @@ extern int drm_destroy_ttm(drm_ttm_t * ttm);
 
 /*
  * Buffer objects. (drm_bo.c, drm_bo_move.c)
  */
 
-typedef struct drm_bo_mem_reg {
+struct drm_bo_mem_reg {
 	struct drm_mm_node *mm_node;
 	unsigned long size;
 	unsigned long num_pages;
@@ -324,9 +324,9 @@ typedef struct drm_bo_mem_reg {
 	uint32_t mem_type;
 	uint64_t flags;
 	uint64_t mask;
-} drm_bo_mem_reg_t;
+};
 
-typedef struct drm_buffer_object {
+struct drm_buffer_object {
 	struct drm_device *dev;
 	struct drm_user_object base;
 
@@ -340,14 +340,14 @@ typedef struct drm_buffer_object {
 	enum drm_bo_type type;
 	unsigned long offset;
 	atomic_t mapped;
-	drm_bo_mem_reg_t mem;
+	struct drm_bo_mem_reg mem;
 
 	struct list_head lru;
 	struct list_head ddestroy;
 
 	uint32_t fence_type;
 	uint32_t fence_class;
-	drm_fence_object_t *fence;
+	struct drm_fence_object *fence;
 	uint32_t priv_flags;
 	wait_queue_head_t event_queue;
 	struct mutex mutex;
@@ -359,7 +359,7 @@ typedef struct drm_buffer_object {
 
 	/* For vm */
 
-	drm_ttm_t *ttm;
+	struct drm_ttm *ttm;
 	struct drm_map_list map_list;
 	uint32_t memory_type;
 	unsigned long bus_offset;
@@ -372,12 +372,12 @@ typedef struct drm_buffer_object {
 	struct list_head p_mm_list;
 #endif
 
-} drm_buffer_object_t;
+};
 
 #define _DRM_BO_FLAG_UNFENCED 0x00000001
 #define _DRM_BO_FLAG_EVICTED  0x00000002
 
-typedef struct drm_mem_type_manager {
+struct drm_mem_type_manager {
 	int has_type;
 	int use_type;
 	struct drm_mm manager;
@@ -388,7 +388,7 @@ typedef struct drm_mem_type_manager {
 	unsigned long io_offset;
 	unsigned long io_size;
 	void *io_addr;
-} drm_mem_type_manager_t;
+};
 
 #define _DRM_FLAG_MEMTYPE_FIXED     0x00000001	/* Fixed (on-card) PCI memory */
 #define _DRM_FLAG_MEMTYPE_MAPPABLE  0x00000002	/* Memory mappable */
@@ -398,13 +398,13 @@ typedef struct drm_mem_type_manager {
 #define _DRM_FLAG_MEMTYPE_CMA       0x00000010	/* Can't map aperture */
 #define _DRM_FLAG_MEMTYPE_CSELECT   0x00000020	/* Select caching */
 
-typedef struct drm_buffer_manager {
+struct drm_buffer_manager {
 	struct mutex init_mutex;
 	struct mutex evict_mutex;
 	int nice_mode;
 	int initialized;
 	struct drm_file *last_to_validate;
-	drm_mem_type_manager_t man[DRM_BO_MEM_TYPES];
+	struct drm_mem_type_manager man[DRM_BO_MEM_TYPES];
 	struct list_head unfenced;
 	struct list_head ddestroy;
 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
@@ -415,23 +415,23 @@ typedef struct drm_buffer_manager {
 	uint32_t fence_type;
 	unsigned long cur_pages;
 	atomic_t count;
-} drm_buffer_manager_t;
+};
 
-typedef struct drm_bo_driver {
+struct drm_bo_driver {
 	const uint32_t *mem_type_prio;
 	const uint32_t *mem_busy_prio;
 	uint32_t num_mem_type_prio;
 	uint32_t num_mem_busy_prio;
-	drm_ttm_backend_t *(*create_ttm_backend_entry)
+	struct drm_ttm_backend *(*create_ttm_backend_entry)
 	 (struct drm_device * dev);
 	int (*fence_type) (struct drm_buffer_object *bo, uint32_t * type);
 	int (*invalidate_caches) (struct drm_device * dev, uint64_t flags);
 	int (*init_mem_type) (struct drm_device * dev, uint32_t type,
-			      drm_mem_type_manager_t * man);
+			      struct drm_mem_type_manager * man);
 	 uint32_t(*evict_mask) (struct drm_buffer_object *bo);
 	int (*move) (struct drm_buffer_object * bo,
 		     int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
-} drm_bo_driver_t;
+};
 
 /*
  * buffer objects (drm_bo.c)
@@ -455,24 +455,24 @@ extern int drm_mm_unlock_ioctl(DRM_IOCTL_ARGS);
 extern int drm_bo_driver_finish(struct drm_device *dev);
 extern int drm_bo_driver_init(struct drm_device *dev);
 extern int drm_bo_pci_offset(struct drm_device *dev,
-			     drm_bo_mem_reg_t * mem,
+			     struct drm_bo_mem_reg * mem,
 			     unsigned long *bus_base,
 			     unsigned long *bus_offset, unsigned long *bus_size);
-extern int drm_mem_reg_is_pci(struct drm_device *dev, drm_bo_mem_reg_t * mem);
+extern int drm_mem_reg_is_pci(struct drm_device *dev, struct drm_bo_mem_reg * mem);
 
-extern void drm_bo_usage_deref_locked(drm_buffer_object_t ** bo);
+extern void drm_bo_usage_deref_locked(struct drm_buffer_object ** bo);
 extern int drm_fence_buffer_objects(struct drm_file * priv,
 				    struct list_head *list,
 				    uint32_t fence_flags,
-				    drm_fence_object_t * fence,
-				    drm_fence_object_t ** used_fence);
-extern void drm_bo_add_to_lru(drm_buffer_object_t * bo);
-extern int drm_bo_wait(drm_buffer_object_t * bo, int lazy, int ignore_signals,
+				    struct drm_fence_object * fence,
+				    struct drm_fence_object ** used_fence);
+extern void drm_bo_add_to_lru(struct drm_buffer_object * bo);
+extern int drm_bo_wait(struct drm_buffer_object * bo, int lazy, int ignore_signals,
 		       int no_wait);
-extern int drm_bo_mem_space(drm_buffer_object_t * bo,
-			    drm_bo_mem_reg_t * mem, int no_wait);
-extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
+extern int drm_bo_mem_space(struct drm_buffer_object * bo,
+			    struct drm_bo_mem_reg * mem, int no_wait);
+extern int drm_bo_move_buffer(struct drm_buffer_object * bo, uint32_t new_mem_flags,
 			      int no_wait, int move_unfenced);
 
 /*
@@ -480,18 +480,18 @@ extern int drm_bo_move_buffer(drm_buffer_object_t * bo, uint32_t new_mem_flags,
  * drm_bo_move.c
  */
 
-extern int drm_bo_move_ttm(drm_buffer_object_t * bo,
-			   int evict, int no_wait, drm_bo_mem_reg_t * new_mem);
-extern int drm_bo_move_memcpy(drm_buffer_object_t * bo,
+extern int drm_bo_move_ttm(struct drm_buffer_object * bo,
+			   int evict, int no_wait, struct drm_bo_mem_reg * new_mem);
+extern int drm_bo_move_memcpy(struct drm_buffer_object * bo,
 			      int evict,
-			      int no_wait, drm_bo_mem_reg_t * new_mem);
-extern int drm_bo_move_accel_cleanup(drm_buffer_object_t * bo,
+			      int no_wait, struct drm_bo_mem_reg * new_mem);
+extern int drm_bo_move_accel_cleanup(struct drm_buffer_object * bo,
 				     int evict,
 				     int no_wait,
 				     uint32_t fence_class,
 				     uint32_t fence_type,
 				     uint32_t fence_flags,
-				     drm_bo_mem_reg_t * new_mem);
+				     struct drm_bo_mem_reg * new_mem);
 
 #ifdef CONFIG_DEBUG_MUTEXES
 #define DRM_ASSERT_LOCKED(_mutex)					\
diff --git a/linux-core/drm_proc.c b/linux-core/drm_proc.c
index f33bd93d..3f9cb028 100644
--- a/linux-core/drm_proc.c
+++ b/linux-core/drm_proc.c
@@ -434,8 +434,8 @@ static int drm__objects_info(char *buf, char **start, off_t offset, int request,
 {
 	struct drm_device *dev = (struct drm_device *) data;
 	int len = 0;
-	drm_buffer_manager_t *bm = &dev->bm;
-	drm_fence_manager_t *fm = &dev->fm;
+	struct drm_buffer_manager *bm = &dev->bm;
+	struct drm_fence_manager *fm = &dev->fm;
 	drm_u64_t used_mem;
 	drm_u64_t low_mem;
 	drm_u64_t high_mem;
diff --git a/linux-core/drm_ttm.c b/linux-core/drm_ttm.c
index 31503c9c..60c64cba 100644
--- a/linux-core/drm_ttm.c
+++ b/linux-core/drm_ttm.c
@@ -45,7 +45,7 @@ static void drm_ttm_cache_flush(void)
  * Use kmalloc if possible. Otherwise fall back to vmalloc.
  */
 
-static void ttm_alloc_pages(drm_ttm_t * ttm)
+static void ttm_alloc_pages(struct drm_ttm * ttm)
 {
 	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
 	ttm->pages = NULL;
@@ -66,7 +66,7 @@ static void ttm_alloc_pages(drm_ttm_t * ttm)
 	}
 }
 
-static void ttm_free_pages(drm_ttm_t * ttm)
+static void ttm_free_pages(struct drm_ttm * ttm)
 {
 	unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
 
@@ -105,7 +105,7 @@ static struct page *drm_ttm_alloc_page(void)
  * for range of pages in a ttm.
  */
 
-static int drm_set_caching(drm_ttm_t * ttm, int noncached)
+static int drm_set_caching(struct drm_ttm * ttm, int noncached)
 {
 	int i;
 	struct page **cur_page;
@@ -142,12 +142,12 @@ static int drm_set_caching(drm_ttm_t * ttm, int noncached)
  * Free all resources associated with a ttm.
 
 */
 
-int drm_destroy_ttm(drm_ttm_t * ttm)
+int drm_destroy_ttm(struct drm_ttm * ttm)
 {
 
 	int i;
 	struct page **cur_page;
-	drm_ttm_backend_t *be;
+	struct drm_ttm_backend *be;
 
 	if (!ttm)
 		return 0;
@@ -159,7 +159,7 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 	}
 
 	if (ttm->pages) {
-		drm_buffer_manager_t *bm = &ttm->dev->bm;
+		struct drm_buffer_manager *bm = &ttm->dev->bm;
 		if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
 			drm_set_caching(ttm, 0);
 
@@ -191,10 +191,10 @@ int drm_destroy_ttm(drm_ttm_t * ttm)
 	return 0;
 }
 
-struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
+struct page *drm_ttm_get_page(struct drm_ttm * ttm, int index)
 {
 	struct page *p;
-	drm_buffer_manager_t *bm = &ttm->dev->bm;
+	struct drm_buffer_manager *bm = &ttm->dev->bm;
 
 	p = ttm->pages[index];
 	if (!p) {
@@ -207,11 +207,11 @@ struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
 	return p;
 }
 
-static int drm_ttm_populate(drm_ttm_t * ttm)
+static int drm_ttm_populate(struct drm_ttm * ttm)
 {
 	struct page *page;
 	unsigned long i;
-	drm_ttm_backend_t *be;
+	struct drm_ttm_backend *be;
 
 	if (ttm->state != ttm_unpopulated)
 		return 0;
@@ -231,10 +231,10 @@ static int drm_ttm_populate(drm_ttm_t * ttm)
  * Initialize a ttm.
 
 */
 
-drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
+struct drm_ttm *drm_ttm_init(struct drm_device * dev, unsigned long size)
 {
-	drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
-	drm_ttm_t *ttm;
+	struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
+	struct drm_ttm *ttm;
 
 	if (!bo_driver)
 		return NULL;
@@ -275,9 +275,9 @@ drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
  * Unbind a ttm region from the aperture.
 */
 
-void drm_ttm_evict(drm_ttm_t * ttm)
+void drm_ttm_evict(struct drm_ttm * ttm)
 {
-	drm_ttm_backend_t *be = ttm->be;
+	struct drm_ttm_backend *be = ttm->be;
 	int ret;
 
 	if (ttm->state == ttm_bound) {
@@ -288,11 +288,11 @@ void drm_ttm_evict(drm_ttm_t * ttm)
 	ttm->state = ttm_evicted;
 }
 
-void drm_ttm_fixup_caching(drm_ttm_t * ttm)
+void drm_ttm_fixup_caching(struct drm_ttm * ttm)
 {
 
 	if (ttm->state == ttm_evicted) {
-		drm_ttm_backend_t *be = ttm->be;
+		struct drm_ttm_backend *be = ttm->be;
 		if (be->func->needs_ub_cache_adjust(be)) {
 			drm_set_caching(ttm, 0);
 		}
@@ -300,7 +300,7 @@ void drm_ttm_fixup_caching(drm_ttm_t * ttm)
 	}
 }
 
-void drm_ttm_unbind(drm_ttm_t * ttm)
+void drm_ttm_unbind(struct drm_ttm * ttm)
 {
 	if (ttm->state == ttm_bound)
 		drm_ttm_evict(ttm);
@@ -308,11 +308,11 @@ void drm_ttm_unbind(drm_ttm_t * ttm)
 	drm_ttm_fixup_caching(ttm);
 }
 
-int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
+int drm_bind_ttm(struct drm_ttm * ttm, int cached, unsigned long aper_offset)
 {
 
 	int ret = 0;
-	drm_ttm_backend_t *be;
+	struct drm_ttm_backend *be;
 
 	if (!ttm)
 		return -EINVAL;
diff --git a/linux-core/drm_vm.c b/linux-core/drm_vm.c
index de2fba1a..265a59d8 100644
--- a/linux-core/drm_vm.c
+++ b/linux-core/drm_vm.c
@@ -713,10 +713,10 @@ EXPORT_SYMBOL(drm_mmap);
 static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 				     unsigned long address)
 {
-	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
 	unsigned long page_offset;
 	struct page *page = NULL;
-	drm_ttm_t *ttm;
+	struct drm_ttm *ttm;
 	struct drm_device *dev;
 	unsigned long pfn;
 	int err;
@@ -766,7 +766,7 @@ static unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
 	page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
 
 	if (bus_size) {
-		drm_mem_type_manager_t *man = &dev->bm.man[bo->mem.mem_type];
+		struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
 
 		pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
 		vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
@@ -798,7 +798,7 @@ out_unlock:
 
 static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
 {
-	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
 
 	drm_vm_open_locked(vma);
 	atomic_inc(&bo->usage);
@@ -815,7 +815,7 @@ static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
 
 static void drm_bo_vm_open(struct vm_area_struct *vma)
 {
-	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
 	struct drm_device *dev = bo->dev;
 
 	mutex_lock(&dev->struct_mutex);
@@ -831,7 +831,7 @@ static void drm_bo_vm_open(struct vm_area_struct *vma)
 
 static void drm_bo_vm_close(struct vm_area_struct *vma)
 {
-	drm_buffer_object_t *bo = (drm_buffer_object_t *) vma->vm_private_data;
+	struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
 	struct drm_device *dev = bo->dev;
 
 	drm_vm_close(vma);
diff --git a/linux-core/i915_buffer.c b/linux-core/i915_buffer.c
index 6aeccfcb..bf500cc6 100644
--- a/linux-core/i915_buffer.c
+++ b/linux-core/i915_buffer.c
@@ -33,12 +33,12 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-drm_ttm_backend_t *i915_create_ttm_backend_entry(struct drm_device * dev)
+struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device * dev)
 {
 	return drm_agp_init_ttm(dev);
 }
 
-int i915_fence_types(drm_buffer_object_t *bo, uint32_t * type)
+int i915_fence_types(struct drm_buffer_object *bo, uint32_t * type)
 {
 	if (bo->mem.flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
 		*type = 3;
@@ -64,7 +64,7 @@ int i915_invalidate_caches(struct drm_device * dev, uint64_t flags)
 }
 
 int i915_init_mem_type(struct drm_device * dev, uint32_t type,
-		       drm_mem_type_manager_t * man)
+		       struct drm_mem_type_manager * man)
 {
 	switch (type) {
 	case DRM_BO_MEM_LOCAL:
@@ -105,7 +105,7 @@ int i915_init_mem_type(struct drm_device * dev, uint32_t type,
 	return 0;
 }
 
-uint32_t i915_evict_mask(drm_buffer_object_t *bo)
+uint32_t i915_evict_mask(struct drm_buffer_object *bo)
 {
 	switch (bo->mem.mem_type) {
 	case DRM_BO_MEM_LOCAL:
@@ -150,10 +150,10 @@ static void i915_emit_copy_blit(struct drm_device * dev,
 	return;
 }
 
-static int i915_move_blit(drm_buffer_object_t * bo,
-			  int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+static int i915_move_blit(struct drm_buffer_object * bo,
+			  int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
 {
-	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	struct drm_bo_mem_reg *old_mem = &bo->mem;
 	int dir = 0;
 
 	if ((old_mem->mem_type == new_mem->mem_type) &&
@@ -180,11 +180,11 @@ static int i915_move_blit(drm_buffer_object_t * bo,
  * then blit and subsequently move out again.
 */
 
-static int i915_move_flip(drm_buffer_object_t * bo,
-			  int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+static int i915_move_flip(struct drm_buffer_object * bo,
+			  int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
 {
 	struct drm_device *dev = bo->dev;
-	drm_bo_mem_reg_t tmp_mem;
+	struct drm_bo_mem_reg tmp_mem;
 	int ret;
 
 	tmp_mem = *new_mem;
@@ -216,10 +216,10 @@ out_cleanup:
 	return ret;
 }
 
-int i915_move(drm_buffer_object_t * bo,
-	      int evict, int no_wait, drm_bo_mem_reg_t * new_mem)
+int i915_move(struct drm_buffer_object * bo,
+	      int evict, int no_wait, struct drm_bo_mem_reg * new_mem)
 {
-	drm_bo_mem_reg_t *old_mem = &bo->mem;
+	struct drm_bo_mem_reg *old_mem = &bo->mem;
 
 	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
 		return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
diff --git a/linux-core/i915_drv.c b/linux-core/i915_drv.c
index 49437066..e337e1d2 100644
--- a/linux-core/i915_drv.c
+++ b/linux-core/i915_drv.c
@@ -39,7 +39,7 @@ static struct pci_device_id pciidlist[] = {
 };
 
 #ifdef I915_HAVE_FENCE
-static drm_fence_driver_t i915_fence_driver = {
+static struct drm_fence_driver i915_fence_driver = {
 	.num_classes = 1,
 	.wrap_diff = (1U << (BREADCRUMB_BITS - 1)),
 	.flush_diff = (1U << (BREADCRUMB_BITS - 2)),
@@ -55,7 +55,7 @@ static drm_fence_driver_t i915_fence_driver = {
 static uint32_t i915_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_TT, DRM_BO_MEM_LOCAL};
 static uint32_t i915_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_LOCAL};
 
-static drm_bo_driver_t i915_bo_driver = {
+static struct drm_bo_driver i915_bo_driver = {
 	.mem_type_prio = i915_mem_prios,
 	.mem_busy_prio = i915_busy_prios,
 	.num_mem_type_prio = sizeof(i915_mem_prios)/sizeof(uint32_t),
diff --git a/linux-core/i915_fence.c b/linux-core/i915_fence.c
index a71e5dac..6f0de2ca 100644
--- a/linux-core/i915_fence.c
+++ b/linux-core/i915_fence.c
@@ -41,9 +41,9 @@ static void i915_perform_flush(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	drm_fence_manager_t *fm = &dev->fm;
-	drm_fence_class_manager_t *fc = &fm->class[0];
-	drm_fence_driver_t *driver = dev->driver->fence_driver;
+	struct drm_fence_manager *fm = &dev->fm;
+	struct drm_fence_class_manager *fc = &fm->class[0];
+	struct drm_fence_driver *driver = dev->driver->fence_driver;
 	uint32_t flush_flags = 0;
 	uint32_t flush_sequence = 0;
 	uint32_t i_status;
@@ -111,7 +111,7 @@ static void i915_perform_flush(struct drm_device * dev)
 
 void i915_poke_flush(struct drm_device * dev, uint32_t class)
 {
-	drm_fence_manager_t *fm = &dev->fm;
+	struct drm_fence_manager *fm = &dev->fm;
 	unsigned long flags;
 
 	write_lock_irqsave(&fm->lock, flags);
@@ -137,7 +137,7 @@ int i915_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t f
 
 void i915_fence_handler(struct drm_device * dev)
 {
-	drm_fence_manager_t *fm = &dev->fm;
+	struct drm_fence_manager *fm = &dev->fm;
 
 	write_lock(&fm->lock);
 	i915_perform_flush(dev);
diff --git a/linux-core/via_buffer.c b/linux-core/via_buffer.c
index e452611d..0461b3c7 100644
--- a/linux-core/via_buffer.c
+++ b/linux-core/via_buffer.c
@@ -32,12 +32,12 @@
 #include "via_drm.h"
 #include "via_drv.h"
 
-drm_ttm_backend_t *via_create_ttm_backend_entry(struct drm_device * dev)
+struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device * dev)
 {
 	return drm_agp_init_ttm(dev);
 }
 
-int via_fence_types(drm_buffer_object_t *bo, uint32_t * type)
+int via_fence_types(struct drm_buffer_object *bo, uint32_t * type)
 {
 	*type = 3;
 	return 0;
@@ -82,7 +82,7 @@ static int via_vram_info(struct drm_device *dev,
 }
 
 int via_init_mem_type(struct drm_device * dev, uint32_t type,
-		      drm_mem_type_manager_t * man)
+		      struct drm_mem_type_manager * man)
 {
 	switch (type) {
 	case DRM_BO_MEM_LOCAL:
@@ -143,7 +143,7 @@ int via_init_mem_type(struct drm_device * dev, uint32_t type,
 	return 0;
 }
 
-uint32_t via_evict_mask(drm_buffer_object_t *bo)
+uint32_t via_evict_mask(struct drm_buffer_object *bo)
 {
 	switch (bo->mem.mem_type) {
 	case DRM_BO_MEM_LOCAL:
diff --git a/linux-core/via_fence.c b/linux-core/via_fence.c
index ce4366d2..a8db3d12 100644
--- a/linux-core/via_fence.c
+++ b/linux-core/via_fence.c
@@ -42,7 +42,7 @@ static uint32_t via_perform_flush(struct drm_device *dev, uint32_t class)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	drm_fence_class_manager_t *fc = &dev->fm.class[class];
+	struct drm_fence_class_manager *fc = &dev->fm.class[class];
 	uint32_t pending_flush_types = 0;
 	uint32_t signaled_flush_types = 0;
 	uint32_t status;
@@ -155,7 +155,7 @@ int via_fence_emit_sequence(struct drm_device * dev, uint32_t class, uint32_t fl
 void via_poke_flush(struct drm_device * dev, uint32_t class)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	drm_fence_manager_t *fm = &dev->fm;
+	struct drm_fence_manager *fm = &dev->fm;
 	unsigned long flags;
 	uint32_t pending_flush;
 
@@ -202,9 +202,9 @@ void via_fence_timer(unsigned long data)
 {
 	struct drm_device *dev = (struct drm_device *) data;
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	drm_fence_manager_t *fm = &dev->fm;
+	struct drm_fence_manager *fm = &dev->fm;
 	uint32_t pending_flush;
-	drm_fence_class_manager_t *fc = &dev->fm.class[0];
+	struct drm_fence_class_manager *fc = &dev->fm.class[0];
 
 	if (!dev_priv)
 		return;
diff --git a/shared-core/i915_drv.h b/shared-core/i915_drv.h
index 1ed37c63..ee2b474b 100644
--- a/shared-core/i915_drv.h
+++ b/shared-core/i915_drv.h
@@ -197,14 +197,14 @@ extern int i915_fence_has_irq(struct drm_device *dev, uint32_t class, uint32_t f
 
 #ifdef I915_HAVE_BUFFER
 /* i915_buffer.c */
-extern drm_ttm_backend_t *i915_create_ttm_backend_entry(struct drm_device *dev);
-extern int i915_fence_types(drm_buffer_object_t *bo, uint32_t *type);
+extern struct drm_ttm_backend *i915_create_ttm_backend_entry(struct drm_device *dev);
+extern int i915_fence_types(struct drm_buffer_object *bo, uint32_t *type);
 extern int i915_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int i915_init_mem_type(struct drm_device *dev, uint32_t type,
-			      drm_mem_type_manager_t *man);
-extern uint32_t i915_evict_mask(drm_buffer_object_t *bo);
-extern int i915_move(drm_buffer_object_t *bo, int evict,
-		     int no_wait, drm_bo_mem_reg_t *new_mem);
+			      struct drm_mem_type_manager *man);
+extern uint32_t i915_evict_mask(struct drm_buffer_object *bo);
+extern int i915_move(struct drm_buffer_object *bo, int evict,
+		     int no_wait, struct drm_bo_mem_reg *new_mem);
 #endif
 
diff --git a/shared-core/via_drv.c b/shared-core/via_drv.c
index 0a478fef..9f099555 100644
--- a/shared-core/via_drv.c
+++ b/shared-core/via_drv.c
@@ -40,7 +40,7 @@ static struct pci_device_id pciidlist[] = {
 
 
 #ifdef VIA_HAVE_FENCE
-static drm_fence_driver_t via_fence_driver = {
+static struct drm_fence_driver via_fence_driver = {
 	.num_classes = 1,
 	.wrap_diff = (1 << 30),
 	.flush_diff = (1 << 20),
@@ -65,7 +65,7 @@ static uint32_t via_mem_prios[] = {DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM
 static uint32_t via_busy_prios[] = {DRM_BO_MEM_TT, DRM_BO_MEM_PRIV0, DRM_BO_MEM_VRAM, DRM_BO_MEM_LOCAL};
 
-static drm_bo_driver_t via_bo_driver = {
+static struct drm_bo_driver via_bo_driver = {
 	.mem_type_prio = via_mem_prios,
 	.mem_busy_prio = via_busy_prios,
 	.num_mem_type_prio = ARRAY_SIZE(via_mem_prios),
diff --git a/shared-core/via_drv.h b/shared-core/via_drv.h
index 9ffc7a51..05935c81 100644
--- a/shared-core/via_drv.h
+++ b/shared-core/via_drv.h
@@ -204,14 +204,14 @@ extern int via_fence_has_irq(struct drm_device * dev, uint32_t class,
 #endif
 
 #ifdef VIA_HAVE_BUFFER
-extern drm_ttm_backend_t *via_create_ttm_backend_entry(struct drm_device *dev);
-extern int via_fence_types(drm_buffer_object_t *bo, uint32_t *type);
+extern struct drm_ttm_backend *via_create_ttm_backend_entry(struct drm_device *dev);
+extern int via_fence_types(struct drm_buffer_object *bo, uint32_t *type);
 extern int via_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags);
 extern int via_init_mem_type(struct drm_device *dev, uint32_t type,
-			     drm_mem_type_manager_t *man);
-extern uint32_t via_evict_mask(drm_buffer_object_t *bo);
-extern int via_move(drm_buffer_object_t *bo, int evict,
-		    int no_wait, drm_bo_mem_reg_t *new_mem);
+			     struct drm_mem_type_manager *man);
+extern uint32_t via_evict_mask(struct drm_buffer_object *bo);
+extern int via_move(struct drm_buffer_object *bo, int evict,
+		    int no_wait, struct drm_bo_mem_reg *new_mem);
 #endif
 
 #endif
-- 
2.11.0