struct amdgpu_vm_bo_base *bo_base;
if (vm->bulk_moveable) {
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&adev->mman.bdev.lru_lock);
ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&adev->mman.bdev.lru_lock);
return;
}
memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&adev->mman.bdev.lru_lock);
list_for_each_entry(bo_base, &vm->idle, vm_status) {
struct amdgpu_bo *bo = bo_base->bo;
		ttm_bo_move_to_lru_tail(&bo->tbo, &bo->tbo.mem,
					&vm->lru_bulk_move);
		if (bo->shadow)
			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
						&bo->shadow->tbo.mem,
						&vm->lru_bulk_move);
}
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&adev->mman.bdev.lru_lock);
vm->bulk_moveable = true;
}
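/*
 * Illustration only (not part of the patch, helper name hypothetical): any
 * driver embedding a struct ttm_device now reaches the LRU lock through that
 * device instead of the removed ttm_glob.lru_lock, e.g. for a bulk move:
 */
static void my_bulk_move_tail(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
	spin_lock(&adev->mman.bdev.lru_lock);
	ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
	spin_unlock(&adev->mman.bdev.lru_lock);
}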
release->id | 0xf0000000, release->base.seqno);
trace_dma_fence_emit(&release->base);
- spin_lock(&ttm_glob.lru_lock);
-
list_for_each_entry(entry, &release->bos, head) {
bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, &release->base);
- ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&ttm_glob.lru_lock);
ww_acquire_fini(&release->ticket);
}
* reference it any more. The only tricky case is the trylock on
* the resv object while holding the lru_lock.
*/
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&bo->bdev->lru_lock);
bo->base.resv = &bo->base._resv;
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
}
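/*
 * Illustration (not part of the patch, helper name hypothetical): the "tricky
 * case" mentioned above is the lock ordering. The reservation object is
 * normally taken before the lru_lock, so with the lru_lock already held only
 * a trylock on the resv is allowed; sleeping on it here could deadlock.
 */
static bool my_try_lock_resv_under_lru(struct ttm_buffer_object *bo)
{
	bool locked;

	spin_lock(&bo->bdev->lru_lock);
	locked = dma_resv_trylock(bo->base.resv);	/* must not block here */
	spin_unlock(&bo->bdev->lru_lock);

	return locked;
}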
return r;
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
30 * HZ);
else if (lret == 0)
return -EBUSY;
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&bo->bdev->lru_lock);
if (unlock_resv && !dma_resv_trylock(bo->base.resv)) {
/*
* We raced, and lost, someone else holds the reservation now,
* delayed destruction would succeed, so just return success
* here.
*/
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
return 0;
}
ret = 0;
if (ret || unlikely(list_empty(&bo->ddestroy))) {
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
return ret;
}
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
*/
bool ttm_bo_delayed_delete(struct ttm_device *bdev, bool remove_all)
{
- struct ttm_global *glob = &ttm_glob;
struct list_head removed;
bool empty;
INIT_LIST_HEAD(&removed);
- spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->lru_lock);
while (!list_empty(&bdev->ddestroy)) {
struct ttm_buffer_object *bo;
		bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
				      ddestroy);
		list_move_tail(&bo->ddestroy, &removed);
		if (!ttm_bo_get_unless_zero(bo))
			continue;
if (remove_all || bo->base.resv != &bo->base._resv) {
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->lru_lock);
dma_resv_lock(bo->base.resv, NULL);
- spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->lru_lock);
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
} else if (dma_resv_trylock(bo->base.resv)) {
ttm_bo_cleanup_refs(bo, false, !remove_all, true);
} else {
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->lru_lock);
}
ttm_bo_put(bo);
- spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->lru_lock);
}
list_splice_tail(&removed, &bdev->ddestroy);
empty = list_empty(&bdev->ddestroy);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->lru_lock);
return empty;
}
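/*
 * Sketch of how the return value is consumed (mirrors the delayed work
 * handler that ttm_device_init() wires up below; shown for illustration only,
 * name hypothetical):
 */
static void my_delayed_destroy_work(struct work_struct *work)
{
	struct ttm_device *bdev =
		container_of(work, struct ttm_device, wq.work);

	/* Reschedule as long as the ddestroy list is not yet empty. */
	if (!ttm_bo_delayed_delete(bdev, false))
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}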
ttm_bo_flush_all_fences(bo);
bo->deleted = true;
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&bo->bdev->lru_lock);
/*
* Make pinned bos immediately available to
kref_init(&bo->kref);
list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
schedule_delayed_work(&bdev->wq,
((HZ / 100) < 1) ? 1 : HZ / 100);
return;
}
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&bo->bdev->lru_lock);
ttm_bo_del_from_lru(bo);
list_del(&bo->ddestroy);
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
ttm_bo_cleanup_memtype_use(bo);
dma_resv_unlock(bo->base.resv);
unsigned i;
int ret;
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &man->lru[i], lru) {
bool busy;
if (!bo) {
if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
busy_bo = NULL;
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bdev->lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
ttm_bo_put(busy_bo);
return ret;
}
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bdev->lru_lock);
ret = ttm_bo_evict(bo, ctx);
if (locked)
mem->mem_type = place->mem_type;
mem->placement = place->flags;
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&bo->bdev->lru_lock);
ttm_bo_move_to_lru_tail(bo, mem, NULL);
- spin_unlock(&ttm_glob.lru_lock);
-
+ spin_unlock(&bo->bdev->lru_lock);
return 0;
}
int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
- struct ttm_global *glob = &ttm_glob;
bool locked;
int ret;
ttm_bo_del_from_lru(bo);
/* TODO: Cleanup the locking */
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
/*
* Move to system cached
ttm_pool_mgr_init(num_pages * 50 / 100);
ttm_tt_mgr_init();
- spin_lock_init(&glob->lru_lock);
glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
if (unlikely(glob->dummy_read_page == NULL)) {
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags)
{
- struct ttm_global *glob = &ttm_glob;
struct ttm_resource_manager *man;
struct ttm_buffer_object *bo;
unsigned i, j;
int ret;
- spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->lru_lock);
for (i = TTM_PL_SYSTEM; i < TTM_NUM_MEM_TYPES; ++i) {
man = ttm_manager_type(bdev, i);
if (!man || !man->use_tt)
}
}
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->lru_lock);
return 0;
}
EXPORT_SYMBOL(ttm_device_swapout);
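/*
 * Illustration (not part of the patch, helper name hypothetical): a caller,
 * e.g. shrinker code, now swaps out per device and no longer touches any
 * global LRU state:
 */
static int my_try_swapout(struct ttm_device *bdev)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = false,
		.no_wait_gpu = false,
	};

	return ttm_device_swapout(bdev, &ctx, GFP_KERNEL);
}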
bdev->vma_manager = vma_manager;
INIT_DELAYED_WORK(&bdev->wq, ttm_device_delayed_workqueue);
+ spin_lock_init(&bdev->lru_lock);
INIT_LIST_HEAD(&bdev->ddestroy);
bdev->dev_mapping = mapping;
mutex_lock(&ttm_global_mutex);
void ttm_device_fini(struct ttm_device *bdev)
{
- struct ttm_global *glob = &ttm_glob;
struct ttm_resource_manager *man;
unsigned i;
if (ttm_bo_delayed_delete(bdev, true))
pr_debug("Delayed destroy list was clean\n");
- spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i)
if (list_empty(&man->lru[0]))
pr_debug("Swap list %d was clean\n", i);
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->lru_lock);
ttm_pool_fini(&bdev->pool);
ttm_global_release();
if (list_empty(list))
return;
- spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&ttm_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
if (list_empty(list))
return;
- spin_lock(&ttm_glob.lru_lock);
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
dma_resv_add_shared_fence(bo->base.resv, fence);
else
dma_resv_add_excl_fence(bo->base.resv, fence);
- ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
+ ttm_bo_move_to_lru_tail_unlocked(bo);
dma_resv_unlock(bo->base.resv);
}
- spin_unlock(&ttm_glob.lru_lock);
if (ticket)
ww_acquire_fini(ticket);
}
.no_wait_gpu = false,
.force_alloc = true
};
- struct ttm_global *glob = &ttm_glob;
struct dma_fence *fence;
int ret;
unsigned i;
* Can't use standard list traversal since we're unlocking.
*/
- spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
while (!list_empty(&man->lru[i])) {
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->lru_lock);
ret = ttm_mem_evict_first(bdev, man, NULL, &ctx,
NULL);
if (ret)
return ret;
- spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->lru_lock);
}
}
- spin_unlock(&glob->lru_lock);
+ spin_unlock(&bdev->lru_lock);
spin_lock(&man->move_lock);
fence = dma_fence_get(man->move);
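/*
 * Illustration (not part of the patch, helper name hypothetical): why
 * list_for_each_entry() is avoided in the eviction loop above. The lock is
 * dropped for the actual eviction, so the list may change concurrently; each
 * iteration therefore restarts from the list head, and the processed entry
 * must leave the list before the lock is released.
 */
static void my_drain_list(struct ttm_device *bdev, struct list_head *head)
{
	spin_lock(&bdev->lru_lock);
	while (!list_empty(head)) {
		struct list_head *entry = head->next;

		/* Take the entry off the list before dropping the lock. */
		list_del_init(entry);
		spin_unlock(&bdev->lru_lock);

		/* ... process the entry, possibly sleeping ... */

		spin_lock(&bdev->lru_lock);
	}
	spin_unlock(&bdev->lru_lock);
}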
static inline void
ttm_bo_move_to_lru_tail_unlocked(struct ttm_buffer_object *bo)
{
- spin_lock(&ttm_glob.lru_lock);
+ spin_lock(&bo->bdev->lru_lock);
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
- spin_unlock(&ttm_glob.lru_lock);
+ spin_unlock(&bo->bdev->lru_lock);
}
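/*
 * Illustration (not part of the patch, helper name hypothetical): call sites
 * that hold the reservation of a single BO, like the qxl and execbuf hunks
 * above, can use the unlocked wrapper instead of open-coding the per-device
 * lock:
 */
static void my_bump_bo_lru(struct ttm_buffer_object *bo)
{
	dma_resv_assert_held(bo->base.resv);
	ttm_bo_move_to_lru_tail_unlocked(bo);
}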
static inline void ttm_bo_assign_mem(struct ttm_buffer_object *bo,
*/
struct page *dummy_read_page;
- spinlock_t lru_lock;
/**
* Protected by ttm_global_mutex.
struct ttm_pool pool;
/*
- * Protected by the global:lru lock.
+ * Protection for the per manager LRU and ddestroy lists.
*/
+ spinlock_t lru_lock;
struct list_head ddestroy;
/*