From 2bd9ccfa75e96ba278b96d817bc2135eb761adbd Mon Sep 17 00:00:00 2001
From: =?utf8?q?Christian=20K=C3=B6nig?=
Date: Mon, 1 Feb 2016 12:53:58 +0100
Subject: [PATCH] drm/amdgpu: use per VM entity for page table updates (v2)
MIME-Version: 1.0
Content-Type: text/plain; charset=utf8
Content-Transfer-Encoding: 8bit

Updates from different VMs can be processed independently.

v2: agd: rebase on upstream

Signed-off-by: Christian König
Reviewed-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu.h     |  6 +++-
 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c |  8 +++--
 drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c |  2 +-
 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 53 +++++++++++++++++++++++----------
 6 files changed, 51 insertions(+), 22 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 99e660fec190..5947a95ac853 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -800,7 +800,8 @@ int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
 			     struct amdgpu_job **job);
 void amdgpu_job_free(struct amdgpu_job *job);
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-		      void *owner, struct fence **f);
+		      struct amd_sched_entity *entity, void *owner,
+		      struct fence **f);
 
 struct amdgpu_ring {
 	struct amdgpu_device		*adev;
@@ -917,6 +918,9 @@ struct amdgpu_vm {
 
 	/* protecting freed */
 	spinlock_t		freed_lock;
+
+	/* Scheduler entity for page table updates */
+	struct amd_sched_entity	entity;
 };
 
 struct amdgpu_vm_manager_id {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 0f6719e0ace0..97db6beeca13 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -80,13 +80,17 @@ void amdgpu_job_free(struct amdgpu_job *job)
 }
 
 int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
-		      void *owner, struct fence **f)
+		      struct amd_sched_entity *entity, void *owner,
+		      struct fence **f)
 {
 	struct amdgpu_device *adev = job->adev;
 
+	if (!entity)
+		entity = &adev->kernel_ctx.rings[ring->idx].entity;
+
 	job->ring = ring;
 	job->base.sched = &ring->sched;
-	job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+	job->base.s_entity = entity;
 	job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
 	if (!job->base.s_fence)
 		return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index e47d5188c886..3deb7d3b218a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1053,7 +1053,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring,
 
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 	WARN_ON(job->ibs[0].length_dw > num_dw);
-	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
+	r = amdgpu_job_submit(job, ring, NULL, AMDGPU_FENCE_OWNER_UNDEFINED, fence);
 	if (r)
 		goto error_free;
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index c536630580f8..f4283432bf4e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -880,7 +880,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
 
 		amdgpu_job_free(job);
 	} else {
-		r = amdgpu_job_submit(job, ring,
+		r = amdgpu_job_submit(job, ring, NULL,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index fb2ce3ed9aab..8a3119379cd8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -481,7 +481,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
 
 		amdgpu_job_free(job);
 	} else {
-		r = amdgpu_job_submit(job, ring,
+		r = amdgpu_job_submit(job, ring, NULL,
 				      AMDGPU_FENCE_OWNER_UNDEFINED, &f);
 		if (r)
 			goto err;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b291b1a4611a..5e38b344d56b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -322,6 +322,7 @@ static void amdgpu_vm_update_pages(struct amdgpu_device *adev,
  * need to reserve bo first before calling it.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+			      struct amdgpu_vm *vm,
 			      struct amdgpu_bo *bo)
 {
 	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
@@ -351,7 +352,8 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 	amdgpu_ring_pad_ib(ring, &job->ibs[0]);
 
 	WARN_ON(job->ibs[0].length_dw > 64);
-	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &fence);
 	if (r)
 		goto error_free;
 
@@ -476,7 +478,8 @@ int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
 		amdgpu_sync_resv(adev, &job->sync, pd->tbo.resv,
 				 AMDGPU_FENCE_OWNER_VM);
 		WARN_ON(ib->length_dw > ndw);
-		r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &fence);
+		r = amdgpu_job_submit(job, ring, &vm->entity,
+				      AMDGPU_FENCE_OWNER_VM, &fence);
 		if (r)
 			goto error_free;
 
@@ -729,7 +732,8 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
 
 	amdgpu_ring_pad_ib(ring, ib);
 	WARN_ON(ib->length_dw > ndw);
-	r = amdgpu_job_submit(job, ring, AMDGPU_FENCE_OWNER_VM, &f);
+	r = amdgpu_job_submit(job, ring, &vm->entity,
+			      AMDGPU_FENCE_OWNER_VM, &f);
 	if (r)
 		goto error_free;
 
@@ -1104,7 +1108,7 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		 */
 		pt->parent = amdgpu_bo_ref(vm->page_directory);
 
-		r = amdgpu_vm_clear_bo(adev, pt);
+		r = amdgpu_vm_clear_bo(adev, vm, pt);
 		if (r) {
 			amdgpu_bo_unref(&pt);
 			goto error_free;
@@ -1265,9 +1269,11 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
  */
 int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
+	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
 		AMDGPU_VM_PTE_COUNT * 8);
 	unsigned pd_size, pd_entries;
+	struct amd_sched_rq *rq;
 	int i, r;
 
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
@@ -1291,6 +1297,13 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 		return -ENOMEM;
 	}
 
+	/* create scheduler entity for page table updates */
+	rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+	r = amd_sched_entity_init(&ring->sched, &vm->entity,
+				  rq, amdgpu_sched_jobs);
+	if (r)
+		return r;
+
 	vm->page_directory_fence = NULL;
 
 	r = amdgpu_bo_create(adev, pd_size, align, true,
@@ -1298,22 +1311,27 @@
 			     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 			     NULL, NULL, &vm->page_directory);
 	if (r)
-		return r;
+		goto error_free_sched_entity;
+
 	r = amdgpu_bo_reserve(vm->page_directory, false);
-	if (r) {
-		amdgpu_bo_unref(&vm->page_directory);
-		vm->page_directory = NULL;
-		return r;
-	}
-	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+	if (r)
+		goto error_free_page_directory;
+
+	r = amdgpu_vm_clear_bo(adev, vm, vm->page_directory);
 	amdgpu_bo_unreserve(vm->page_directory);
-	if (r) {
-		amdgpu_bo_unref(&vm->page_directory);
-		vm->page_directory = NULL;
-		return r;
-	}
+	if (r)
+		goto error_free_page_directory;
 
 	return 0;
+
+error_free_page_directory:
+	amdgpu_bo_unref(&vm->page_directory);
+	vm->page_directory = NULL;
+
+error_free_sched_entity:
+	amd_sched_entity_fini(&ring->sched, &vm->entity);
+
+	return r;
 }
 
 /**
@@ -1327,9 +1345,12 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
  */
 void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
 {
+	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;
 	struct amdgpu_bo_va_mapping *mapping, *tmp;
 	int i;
 
+	amd_sched_entity_fini(&ring->sched, &vm->entity);
+
 	if (!RB_EMPTY_ROOT(&vm->va)) {
 		dev_err(adev->dev, "still active bo inside vm\n");
 	}
-- 
2.11.0
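
Note: below is a minimal sketch of the calling convention this patch
introduces, not a definitive implementation. Only amdgpu_job_submit(),
struct amd_sched_entity and vm->entity come from the patch itself;
example_submit_vm_update() is a hypothetical helper named here for
illustration (the real callers are amdgpu_vm_clear_bo(),
amdgpu_vm_update_page_directory() and amdgpu_vm_bo_update_mapping()
above).

/* Hypothetical helper, for illustration only: submit a page table
 * update job through the VM's own scheduler entity. */
static int example_submit_vm_update(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm,
				    struct amdgpu_job *job,
				    struct fence **fence)
{
	struct amdgpu_ring *ring = adev->vm_manager.vm_pte_funcs_ring;

	/* Jobs submitted on &vm->entity stay ordered relative to each
	 * other, while the GPU scheduler is free to interleave them
	 * with page table updates from other VMs. */
	return amdgpu_job_submit(job, ring, &vm->entity,
				 AMDGPU_FENCE_OWNER_VM, fence);
}

Callers that do not act on behalf of a VM (the buffer copy in
amdgpu_ttm.c and the UVD/VCE test messages) instead pass NULL and fall
back to the per-ring kernel context entity inside amdgpu_job_submit().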