drm/amdgpu: split the VM entity into direct and delayed
author Christian König <christian.koenig@amd.com>
Fri, 19 Jul 2019 12:41:12 +0000 (14:41 +0200)
committer Alex Deucher <alexander.deucher@amd.com>
Mon, 16 Sep 2019 15:42:55 +0000 (10:42 -0500)
For page fault handling we need to use a direct update which can't be
blocked by ongoing user CS.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c

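Before the hunks, the shape of the change: the single page-table-update entity in struct amdgpu_vm becomes two, every existing user is repointed at one of them, and init/teardown grow the second entity. A minimal lifecycle sketch (my own summary of the diff below, not driver code; the helper name is hypothetical):

    /* Sketch: two entities created in order, the first destroyed again
     * if the second fails to initialize. */
    static int vm_entities_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
    {
            int r;

            r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
                                      adev->vm_manager.vm_pte_num_rqs, NULL);
            if (r)
                    return r;

            r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
                                      adev->vm_manager.vm_pte_num_rqs, NULL);
            if (r)
                    drm_sched_entity_destroy(&vm->direct);
            return r;
    }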
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
index 53734da..6f92897 100644
@@ -282,7 +282,7 @@ static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
            !dma_fence_is_later(updates, (*id)->flushed_updates))
            updates = NULL;
 
-       if ((*id)->owner != vm->entity.fence_context ||
+       if ((*id)->owner != vm->direct.fence_context ||
            job->vm_pd_addr != (*id)->pd_gpu_addr ||
            updates || !(*id)->last_flush ||
            ((*id)->last_flush->context != fence_context &&
@@ -349,7 +349,7 @@ static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
                struct dma_fence *flushed;
 
                /* Check all the prerequisites to using this VMID */
-               if ((*id)->owner != vm->entity.fence_context)
+               if ((*id)->owner != vm->direct.fence_context)
                        continue;
 
                if ((*id)->pd_gpu_addr != job->vm_pd_addr)
@@ -449,7 +449,7 @@ int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
        }
 
        id->pd_gpu_addr = job->vm_pd_addr;
-       id->owner = vm->entity.fence_context;
+       id->owner = vm->direct.fence_context;
 
        if (job->vm_needs_flush) {
                dma_fence_put(id->last_flush);
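The amdgpu_ids.c hunks retag VMID ownership. The owner token stays a scheduler entity's fence context, a u64 unique per entity, so it identifies the VM that last used a VMID without holding a pointer; it merely moves from the removed entity member to the new direct one. A hypothetical helper (not in the patch) spells out the idiom:

    /* Sketch: the VMID can be reused without extra work only if this VM,
     * identified by its direct entity's fence context, was the last owner. */
    static bool vmid_owned_by_vm(struct amdgpu_vmid *id, struct amdgpu_vm *vm)
    {
            return id->owner == vm->direct.fence_context;
    }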
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 17aca26..a8e9ea0 100644
@@ -2671,12 +2671,17 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
        spin_lock_init(&vm->invalidated_lock);
        INIT_LIST_HEAD(&vm->freed);
 
-       /* create scheduler entity for page table updates */
-       r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
+       /* create scheduler entities for page table updates */
+       r = drm_sched_entity_init(&vm->direct, adev->vm_manager.vm_pte_rqs,
                                  adev->vm_manager.vm_pte_num_rqs, NULL);
        if (r)
                return r;
 
+       r = drm_sched_entity_init(&vm->delayed, adev->vm_manager.vm_pte_rqs,
+                                 adev->vm_manager.vm_pte_num_rqs, NULL);
+       if (r)
+               goto error_free_direct;
+
        vm->pte_support_ats = false;
 
        if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE) {
@@ -2705,7 +2710,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
                bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
        r = amdgpu_bo_create(adev, &bp, &root);
        if (r)
-               goto error_free_sched_entity;
+               goto error_free_delayed;
 
        r = amdgpu_bo_reserve(root, true);
        if (r)
@@ -2748,8 +2753,11 @@ error_free_root:
        amdgpu_bo_unref(&vm->root.base.bo);
        vm->root.base.bo = NULL;
 
-error_free_sched_entity:
-       drm_sched_entity_destroy(&vm->entity);
+error_free_delayed:
+       drm_sched_entity_destroy(&vm->delayed);
+
+error_free_direct:
+       drm_sched_entity_destroy(&vm->direct);
 
        return r;
 }
@@ -2938,7 +2946,8 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
                spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
        }
 
-       drm_sched_entity_destroy(&vm->entity);
+       drm_sched_entity_destroy(&vm->direct);
+       drm_sched_entity_destroy(&vm->delayed);
 
        if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
                dev_err(adev->dev, "still active bo inside vm\n");
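Note the unwind ordering in amdgpu_vm_init(): a failure after both entities exist enters at error_free_delayed and falls through to error_free_direct, releasing in reverse order of creation, while a failure of the second init jumps straight to error_free_direct. The same goto-unwind idiom in isolation (a generic, self-contained sketch; all names are placeholders):

    struct ctx { int a, b; };

    static int acquire_a(struct ctx *c) { c->a = 1; return 0; }
    static void release_a(struct ctx *c) { c->a = 0; }
    static int acquire_b(struct ctx *c) { c->b = 1; return 0; }

    static int init_pair(struct ctx *c)
    {
            int r;

            r = acquire_a(c);       /* first resource */
            if (r)
                    return r;

            r = acquire_b(c);       /* second resource */
            if (r)
                    goto error_free_a;

            return 0;

    error_free_a:
            release_a(c);           /* unwind in reverse order */
            return r;
    }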
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 3352a87..7138722 100644
@@ -257,8 +257,9 @@ struct amdgpu_vm {
        struct amdgpu_vm_pt     root;
        struct dma_fence        *last_update;
 
-       /* Scheduler entity for page table updates */
-       struct drm_sched_entity entity;
+       /* Scheduler entities for page table updates */
+       struct drm_sched_entity direct;
+       struct drm_sched_entity delayed;
 
        unsigned int            pasid;
        /* dedicated to vm */
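The header hunk is where the split actually lives. Per the commit message, the intended division of labor is (the comments below are my annotation, not the driver's documentation):

    struct drm_sched_entity direct;  /* immediate page-table updates,
                                      * e.g. page fault handling; must
                                      * not block behind user CS */
    struct drm_sched_entity delayed; /* normal updates that may be
                                      * ordered after user submissions */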
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 61fc584..9aecfb3 100644
@@ -99,12 +99,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
        struct dma_fence *f;
        int r;
 
-       ring = container_of(p->vm->entity.rq->sched, struct amdgpu_ring, sched);
+       ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring,
+                           sched);
 
        WARN_ON(ib->length_dw == 0);
        amdgpu_ring_pad_ib(ring, ib);
        WARN_ON(ib->length_dw > p->num_dw_left);
-       r = amdgpu_job_submit(p->job, &p->vm->entity,
+       r = amdgpu_job_submit(p->job, &p->vm->delayed,
                              AMDGPU_FENCE_OWNER_VM, &f);
        if (r)
                goto error;
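With this patch alone, the SDMA backend still funnels every update through a single entity; it just becomes delayed, and direct is only used as the VMID owner tag so far. A follow-up would presumably choose the entity at submission time; a hypothetical sketch (the direct flag on the update params is an assumption, not part of this change):

    /* Sketch: updates that must not wait on user CS go through the
     * direct entity, everything else through delayed. */
    struct drm_sched_entity *entity = p->direct ? &p->vm->direct
                                                : &p->vm->delayed;

    r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f);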