From b72ed8a2de8e9dfbd61217d60a7da868ac2cfbff Mon Sep 17 00:00:00 2001
From: Felix Kuehling
Date: Sat, 10 Apr 2021 03:48:56 -0400
Subject: [PATCH] drm/amdgpu: DMA map/unmap when updating GPU mappings

DMA map kfd_mem_attachments in update_gpuvm_pte. This function is
called with the BO and page tables reserved, so we can safely update
the DMA mapping.

DMA unmap when a BO is unmapped from a GPU and before updating mappings
in restore workers.

Signed-off-by: Felix Kuehling
Acked-by: Oak Zeng
Acked-by: Ramesh Errabolu
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 56 ++++++++++++------------
 1 file changed, 29 insertions(+), 27 deletions(-)

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 9c31c29ec784..2c02b1504e77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -966,11 +966,12 @@ static int unreserve_bo_and_vms(struct bo_vm_reservation_context *ctx,
 	return ret;
 }
 
-static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
+static void unmap_bo_from_gpuvm(struct kgd_mem *mem,
 				struct kfd_mem_attachment *entry,
 				struct amdgpu_sync *sync)
 {
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
+	struct amdgpu_device *adev = entry->adev;
 	struct amdgpu_vm *vm = bo_va->base.vm;
 
 	amdgpu_vm_bo_unmap(adev, bo_va, entry->va);
@@ -979,15 +980,20 @@ static int unmap_bo_from_gpuvm(struct amdgpu_device *adev,
 
 	amdgpu_sync_fence(sync, bo_va->last_pt_update);
 
-	return 0;
+	kfd_mem_dmaunmap_attachment(mem, entry);
 }
 
-static int update_gpuvm_pte(struct amdgpu_device *adev,
-		struct kfd_mem_attachment *entry,
-		struct amdgpu_sync *sync)
+static int update_gpuvm_pte(struct kgd_mem *mem,
+			    struct kfd_mem_attachment *entry,
+			    struct amdgpu_sync *sync)
 {
-	int ret;
 	struct amdgpu_bo_va *bo_va = entry->bo_va;
+	struct amdgpu_device *adev = entry->adev;
+	int ret;
+
+	ret = kfd_mem_dmamap_attachment(mem, entry);
+	if (ret)
+		return ret;
 
 	/* Update the page tables */
 	ret = amdgpu_vm_bo_update(adev, bo_va, false);
@@ -999,14 +1005,15 @@ static int update_gpuvm_pte(struct amdgpu_device *adev,
 	return amdgpu_sync_fence(sync, bo_va->last_pt_update);
 }
 
-static int map_bo_to_gpuvm(struct amdgpu_device *adev,
-		struct kfd_mem_attachment *entry, struct amdgpu_sync *sync,
-		bool no_update_pte)
+static int map_bo_to_gpuvm(struct kgd_mem *mem,
+			   struct kfd_mem_attachment *entry,
+			   struct amdgpu_sync *sync,
+			   bool no_update_pte)
 {
 	int ret;
 
 	/* Set virtual address for the allocation */
-	ret = amdgpu_vm_bo_map(adev, entry->bo_va, entry->va, 0,
+	ret = amdgpu_vm_bo_map(entry->adev, entry->bo_va, entry->va, 0,
 			       amdgpu_bo_size(entry->bo_va->base.bo),
 			       entry->pte_flags);
 	if (ret) {
@@ -1018,7 +1025,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	if (no_update_pte)
 		return 0;
 
-	ret = update_gpuvm_pte(adev, entry, sync);
+	ret = update_gpuvm_pte(mem, entry, sync);
 	if (ret) {
 		pr_err("update_gpuvm_pte() failed\n");
 		goto update_gpuvm_pte_failed;
@@ -1027,7 +1034,7 @@ static int map_bo_to_gpuvm(struct amdgpu_device *adev,
 	return 0;
 
 update_gpuvm_pte_failed:
-	unmap_bo_from_gpuvm(adev, entry, sync);
+	unmap_bo_from_gpuvm(mem, entry, sync);
 	return ret;
 }
 
@@ -1601,7 +1608,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
 			pr_debug("\t map VA 0x%llx - 0x%llx in entry %p\n",
 				 entry->va, entry->va + bo_size, entry);
 
-			ret = map_bo_to_gpuvm(adev, entry, ctx.sync,
+			ret = map_bo_to_gpuvm(mem, entry, ctx.sync,
 					      is_invalid_userptr);
 			if (ret) {
 				pr_err("Failed to map bo to gpuvm\n");
@@ -1640,7 +1647,6 @@ out:
 int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv)
 {
-	struct amdgpu_device *adev = get_amdgpu_device(kgd);
 	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 	struct amdkfd_process_info *process_info = avm->process_info;
 	unsigned long bo_size = mem->bo->tbo.base.size;
@@ -1675,13 +1681,8 @@ int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
 		pr_debug("\t unmap VA 0x%llx - 0x%llx from entry %p\n",
 			 entry->va, entry->va + bo_size, entry);
 
-		ret = unmap_bo_from_gpuvm(adev, entry, ctx.sync);
-		if (ret == 0) {
-			entry->is_mapped = false;
-		} else {
-			pr_err("failed to unmap VA 0x%llx\n", mem->va);
-			goto unreserve_out;
-		}
+		unmap_bo_from_gpuvm(mem, entry, ctx.sync);
+		entry->is_mapped = false;
 
 		mem->mapped_to_gpu_memory--;
 		pr_debug("\t DEC mapping count %d\n",
@@ -2058,9 +2059,8 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
 			if (!attachment->is_mapped)
 				continue;
 
-			ret = update_gpuvm_pte((struct amdgpu_device *)
-					       attachment->adev,
-					       attachment, &sync);
+			kfd_mem_dmaunmap_attachment(mem, attachment);
+			ret = update_gpuvm_pte(mem, attachment, &sync);
 			if (ret) {
 				pr_err("%s: update PTE failed\n", __func__);
 				/* make sure this gets validated again */
@@ -2262,9 +2262,11 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef)
 			goto validate_map_fail;
 		}
 		list_for_each_entry(attachment, &mem->attachments, list) {
-			ret = update_gpuvm_pte((struct amdgpu_device *)
-					       attachment->adev, attachment,
-					       &sync_obj);
+			if (!attachment->is_mapped)
+				continue;
+
+			kfd_mem_dmaunmap_attachment(mem, attachment);
+			ret = update_gpuvm_pte(mem, attachment, &sync_obj);
 			if (ret) {
 				pr_debug("Memory eviction: update PTE failed. Try again\n");
 				goto validate_map_fail;
-- 
2.11.0
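
A note for reviewers on the locking contract in the commit message:
update_gpuvm_pte() now begins by DMA-mapping the attachment, so it may
only be called while the BO and the page tables are reserved. Below is a
minimal caller-side sketch of one way that contract is satisfied, using
the reserve/unreserve helpers that already exist in
amdgpu_amdkfd_gpuvm.c; it is not part of the patch, the locals (mem,
avm, attachment) and the bool arguments to unreserve_bo_and_vms() are
illustrative, and the dmaunmap step mirrors what the restore paths in
this patch do before re-mapping:

	struct bo_vm_reservation_context ctx;
	int ret;

	/* Reserve the BO and the page tables of the VM it is mapped in */
	ret = reserve_bo_and_vm(mem, avm, &ctx);
	if (ret)
		return ret;

	/*
	 * Drop any stale DMA mapping first, as the restore workers do, so
	 * update_gpuvm_pte() rebuilds it against revalidated pages instead
	 * of reusing an old one.
	 */
	kfd_mem_dmaunmap_attachment(mem, attachment);

	/*
	 * Safe under the reservations taken above: DMA-map the attachment,
	 * write the page-table entries, and collect the update fence.
	 */
	ret = update_gpuvm_pte(mem, attachment, ctx.sync);

	unreserve_bo_and_vms(&ctx, false, false);

Folding the dmamap into update_gpuvm_pte() keeps the map and restore
callers simple and ensures the DMA mapping is never created without the
reservations held.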