drm/amdgpu: add amdgpu_pasid_free_delayed v2
author    Christian König <christian.koenig@amd.com>
          Fri, 5 Jan 2018 10:16:22 +0000 (11:16 +0100)
committer Alex Deucher <alexander.deucher@amd.com>
          Mon, 19 Feb 2018 19:17:45 +0000 (14:17 -0500)

Free up a pasid only after all fences have signaled.

v2: also handle the case when we can't allocate a fence array.

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Chunming Zhou <david1.zhou@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ids.h
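
The helper added below defers freeing a PASID until every fence in a
reservation object has signaled, instead of blocking the caller. A minimal
sketch of a possible caller (hypothetical, not part of this patch; the VM
teardown path and the root page-directory BO names are assumptions):

	/* Hypothetical teardown path: let the PASID live until all
	 * fences on the VM's root page directory have signaled.
	 */
	static void example_vm_teardown(struct amdgpu_vm *vm)
	{
		struct amdgpu_bo *root = vm->root.base.bo;

		if (vm->pasid)
			amdgpu_pasid_free_delayed(root->tbo.resv, vm->pasid);
	}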

index 5248a32..842caa5 100644
  */
 static DEFINE_IDA(amdgpu_pasid_ida);
 
+/* Helper to free pasid from a fence callback */
+struct amdgpu_pasid_cb {
+       struct dma_fence_cb cb;
+       unsigned int pasid;
+};
+
 /**
  * amdgpu_pasid_alloc - Allocate a PASID
  * @bits: Maximum width of the PASID in bits, must be at least 1
@@ -75,6 +81,82 @@ void amdgpu_pasid_free(unsigned int pasid)
        ida_simple_remove(&amdgpu_pasid_ida, pasid);
 }
 
+static void amdgpu_pasid_free_cb(struct dma_fence *fence,
+                                struct dma_fence_cb *_cb)
+{
+       struct amdgpu_pasid_cb *cb =
+               container_of(_cb, struct amdgpu_pasid_cb, cb);
+
+       amdgpu_pasid_free(cb->pasid);
+       dma_fence_put(fence);
+       kfree(cb);
+}
+
+/**
+ * amdgpu_pasid_free_delayed - free pasid when fences signal
+ *
+ * @resv: reservation object with the fences to wait for
+ * @pasid: pasid to free
+ *
+ * Free the pasid only after all the fences in resv are signaled.
+ */
+void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+                              unsigned int pasid)
+{
+       struct dma_fence *fence, **fences;
+       struct amdgpu_pasid_cb *cb;
+       unsigned count;
+       int r;
+
+       r = reservation_object_get_fences_rcu(resv, NULL, &count, &fences);
+       if (r)
+               goto fallback;
+
+       if (count == 0) {
+               amdgpu_pasid_free(pasid);
+               return;
+       }
+
+       if (count == 1) {
+               fence = fences[0];
+               kfree(fences);
+       } else {
+               uint64_t context = dma_fence_context_alloc(1);
+               struct dma_fence_array *array;
+
+               array = dma_fence_array_create(count, fences, context,
+                                              1, false);
+               if (!array) {
+                       kfree(fences);
+                       goto fallback;
+               }
+               fence = &array->base;
+       }
+
+       cb = kmalloc(sizeof(*cb), GFP_KERNEL);
+       if (!cb) {
+               /* Last resort when we are OOM */
+               dma_fence_wait(fence, false);
+               dma_fence_put(fence);
+               amdgpu_pasid_free(pasid);
+       } else {
+               cb->pasid = pasid;
+               if (dma_fence_add_callback(fence, &cb->cb,
+                                          amdgpu_pasid_free_cb))
+                       amdgpu_pasid_free_cb(fence, &cb->cb);
+       }
+
+       return;
+
+fallback:
+       /* Not enough memory for the delayed delete, as last resort
+        * block for all the fences to complete.
+        */
+       reservation_object_wait_timeout_rcu(resv, true, false,
+                                           MAX_SCHEDULE_TIMEOUT);
+       amdgpu_pasid_free(pasid);
+}
+
 /*
  * VMID manager
  *
index ad931fa..38f37c1 100644
@@ -69,6 +69,8 @@ struct amdgpu_vmid_mgr {
 
 int amdgpu_pasid_alloc(unsigned int bits);
 void amdgpu_pasid_free(unsigned int pasid);
+void amdgpu_pasid_free_delayed(struct reservation_object *resv,
+                              unsigned int pasid);
 
 bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
                               struct amdgpu_vmid *id);
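
Two implementation notes on the new helper (informal, not part of the
commit message): dma_fence_array_create() takes ownership of the fences[]
array on success, which is why the array is only kfree()d on the failure
path, and dma_fence_add_callback() returns -ENOENT when the fence has
already signaled, without installing the callback. The patch therefore
runs the callback by hand in that case, a common pattern:

	/* Sketch of the add-or-run-now pattern used above: if the fence
	 * already signaled, the callback was not queued, so call it
	 * directly to free the pasid and drop the fence reference.
	 */
	if (dma_fence_add_callback(fence, &cb->cb, amdgpu_pasid_free_cb))
		amdgpu_pasid_free_cb(fence, &cb->cb);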