OSDN Git Service

drm/amdgpu: cleanup CS init/fini and pass1
author: Christian König <christian.koenig@amd.com>
Wed, 7 Sep 2022 09:37:06 +0000 (11:37 +0200)
committer: Alex Deucher <alexander.deucher@amd.com>
Tue, 13 Sep 2022 18:33:01 +0000 (14:33 -0400)
Cleanup the coding style and function names to represent the data
they process. Only initialize and cleanup the CS structure in
init/fini.

Check the size of the IB chunk in pass1.

v2: fix job initialisation order and use correct scheduler instance
v3: try to move all functional changes into a separate patch.
v4: move reordering and pass2 out of this patch as well

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c

index 52ba632..ee02fec 100644 (file)
 #include "amdgpu_gem.h"
 #include "amdgpu_ras.h"
 
-static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
-                                     struct drm_amdgpu_cs_chunk_fence *data,
-                                     uint32_t *offset)
+static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
+                                struct amdgpu_device *adev,
+                                struct drm_file *filp,
+                                union drm_amdgpu_cs *cs)
+{
+       struct amdgpu_fpriv *fpriv = filp->driver_priv;
+
+       if (cs->in.num_chunks == 0)
+               return -EINVAL;
+
+       memset(p, 0, sizeof(*p));
+       p->adev = adev;
+       p->filp = filp;
+
+       p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
+       if (!p->ctx)
+               return -EINVAL;
+
+       if (atomic_read(&p->ctx->guilty)) {
+               amdgpu_ctx_put(p->ctx);
+               return -ECANCELED;
+       }
+       return 0;
+}
+
+static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
+                          struct drm_amdgpu_cs_chunk_ib *chunk_ib,
+                          unsigned int *num_ibs)
+{
+       ++(*num_ibs);
+       return 0;
+}
+
+static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
+                                  struct drm_amdgpu_cs_chunk_fence *data,
+                                  uint32_t *offset)
 {
        struct drm_gem_object *gobj;
        struct amdgpu_bo *bo;
@@ -80,11 +113,11 @@ error_unref:
        return r;
 }
 
-static int amdgpu_cs_bo_handles_chunk(struct amdgpu_cs_parser *p,
-                                     struct drm_amdgpu_bo_list_in *data)
+static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
+                                  struct drm_amdgpu_bo_list_in *data)
 {
+       struct drm_amdgpu_bo_list_entry *info;
        int r;
-       struct drm_amdgpu_bo_list_entry *info = NULL;
 
        r = amdgpu_bo_create_list_entry_array(data, &info);
        if (r)
@@ -104,7 +137,9 @@ error_free:
        return r;
 }
 
-static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs *cs)
+/* Copy the data from userspace and go over it the first time */
+static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
+                          union drm_amdgpu_cs *cs)
 {
        struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
        struct amdgpu_vm *vm = &fpriv->vm;
@@ -112,28 +147,14 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
        uint64_t *chunk_array;
        unsigned size, num_ibs = 0;
        uint32_t uf_offset = 0;
-       int i;
        int ret;
+       int i;
 
-       if (cs->in.num_chunks == 0)
-               return -EINVAL;
-
-       chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t), GFP_KERNEL);
+       chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
+                                    GFP_KERNEL);
        if (!chunk_array)
                return -ENOMEM;
 
-       p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
-       if (!p->ctx) {
-               ret = -EINVAL;
-               goto free_chunk;
-       }
-
-       /* skip guilty context job */
-       if (atomic_read(&p->ctx->guilty) == 1) {
-               ret = -ECANCELED;
-               goto free_chunk;
-       }
-
        /* get chunks */
        chunk_array_user = u64_to_user_ptr(cs->in.chunks);
        if (copy_from_user(chunk_array, chunk_array_user,
@@ -168,7 +189,8 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                size = p->chunks[i].length_dw;
                cdata = u64_to_user_ptr(user_chunk.chunk_data);
 
-               p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t), GFP_KERNEL);
+               p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
+                                                   GFP_KERNEL);
                if (p->chunks[i].kdata == NULL) {
                        ret = -ENOMEM;
                        i--;
@@ -180,36 +202,35 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                        goto free_partial_kdata;
                }
 
+               /* Assume the worst on the following checks */
+               ret = -EINVAL;
                switch (p->chunks[i].chunk_id) {
                case AMDGPU_CHUNK_ID_IB:
-                       ++num_ibs;
+                       if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
+                               goto free_partial_kdata;
+
+                       ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, &num_ibs);
+                       if (ret)
+                               goto free_partial_kdata;
                        break;
 
                case AMDGPU_CHUNK_ID_FENCE:
-                       size = sizeof(struct drm_amdgpu_cs_chunk_fence);
-                       if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
-                               ret = -EINVAL;
+                       if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
                                goto free_partial_kdata;
-                       }
 
-                       ret = amdgpu_cs_user_fence_chunk(p, p->chunks[i].kdata,
-                                                        &uf_offset);
+                       ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
+                                                     &uf_offset);
                        if (ret)
                                goto free_partial_kdata;
-
                        break;
 
                case AMDGPU_CHUNK_ID_BO_HANDLES:
-                       size = sizeof(struct drm_amdgpu_bo_list_in);
-                       if (p->chunks[i].length_dw * sizeof(uint32_t) < size) {
-                               ret = -EINVAL;
+                       if (size < sizeof(struct drm_amdgpu_bo_list_in))
                                goto free_partial_kdata;
-                       }
 
-                       ret = amdgpu_cs_bo_handles_chunk(p, p->chunks[i].kdata);
+                       ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
                        if (ret)
                                goto free_partial_kdata;
-
                        break;
 
                case AMDGPU_CHUNK_ID_DEPENDENCIES:
@@ -221,7 +242,6 @@ static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, union drm_amdgpu_cs
                        break;
 
                default:
-                       ret = -EINVAL;
                        goto free_partial_kdata;
                }
        }
@@ -660,52 +680,6 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
        return 0;
 }
 
-/**
- * amdgpu_cs_parser_fini() - clean parser states
- * @parser:    parser structure holding parsing context.
- * @error:     error number
- * @backoff:   indicator to backoff the reservation
- *
- * If error is set then unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
-                                 bool backoff)
-{
-       unsigned i;
-
-       if (error && backoff) {
-               ttm_eu_backoff_reservation(&parser->ticket,
-                                          &parser->validated);
-               mutex_unlock(&parser->bo_list->bo_list_mutex);
-       }
-
-       for (i = 0; i < parser->num_post_deps; i++) {
-               drm_syncobj_put(parser->post_deps[i].syncobj);
-               kfree(parser->post_deps[i].chain);
-       }
-       kfree(parser->post_deps);
-
-       dma_fence_put(parser->fence);
-
-       if (parser->ctx) {
-               amdgpu_ctx_put(parser->ctx);
-       }
-       if (parser->bo_list)
-               amdgpu_bo_list_put(parser->bo_list);
-
-       for (i = 0; i < parser->nchunks; i++)
-               kvfree(parser->chunks[i].kdata);
-       kvfree(parser->chunks);
-       if (parser->job)
-               amdgpu_job_free(parser->job);
-       if (parser->uf_entry.tv.bo) {
-               struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
-
-               amdgpu_bo_unref(&uf);
-       }
-}
-
 static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
 {
        struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched);
@@ -1280,11 +1254,47 @@ static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *parser)
                trace_amdgpu_cs(parser, i);
 }
 
+/* Cleanup the parser structure */
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
+                                 bool backoff)
+{
+       unsigned i;
+
+       if (error && backoff) {
+               ttm_eu_backoff_reservation(&parser->ticket,
+                                          &parser->validated);
+               mutex_unlock(&parser->bo_list->bo_list_mutex);
+       }
+
+       for (i = 0; i < parser->num_post_deps; i++) {
+               drm_syncobj_put(parser->post_deps[i].syncobj);
+               kfree(parser->post_deps[i].chain);
+       }
+       kfree(parser->post_deps);
+
+       dma_fence_put(parser->fence);
+
+       if (parser->ctx)
+               amdgpu_ctx_put(parser->ctx);
+       if (parser->bo_list)
+               amdgpu_bo_list_put(parser->bo_list);
+
+       for (i = 0; i < parser->nchunks; i++)
+               kvfree(parser->chunks[i].kdata);
+       kvfree(parser->chunks);
+       if (parser->job)
+               amdgpu_job_free(parser->job);
+       if (parser->uf_entry.tv.bo) {
+               struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);
+
+               amdgpu_bo_unref(&uf);
+       }
+}
+
 int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
 {
        struct amdgpu_device *adev = drm_to_adev(dev);
-       union drm_amdgpu_cs *cs = data;
-       struct amdgpu_cs_parser parser = {};
+       struct amdgpu_cs_parser parser;
        bool reserved_buffers = false;
        int r;
 
@@ -1294,16 +1304,17 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        if (!adev->accel_working)
                return -EBUSY;
 
-       parser.adev = adev;
-       parser.filp = filp;
-
-       r = amdgpu_cs_parser_init(&parser, data);
+       r = amdgpu_cs_parser_init(&parser, adev, filp, data);
        if (r) {
                if (printk_ratelimit())
                        DRM_ERROR("Failed to initialize parser %d!\n", r);
                goto out;
        }
 
+       r = amdgpu_cs_pass1(&parser, data);
+       if (r)
+               goto out;
+
        r = amdgpu_cs_ib_fill(adev, &parser);
        if (r)
                goto out;
@@ -1331,7 +1342,7 @@ int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
        if (r)
                goto out;
 
-       r = amdgpu_cs_submit(&parser, cs);
+       r = amdgpu_cs_submit(&parser, data);
 out:
        amdgpu_cs_parser_fini(&parser, r, reserved_buffers);