drm/i915: Allocate kernel_contexts directly
author     Chris Wilson <chris@chris-wilson.co.uk>
           Thu, 8 Aug 2019 11:06:11 +0000 (12:06 +0100)
committer  Chris Wilson <chris@chris-wilson.co.uk>
           Thu, 8 Aug 2019 14:45:34 +0000 (15:45 +0100)

Ignore the central i915->kernel_context when allocating an engine's kernel
context, as that GEM context is being phased out. For internal clients, we
just need the per-engine logical state, so allocate it at the point of use.
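
As a sketch of the new pattern (illustrative only; the wrapper name below is
made up, and the body mirrors the create_kernel_context() helper added by
this patch):

        static struct intel_context *
        alloc_engine_kernel_context(struct intel_engine_cs *engine)
        {
                struct intel_context *ce;
                int err;

                /*
                 * Previously we borrowed the engine's slot from the shared
                 * GEM context:
                 *   ce = i915_gem_context_get_engine(i915->kernel_context,
                 *                                    engine->id);
                 * Now the per-engine logical state is allocated directly.
                 */
                ce = intel_context_create(engine->i915->kernel_context, engine);
                if (IS_ERR(ce))
                        return ce;

                /* Keep the kernel context pinned; the engine owns a reference. */
                err = intel_context_pin(ce);
                if (err) {
                        intel_context_put(ce);
                        return ERR_PTR(err);
                }

                return ce;
        }

Teardown then pairs intel_context_unpin() with intel_context_put(), since the
engine now owns its kernel context outright.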

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190808110612.23539-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/mock_engine.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/i915_gem.c

drivers/gpu/drm/i915/Makefile
index 3403d79..0331217 100644
@@ -95,8 +95,6 @@ gt-y += \
        gt/gen7_renderstate.o \
        gt/gen8_renderstate.o \
        gt/gen9_renderstate.o
-gt-$(CONFIG_DRM_I915_SELFTEST) += \
-       gt/mock_engine.o
 i915-y += $(gt-y)
 
 # GEM (Graphics Execution Management) code
drivers/gpu/drm/i915/gt/intel_engine_cs.c
index 86247ee..8d44d0d 100644
@@ -707,26 +707,6 @@ out_frame:
        return dw;
 }
 
-static int pin_context(struct i915_gem_context *ctx,
-                      struct intel_engine_cs *engine,
-                      struct intel_context **out)
-{
-       struct intel_context *ce;
-       int err;
-
-       ce = i915_gem_context_get_engine(ctx, engine->id);
-       if (IS_ERR(ce))
-               return PTR_ERR(ce);
-
-       err = intel_context_pin(ce);
-       intel_context_put(ce);
-       if (err)
-               return err;
-
-       *out = ce;
-       return 0;
-}
-
 void
 intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
 {
@@ -748,6 +728,25 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
 #endif
 }
 
+static struct intel_context *
+create_kernel_context(struct intel_engine_cs *engine)
+{
+       struct intel_context *ce;
+       int err;
+
+       ce = intel_context_create(engine->i915->kernel_context, engine);
+       if (IS_ERR(ce))
+               return ce;
+
+       err = intel_context_pin(ce);
+       if (err) {
+               intel_context_put(ce);
+               return ERR_PTR(err);
+       }
+
+       return ce;
+}
+
 /**
 * intel_engine_init_common - initialize engine state which might require hw access
  * @engine: Engine to initialize.
@@ -761,22 +760,24 @@ intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
  */
 int intel_engine_init_common(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *i915 = engine->i915;
+       struct intel_context *ce;
        int ret;
 
        engine->set_default_submission(engine);
 
-       /* We may need to do things with the shrinker which
+       /*
+        * We may need to do things with the shrinker which
         * require us to immediately switch back to the default
         * context. This can cause a problem as pinning the
         * default context also requires GTT space which may not
         * be available. To avoid this we always pin the default
         * context.
         */
-       ret = pin_context(i915->kernel_context, engine,
-                         &engine->kernel_context);
-       if (ret)
-               return ret;
+       ce = create_kernel_context(engine);
+       if (IS_ERR(ce))
+               return PTR_ERR(ce);
+
+       engine->kernel_context = ce;
 
        ret = measure_breadcrumb_dw(engine);
        if (ret < 0)
@@ -787,7 +788,8 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
        return 0;
 
 err_unpin:
-       intel_context_unpin(engine->kernel_context);
+       intel_context_unpin(ce);
+       intel_context_put(ce);
        return ret;
 }
 
@@ -812,6 +814,7 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
                i915_gem_object_put(engine->default_state);
 
        intel_context_unpin(engine->kernel_context);
+       intel_context_put(engine->kernel_context);
        GEM_BUG_ON(!llist_empty(&engine->barrier_tasks));
 
        intel_wa_list_free(&engine->ctx_wa_list);
@@ -1573,5 +1576,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
 }
 
 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "mock_engine.c"
 #include "selftest_engine_cs.c"
 #endif
drivers/gpu/drm/i915/gt/mock_engine.c
index 8a5f079..c647908 100644
@@ -286,8 +286,7 @@ struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
 
 int mock_engine_init(struct intel_engine_cs *engine)
 {
-       struct drm_i915_private *i915 = engine->i915;
-       int err;
+       struct intel_context *ce;
 
        intel_engine_init_active(engine, ENGINE_MOCK);
        intel_engine_init_breadcrumbs(engine);
@@ -295,16 +294,11 @@ int mock_engine_init(struct intel_engine_cs *engine)
        intel_engine_init__pm(engine);
        intel_engine_pool_init(&engine->pool);
 
-       engine->kernel_context =
-               i915_gem_context_get_engine(i915->kernel_context, engine->id);
-       if (IS_ERR(engine->kernel_context))
-               goto err_breadcrumbs;
-
-       err = intel_context_pin(engine->kernel_context);
-       intel_context_put(engine->kernel_context);
-       if (err)
+       ce = create_kernel_context(engine);
+       if (IS_ERR(ce))
                goto err_breadcrumbs;
 
+       engine->kernel_context = ce;
        return 0;
 
 err_breadcrumbs:
@@ -338,6 +332,7 @@ void mock_engine_free(struct intel_engine_cs *engine)
        GEM_BUG_ON(timer_pending(&mock->hw_delay));
 
        intel_context_unpin(engine->kernel_context);
+       intel_context_put(engine->kernel_context);
 
        intel_engine_fini_breadcrumbs(engine);
 
drivers/gpu/drm/i915/gvt/scheduler.c
index 431fd42..0a0a17b 100644
@@ -1229,7 +1229,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
                INIT_LIST_HEAD(&s->workload_q_head[i]);
                s->shadow[i] = ERR_PTR(-EINVAL);
 
-               ce = i915_gem_context_get_engine(ctx, i);
+               ce = intel_context_create(ctx, engine);
                if (IS_ERR(ce)) {
                        ret = PTR_ERR(ce);
                        goto out_shadow_ctx;
@@ -1270,6 +1270,7 @@ out_shadow_ctx:
                        break;
 
                intel_context_unpin(s->shadow[i]);
+               intel_context_put(s->shadow[i]);
        }
        i915_gem_context_put(ctx);
        return ret;
drivers/gpu/drm/i915/i915_gem.c
index 3f888d6..6bae073 100644
@@ -1256,9 +1256,8 @@ out:
 
 static int __intel_engines_record_defaults(struct drm_i915_private *i915)
 {
+       struct i915_request *requests[I915_NUM_ENGINES] = {};
        struct intel_engine_cs *engine;
-       struct i915_gem_context *ctx;
-       struct i915_gem_engines *e;
        enum intel_engine_id id;
        int err = 0;
 
@@ -1271,20 +1270,25 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
         * from the same default HW values.
         */
 
-       ctx = i915_gem_context_create_kernel(i915, 0);
-       if (IS_ERR(ctx))
-               return PTR_ERR(ctx);
-
-       e = i915_gem_context_lock_engines(ctx);
-
        for_each_engine(engine, i915, id) {
-               struct intel_context *ce = e->engines[id];
+               struct intel_context *ce;
                struct i915_request *rq;
 
+               /* We must be able to switch to something! */
+               GEM_BUG_ON(!engine->kernel_context);
+               engine->serial++; /* force the kernel context switch */
+
+               ce = intel_context_create(i915->kernel_context, engine);
+               if (IS_ERR(ce)) {
+                       err = PTR_ERR(ce);
+                       goto out;
+               }
+
                rq = intel_context_create_request(ce);
                if (IS_ERR(rq)) {
                        err = PTR_ERR(rq);
-                       goto err_active;
+                       intel_context_put(ce);
+                       goto out;
                }
 
                err = intel_engine_emit_ctx_wa(rq);
@@ -1305,26 +1309,33 @@ static int __intel_engines_record_defaults(struct drm_i915_private *i915)
                        goto err_rq;
 
 err_rq:
+               requests[id] = i915_request_get(rq);
                i915_request_add(rq);
                if (err)
-                       goto err_active;
+                       goto out;
        }
 
        /* Flush the default context image to memory, and enable powersaving. */
        if (!i915_gem_load_power_context(i915)) {
                err = -EIO;
-               goto err_active;
+               goto out;
        }
 
-       for_each_engine(engine, i915, id) {
-               struct intel_context *ce = e->engines[id];
-               struct i915_vma *state = ce->state;
+       for (id = 0; id < ARRAY_SIZE(requests); id++) {
+               struct i915_request *rq;
+               struct i915_vma *state;
                void *vaddr;
 
-               if (!state)
+               rq = requests[id];
+               if (!rq)
                        continue;
 
-               GEM_BUG_ON(intel_context_is_pinned(ce));
+               /* We want to be able to unbind the state from the GGTT */
+               GEM_BUG_ON(intel_context_is_pinned(rq->hw_context));
+
+               state = rq->hw_context->state;
+               if (!state)
+                       continue;
 
                /*
                 * As we will hold a reference to the logical state, it will
@@ -1336,43 +1347,49 @@ err_rq:
                 */
                err = i915_vma_unbind(state);
                if (err)
-                       goto err_active;
+                       goto out;
 
                i915_gem_object_lock(state->obj);
                err = i915_gem_object_set_to_cpu_domain(state->obj, false);
                i915_gem_object_unlock(state->obj);
                if (err)
-                       goto err_active;
+                       goto out;
 
-               engine->default_state = i915_gem_object_get(state->obj);
-               i915_gem_object_set_cache_coherency(engine->default_state,
-                                                   I915_CACHE_LLC);
+               i915_gem_object_set_cache_coherency(state->obj, I915_CACHE_LLC);
 
                /* Check we can acquire the image of the context state */
-               vaddr = i915_gem_object_pin_map(engine->default_state,
-                                               I915_MAP_FORCE_WB);
+               vaddr = i915_gem_object_pin_map(state->obj, I915_MAP_FORCE_WB);
                if (IS_ERR(vaddr)) {
                        err = PTR_ERR(vaddr);
-                       goto err_active;
+                       goto out;
                }
 
-               i915_gem_object_unpin_map(engine->default_state);
+               rq->engine->default_state = i915_gem_object_get(state->obj);
+               i915_gem_object_unpin_map(state->obj);
        }
 
-out_ctx:
-       i915_gem_context_unlock_engines(ctx);
-       i915_gem_context_set_closed(ctx);
-       i915_gem_context_put(ctx);
-       return err;
-
-err_active:
+out:
        /*
         * If we have to abandon now, we expect the engines to be idle
         * and ready to be torn-down. The quickest way we can accomplish
         * this is by declaring ourselves wedged.
         */
-       intel_gt_set_wedged(&i915->gt);
-       goto out_ctx;
+       if (err)
+               intel_gt_set_wedged(&i915->gt);
+
+       for (id = 0; id < ARRAY_SIZE(requests); id++) {
+               struct intel_context *ce;
+               struct i915_request *rq;
+
+               rq = requests[id];
+               if (!rq)
+                       continue;
+
+               ce = rq->hw_context;
+               i915_request_put(rq);
+               intel_context_put(ce);
+       }
+       return err;
 }
 
 static int