OSDN Git Service

drm/i915/gt: Stop holding onto the pinned_default_state
author: Chris Wilson <chris@chris-wilson.co.uk>
Mon, 4 May 2020 18:07:45 +0000 (19:07 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Tue, 5 May 2020 20:12:33 +0000 (21:12 +0100)
As we only restore the default context state upon banning a context, we
only need enough of the state to run the ring and nothing more. That is
That is, we only need our bare protocontext.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Cc: Andi Shyti <andi.shyti@intel.com>
Reviewed-by: Andi Shyti <andi.shyti@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200504180745.15645-1-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/gt/intel_engine_pm.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/selftest_context.c
drivers/gpu/drm/i915/gt/selftest_lrc.c

index 811debe..d0a1078 100644 (file)
@@ -21,18 +21,11 @@ static int __engine_unpark(struct intel_wakeref *wf)
        struct intel_engine_cs *engine =
                container_of(wf, typeof(*engine), wakeref);
        struct intel_context *ce;
-       void *map;
 
        ENGINE_TRACE(engine, "\n");
 
        intel_gt_pm_get(engine->gt);
 
-       /* Pin the default state for fast resets from atomic context. */
-       map = NULL;
-       if (engine->default_state)
-               map = shmem_pin_map(engine->default_state);
-       engine->pinned_default_state = map;
-
        /* Discard stale context state from across idling */
        ce = engine->kernel_context;
        if (ce) {
@@ -42,6 +35,7 @@ static int __engine_unpark(struct intel_wakeref *wf)
                if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && ce->state) {
                        struct drm_i915_gem_object *obj = ce->state->obj;
                        int type = i915_coherent_map_type(engine->i915);
+                       void *map;
 
                        map = i915_gem_object_pin_map(obj, type);
                        if (!IS_ERR(map)) {
@@ -260,12 +254,6 @@ static int __engine_park(struct intel_wakeref *wf)
        if (engine->park)
                engine->park(engine);
 
-       if (engine->pinned_default_state) {
-               shmem_unpin_map(engine->default_state,
-                               engine->pinned_default_state);
-               engine->pinned_default_state = NULL;
-       }
-
        engine->execlists.no_priolist = false;
 
        /* While gt calls i915_vma_parked(), we have to break the lock cycle */
index b1048f0..c113b78 100644 (file)
@@ -344,7 +344,6 @@ struct intel_engine_cs {
        unsigned long wakeref_serial;
        struct intel_wakeref wakeref;
        struct file *default_state;
-       void *pinned_default_state;
 
        struct {
                struct intel_ring *ring;
index 3ff81c8..dc3f2ee 100644 (file)
@@ -1271,14 +1271,11 @@ execlists_check_context(const struct intel_context *ce,
 static void restore_default_state(struct intel_context *ce,
                                  struct intel_engine_cs *engine)
 {
-       u32 *regs = ce->lrc_reg_state;
+       u32 *regs;
 
-       if (engine->pinned_default_state)
-               memcpy(regs, /* skip restoring the vanilla PPHWSP */
-                      engine->pinned_default_state + LRC_STATE_OFFSET,
-                      engine->context_size - PAGE_SIZE);
+       regs = memset(ce->lrc_reg_state, 0, engine->context_size - PAGE_SIZE);
+       execlists_init_reg_state(regs, ce, engine, ce->ring, true);
 
-       execlists_init_reg_state(regs, ce, engine, ce->ring, false);
        ce->runtime.last = intel_context_get_runtime(ce);
 }
 
@@ -4168,8 +4165,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
         * image back to the expected values to skip over the guilty request.
         */
        __i915_request_reset(rq, stalled);
-       if (!stalled)
-               goto out_replay;
 
        /*
         * We want a simple context + ring to execute the breadcrumb update.
@@ -4179,9 +4174,6 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
         * future request will be after userspace has had the opportunity
         * to recreate its own state.
         */
-       GEM_BUG_ON(!intel_context_is_pinned(ce));
-       restore_default_state(ce, engine);
-
 out_replay:
        ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
                     head, ce->ring->tail);
index b8ed3cb..a56dff3 100644 (file)
@@ -154,10 +154,7 @@ static int live_context_size(void *arg)
         */
 
        for_each_engine(engine, gt, id) {
-               struct {
-                       struct file *state;
-                       void *pinned;
-               } saved;
+               struct file *saved;
 
                if (!engine->context_size)
                        continue;
@@ -171,8 +168,7 @@ static int live_context_size(void *arg)
                 * active state is sufficient, we are only checking that we
                 * don't use more than we planned.
                 */
-               saved.state = fetch_and_zero(&engine->default_state);
-               saved.pinned = fetch_and_zero(&engine->pinned_default_state);
+               saved = fetch_and_zero(&engine->default_state);
 
                /* Overlaps with the execlists redzone */
                engine->context_size += I915_GTT_PAGE_SIZE;
@@ -181,8 +177,7 @@ static int live_context_size(void *arg)
 
                engine->context_size -= I915_GTT_PAGE_SIZE;
 
-               engine->pinned_default_state = saved.pinned;
-               engine->default_state = saved.state;
+               engine->default_state = saved;
 
                intel_engine_pm_put(engine);
 
index 3ced735..824f99c 100644 (file)
@@ -5177,6 +5177,7 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 {
        struct i915_vma *batch;
        u32 dw, x, *cs, *hw;
+       u32 *defaults;
 
        batch = create_user_vma(ce->vm, SZ_64K);
        if (IS_ERR(batch))
@@ -5188,9 +5189,16 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
                return ERR_CAST(cs);
        }
 
+       defaults = shmem_pin_map(ce->engine->default_state);
+       if (!defaults) {
+               i915_gem_object_unpin_map(batch->obj);
+               i915_vma_put(batch);
+               return ERR_PTR(-ENOMEM);
+       }
+
        x = 0;
        dw = 0;
-       hw = ce->engine->pinned_default_state;
+       hw = defaults;
        hw += LRC_STATE_OFFSET / sizeof(*hw);
        do {
                u32 len = hw[dw] & 0x7f;
@@ -5221,6 +5229,8 @@ store_context(struct intel_context *ce, struct i915_vma *scratch)
 
        *cs++ = MI_BATCH_BUFFER_END;
 
+       shmem_unpin_map(ce->engine->default_state, defaults);
+
        i915_gem_object_flush_map(batch->obj);
        i915_gem_object_unpin_map(batch->obj);
 
@@ -5331,6 +5341,7 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 {
        struct i915_vma *batch;
        u32 dw, *cs, *hw;
+       u32 *defaults;
 
        batch = create_user_vma(ce->vm, SZ_64K);
        if (IS_ERR(batch))
@@ -5342,8 +5353,15 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
                return ERR_CAST(cs);
        }
 
+       defaults = shmem_pin_map(ce->engine->default_state);
+       if (!defaults) {
+               i915_gem_object_unpin_map(batch->obj);
+               i915_vma_put(batch);
+               return ERR_PTR(-ENOMEM);
+       }
+
        dw = 0;
-       hw = ce->engine->pinned_default_state;
+       hw = defaults;
        hw += LRC_STATE_OFFSET / sizeof(*hw);
        do {
                u32 len = hw[dw] & 0x7f;
@@ -5371,6 +5389,8 @@ static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
 
        *cs++ = MI_BATCH_BUFFER_END;
 
+       shmem_unpin_map(ce->engine->default_state, defaults);
+
        i915_gem_object_flush_map(batch->obj);
        i915_gem_object_unpin_map(batch->obj);
 
@@ -5438,6 +5458,7 @@ static int compare_isolation(struct intel_engine_cs *engine,
 {
        u32 x, dw, *hw, *lrc;
        u32 *A[2], *B[2];
+       u32 *defaults;
        int err = 0;
 
        A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
@@ -5470,9 +5491,15 @@ static int compare_isolation(struct intel_engine_cs *engine,
        }
        lrc += LRC_STATE_OFFSET / sizeof(*hw);
 
+       defaults = shmem_pin_map(ce->engine->default_state);
+       if (!defaults) {
+               err = -ENOMEM;
+               goto err_lrc;
+       }
+
        x = 0;
        dw = 0;
-       hw = engine->pinned_default_state;
+       hw = defaults;
        hw += LRC_STATE_OFFSET / sizeof(*hw);
        do {
                u32 len = hw[dw] & 0x7f;
@@ -5512,6 +5539,8 @@ static int compare_isolation(struct intel_engine_cs *engine,
        } while (dw < PAGE_SIZE / sizeof(u32) &&
                 (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
 
+       shmem_unpin_map(ce->engine->default_state, defaults);
+err_lrc:
        i915_gem_object_unpin_map(ce->state->obj);
 err_B1:
        i915_gem_object_unpin_map(result[1]->obj);
@@ -5661,18 +5690,16 @@ static int live_lrc_isolation(void *arg)
                        continue;
 
                intel_engine_pm_get(engine);
-               if (engine->pinned_default_state) {
-                       for (i = 0; i < ARRAY_SIZE(poison); i++) {
-                               int result;
+               for (i = 0; i < ARRAY_SIZE(poison); i++) {
+                       int result;
 
-                               result = __lrc_isolation(engine, poison[i]);
-                               if (result && !err)
-                                       err = result;
+                       result = __lrc_isolation(engine, poison[i]);
+                       if (result && !err)
+                               err = result;
 
-                               result = __lrc_isolation(engine, ~poison[i]);
-                               if (result && !err)
-                                       err = result;
-                       }
+                       result = __lrc_isolation(engine, ~poison[i]);
+                       if (result && !err)
+                               err = result;
                }
                intel_engine_pm_put(engine);
                if (igt_flush_test(gt->i915)) {