From d858d5695f3897d55df68452066a90d7560cb845 Mon Sep 17 00:00:00 2001 From: Daniele Ceraolo Spurio Date: Thu, 13 Jun 2019 16:21:54 -0700 Subject: [PATCH] drm/i915: update rpm_get/put to use the rpm structure The functions were already using only the structure internally, so we just need to flip the interface. v2: rebase Signed-off-by: Daniele Ceraolo Spurio Cc: Imre Deak Reviewed-by: Chris Wilson Acked-by: Imre Deak Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20190613232156.34940-7-daniele.ceraolospurio@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_mman.c | 11 ++--- drivers/gpu/drm/i915/gem/i915_gem_object.c | 4 +- drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 4 +- drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 4 +- .../drm/i915/gem/selftests/i915_gem_coherency.c | 4 +- .../gpu/drm/i915/gem/selftests/i915_gem_context.c | 12 +++--- drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 4 +- drivers/gpu/drm/i915/gt/intel_engine_cs.c | 8 ++-- drivers/gpu/drm/i915/gt/intel_hangcheck.c | 4 +- drivers/gpu/drm/i915/gt/intel_reset.c | 4 +- drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 12 +++--- drivers/gpu/drm/i915/gt/selftest_lrc.c | 36 ++++++++-------- drivers/gpu/drm/i915/gt/selftest_reset.c | 4 +- drivers/gpu/drm/i915/gt/selftest_workarounds.c | 12 +++--- drivers/gpu/drm/i915/gvt/aperture_gm.c | 15 +++---- drivers/gpu/drm/i915/gvt/gvt.h | 4 +- drivers/gpu/drm/i915/gvt/sched_policy.c | 4 +- drivers/gpu/drm/i915/gvt/scheduler.c | 4 +- drivers/gpu/drm/i915/i915_debugfs.c | 49 +++++++++++----------- drivers/gpu/drm/i915/i915_gem.c | 19 +++++---- drivers/gpu/drm/i915/i915_gem_fence_reg.c | 4 +- drivers/gpu/drm/i915/i915_gem_gtt.c | 6 +-- drivers/gpu/drm/i915/i915_perf.c | 6 +-- drivers/gpu/drm/i915/i915_pmu.c | 12 +++--- drivers/gpu/drm/i915/i915_sysfs.c | 12 +++--- drivers/gpu/drm/i915/intel_display.c | 12 +++--- drivers/gpu/drm/i915/intel_display_power.c | 37 ++++++++-------- drivers/gpu/drm/i915/intel_fbdev.c | 6 +-- drivers/gpu/drm/i915/intel_hotplug.c | 4 +- drivers/gpu/drm/i915/intel_runtime_pm.c | 43 +++++++++---------- drivers/gpu/drm/i915/intel_runtime_pm.h | 26 ++++++------ drivers/gpu/drm/i915/intel_wakeref.c | 8 ++-- drivers/gpu/drm/i915/selftests/i915_active.c | 8 ++-- drivers/gpu/drm/i915/selftests/i915_gem.c | 4 +- drivers/gpu/drm/i915/selftests/i915_gem_evict.c | 4 +- drivers/gpu/drm/i915/selftests/i915_gem_gtt.c | 8 ++-- drivers/gpu/drm/i915/selftests/i915_request.c | 20 ++++----- drivers/gpu/drm/i915/selftests/i915_timeline.c | 16 +++---- drivers/gpu/drm/i915/selftests/i915_vma.c | 4 +- drivers/gpu/drm/i915/selftests/intel_guc.c | 8 ++-- drivers/gpu/drm/i915/selftests/intel_uncore.c | 4 +- 41 files changed, 236 insertions(+), 234 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c index 7b5841b73588..391621ee3cbb 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c @@ -222,6 +222,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) struct drm_i915_gem_object *obj = to_intel_bo(area->vm_private_data); struct drm_device *dev = obj->base.dev; struct drm_i915_private *i915 = to_i915(dev); + struct intel_runtime_pm *rpm = &i915->runtime_pm; struct i915_ggtt *ggtt = &i915->ggtt; bool write = area->vm_flags & VM_WRITE; intel_wakeref_t wakeref; @@ -243,7 +244,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) if (ret) goto err; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(rpm); srcu = i915_reset_trylock(i915); if
(srcu < 0) { @@ -308,7 +309,7 @@ vm_fault_t i915_gem_fault(struct vm_fault *vmf) goto err_fence; /* Mark as being mmapped into userspace for later revocation */ - assert_rpm_wakelock_held(&i915->runtime_pm); + assert_rpm_wakelock_held(rpm); if (!i915_vma_set_userfault(vma) && !obj->userfault_count++) list_add(&obj->userfault_link, &i915->ggtt.userfault_list); if (CONFIG_DRM_I915_USERFAULT_AUTOSUSPEND) @@ -327,7 +328,7 @@ err_unlock: err_reset: i915_reset_unlock(i915, srcu); err_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); i915_gem_object_unpin_pages(obj); err: switch (ret) { @@ -410,7 +411,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) * wakeref. */ lockdep_assert_held(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (!obj->userfault_count) goto out; @@ -427,7 +428,7 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj) wmb(); out: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static int create_mmap_offset(struct drm_i915_gem_object *obj) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c index 36b76c6a0a9d..a4047a585c8b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c @@ -181,7 +181,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, struct drm_i915_gem_object *obj, *on; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); llist_for_each_entry_safe(obj, on, freed, freed) { struct i915_vma *vma, *vn; @@ -243,7 +243,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915, cond_resched(); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } void i915_gem_flush_free_objects(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index e15f37bef36a..13ff05566a0a 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -182,7 +182,7 @@ i915_gem_shrink(struct drm_i915_private *i915, * we will force the wake during oom-notifier. 
*/ if (shrink & I915_SHRINK_BOUND) { - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm); if (!wakeref) shrink &= ~I915_SHRINK_BOUND; } @@ -267,7 +267,7 @@ i915_gem_shrink(struct drm_i915_private *i915, } if (shrink & I915_SHRINK_BOUND) - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_retire_requests(i915); diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c index 73e667b31cc4..b74729b6f353 100644 --- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c +++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c @@ -1754,7 +1754,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) return PTR_ERR(file); mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); ctx = live_context(dev_priv, file); if (IS_ERR(ctx)) { @@ -1768,7 +1768,7 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *dev_priv) err = i915_subtests(tests, ctx); out_unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); mock_file_free(dev_priv, file); diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c index c72e17da090c..8f22d3f18422 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c @@ -293,7 +293,7 @@ static int igt_gem_coherency(void *arg) values = offsets + ncachelines; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for (over = igt_coherency_mode; over->name; over++) { if (!over->set) continue; @@ -371,7 +371,7 @@ static int igt_gem_coherency(void *arg) } } unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); kfree(offsets); return err; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c index 9e2878a59023..b85a4a43536a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c @@ -53,7 +53,7 @@ static int live_nop_switch(void *arg) return PTR_ERR(file); mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx = kcalloc(nctx, sizeof(*ctx), GFP_KERNEL); if (!ctx) { @@ -156,7 +156,7 @@ static int live_nop_switch(void *arg) } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); mock_file_free(i915, file); return err; @@ -1084,7 +1084,7 @@ __igt_ctx_sseu(struct drm_i915_private *i915, goto out_unlock; } - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ce = i915_gem_context_get_engine(ctx, RCS0); if (IS_ERR(ce)) { @@ -1124,7 +1124,7 @@ out_fail: out_context: intel_context_put(ce); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); i915_gem_object_put(obj); out_unlock: @@ -1541,7 +1541,7 @@ static int igt_vm_isolation(void *arg) GEM_BUG_ON(ctx_b->vm->total != vm_total); vm_total -= I915_GTT_PAGE_SIZE; - wakeref = intel_runtime_pm_get(i915); + wakeref = 
intel_runtime_pm_get(&i915->runtime_pm); count = 0; for_each_engine(engine, i915, id) { @@ -1586,7 +1586,7 @@ static int igt_vm_isolation(void *arg) count, RUNTIME_INFO(i915)->num_engines); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out_unlock: if (igt_live_test_end(&t)) err = -EIO; diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c index b92809418729..5c81f4b4813a 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c @@ -205,7 +205,7 @@ static int igt_partial_tiling(void *arg) } mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (1) { IGT_TIMEOUT(end); @@ -316,7 +316,7 @@ next_tiling: ; } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_unpin_pages(obj); out: diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index c0d986db5a75..39220e16ea26 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -1103,7 +1103,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) return true; /* If the whole device is asleep, the engine must be idle */ - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return true; @@ -1117,7 +1117,7 @@ static bool ring_is_idle(struct intel_engine_cs *engine) !(ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE)) idle = false; - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return idle; } @@ -1531,10 +1531,10 @@ void intel_engine_dump(struct intel_engine_cs *engine, rcu_read_unlock(); - wakeref = intel_runtime_pm_get_if_in_use(engine->i915); + wakeref = intel_runtime_pm_get_if_in_use(&engine->i915->runtime_pm); if (wakeref) { intel_engine_print_registers(engine, m); - intel_runtime_pm_put(engine->i915, wakeref); + intel_runtime_pm_put(&engine->i915->runtime_pm, wakeref); } else { drm_printf(m, "\tDevice is asleep; skipping register dump\n"); } diff --git a/drivers/gpu/drm/i915/gt/intel_hangcheck.c b/drivers/gpu/drm/i915/gt/intel_hangcheck.c index 174bb0a60309..6bcfa6456c45 100644 --- a/drivers/gpu/drm/i915/gt/intel_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/intel_hangcheck.c @@ -273,7 +273,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (i915_terminally_wedged(dev_priv)) return; - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return; @@ -324,7 +324,7 @@ static void i915_hangcheck_elapsed(struct work_struct *work) if (hung) hangcheck_declare_hang(dev_priv, hung, stuck); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); /* Reset timer in case GPU hangs without another request being added */ i915_queue_hangcheck(dev_priv); diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index 41a294f5cc19..9e7d400c083a 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -1311,7 +1311,7 @@ void i915_handle_error(struct drm_i915_private *i915, * isn't the case at least when we get here by doing a * simulated reset via debugfs, so get an RPM 
reference. */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); engine_mask &= INTEL_INFO(i915)->engine_mask; @@ -1374,7 +1374,7 @@ void i915_handle_error(struct drm_i915_private *i915, wake_up_all(&error->reset_queue); out: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } int i915_reset_trylock(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 45379a63e013..b0b2998e56b8 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -394,7 +394,7 @@ static int igt_reset_nop(void *arg) } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reset_count = i915_reset_count(&i915->gpu_error); count = 0; do { @@ -441,7 +441,7 @@ static int igt_reset_nop(void *arg) err = igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out: mock_file_free(i915, file); @@ -478,7 +478,7 @@ static int igt_reset_nop_engine(void *arg) } i915_gem_context_clear_bannable(ctx); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for_each_engine(engine, i915, id) { unsigned int reset_count, reset_engine_count; unsigned int count; @@ -549,7 +549,7 @@ static int igt_reset_nop_engine(void *arg) err = igt_flush_test(i915, I915_WAIT_LOCKED); mutex_unlock(&i915->drm.struct_mutex); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); out: mock_file_free(i915, file); if (i915_reset_failed(i915)) @@ -1749,7 +1749,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) if (i915_terminally_wedged(i915)) return -EIO; /* we're long past hope of a successful reset */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); saved_hangcheck = fetch_and_zero(&i915_modparams.enable_hangcheck); drain_delayed_work(&i915->gpu_error.hangcheck_work); /* flush param */ @@ -1760,7 +1760,7 @@ int intel_hangcheck_live_selftests(struct drm_i915_private *i915) mutex_unlock(&i915->drm.struct_mutex); i915_modparams.enable_hangcheck = saved_hangcheck; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; } diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c index f0ca2a09dabd..d84d31e3da19 100644 --- a/drivers/gpu/drm/i915/gt/selftest_lrc.c +++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c @@ -33,7 +33,7 @@ static int live_sanitycheck(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin, i915)) goto err_unlock; @@ -74,7 +74,7 @@ err_spin: igt_spinner_fini(&spin); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -97,7 +97,7 @@ static int live_busywait_preempt(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); ctx_hi = kernel_context(i915); if (!ctx_hi) @@ -255,7 +255,7 @@ err_ctx_hi: err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - 
intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -277,7 +277,7 @@ static int live_preempt(void *arg) pr_err("Logical preemption supported, but not exposed\n"); mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -362,7 +362,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -382,7 +382,7 @@ static int live_late_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -466,7 +466,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -532,7 +532,7 @@ static int live_suppress_self_preempt(void *arg) return 0; /* presume black blox */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &a)) goto err_unlock; @@ -606,7 +606,7 @@ err_client_a: err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -683,7 +683,7 @@ static int live_suppress_wait_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &client[0])) /* ELSP[0] */ goto err_unlock; @@ -776,7 +776,7 @@ err_client_0: err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -807,7 +807,7 @@ static int live_chain_preempt(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (preempt_client_init(i915, &hi)) goto err_unlock; @@ -924,7 +924,7 @@ err_client_hi: err_unlock: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -953,7 +953,7 @@ static int live_preempt_hang(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); if (igt_spinner_init(&spin_hi, i915)) goto err_unlock; @@ -1050,7 +1050,7 @@ err_spin_hi: igt_spinner_fini(&spin_hi); err_unlock: igt_flush_test(i915, I915_WAIT_LOCKED); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -1256,7 +1256,7 @@ static int live_preempt_smoke(void *arg) return -ENOMEM; mutex_lock(&smoke.i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(smoke.i915); + wakeref = intel_runtime_pm_get(&smoke.i915->runtime_pm); smoke.batch = 
i915_gem_object_create_internal(smoke.i915, PAGE_SIZE); if (IS_ERR(smoke.batch)) { @@ -1309,7 +1309,7 @@ err_ctx: err_batch: i915_gem_object_put(smoke.batch); err_unlock: - intel_runtime_pm_put(smoke.i915, wakeref); + intel_runtime_pm_put(&smoke.i915->runtime_pm, wakeref); mutex_unlock(&smoke.i915->drm.struct_mutex); kfree(smoke.contexts); diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c index 607473439eb0..91f0349882fc 100644 --- a/drivers/gpu/drm/i915/gt/selftest_reset.c +++ b/drivers/gpu/drm/i915/gt/selftest_reset.c @@ -42,14 +42,14 @@ static int igt_wedged_reset(void *arg) /* Check that we can recover a wedged device with a GPU reset */ igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); i915_gem_set_wedged(i915); GEM_BUG_ON(!i915_reset_failed(i915)); i915_reset(i915, ALL_ENGINES, NULL); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); return i915_reset_failed(i915) ? -EIO : 0; diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c index 93e9579b8a4f..8891e1a8ac16 100644 --- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c +++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c @@ -636,7 +636,7 @@ static int live_dirty_whitelist(void *arg) if (INTEL_GEN(i915) < 7) /* minimum requirement for LRI, SRM, LRM */ return 0; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); mutex_unlock(&i915->drm.struct_mutex); file = mock_file(i915); @@ -666,7 +666,7 @@ out_file: mock_file_free(i915, file); mutex_lock(&i915->drm.struct_mutex); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; } @@ -1055,7 +1055,7 @@ live_gpu_reset_workarounds(void *arg) pr_info("Verifying after GPU reset...\n"); igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1070,7 +1070,7 @@ live_gpu_reset_workarounds(void *arg) out: kernel_context_close(ctx); reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); return ok ? 
0 : -ESRCH; @@ -1097,7 +1097,7 @@ live_engine_reset_workarounds(void *arg) return PTR_ERR(ctx); igt_global_reset_lock(i915); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); reference_lists_init(i915, &lists); @@ -1154,7 +1154,7 @@ live_engine_reset_workarounds(void *arg) err: reference_lists_fini(i915, &lists); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); igt_global_reset_unlock(i915); kernel_context_close(ctx); diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c index 716622266fa6..c3d19d88da40 100644 --- a/drivers/gpu/drm/i915/gvt/aperture_gm.c +++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c @@ -170,7 +170,7 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) if (WARN_ON(!vgpu_fence_sz(vgpu))) return; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&dev_priv->drm.struct_mutex); _clear_vgpu_fence(vgpu); @@ -181,17 +181,18 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu) } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } static int alloc_vgpu_fence(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; struct i915_fence_reg *reg; int i; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(rpm); /* Request fences from host */ mutex_lock(&dev_priv->drm.struct_mutex); @@ -207,7 +208,7 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu) _clear_vgpu_fence(vgpu); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(rpm); return 0; out_free_fence: gvt_vgpu_err("Failed to alloc fences\n"); @@ -220,7 +221,7 @@ out_free_fence: vgpu->fence.regs[i] = NULL; } mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(rpm); return -ENOSPC; } @@ -316,9 +317,9 @@ void intel_vgpu_reset_resource(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); _clear_vgpu_fence(vgpu); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } /** diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index dfd10cf82b65..7a1fe44d45af 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -584,12 +584,12 @@ enum { static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv) { - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); } static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv) { - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } /** diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 1c763a27a412..2369d4a9af94 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -465,7 +465,7 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) scheduler->current_vgpu = NULL; } - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_bh(&scheduler->mmio_context_lock); for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) { if (scheduler->engine_owner[ring_id] == 
vgpu) { @@ -474,6 +474,6 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu) } } spin_unlock_bh(&scheduler->mmio_context_lock); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); mutex_unlock(&vgpu->gvt->sched_lock); } diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 080cb740e028..8f0bd1e195d1 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -1501,11 +1501,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id, * as there is only one pre-allocated buf-obj for shadow. */ if (list_empty(workload_q_head(vgpu, ring_id))) { - intel_runtime_pm_get(dev_priv); + intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&dev_priv->drm.struct_mutex); ret = intel_gvt_scan_and_shadow_workload(workload); mutex_unlock(&dev_priv->drm.struct_mutex); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } if (ret) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 1fc5af4c1905..5ca41165fb78 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -490,7 +490,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) intel_wakeref_t wakeref; int i, pipe; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_CHERRYVIEW(dev_priv)) { intel_wakeref_t pref; @@ -696,7 +696,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data) } } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -833,7 +833,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) intel_wakeref_t wakeref; int ret = 0; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_GEN(dev_priv, 5)) { u16 rgvswctl = intel_uncore_read16(uncore, MEMSWCTL); @@ -1045,7 +1045,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Max CD clock frequency: %d kHz\n", dev_priv->max_cdclk_freq); seq_printf(m, "Max pixel clock frequency: %d kHz\n", dev_priv->max_dotclk_freq); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret; } @@ -1391,7 +1391,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) if (!HAS_FBC(dev_priv)) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&fbc->lock); if (intel_fbc_is_active(dev_priv)) @@ -1418,7 +1418,7 @@ static int i915_fbc_status(struct seq_file *m, void *unused) } mutex_unlock(&fbc->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1468,7 +1468,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) if (!HAS_IPS(dev_priv)) return -ENODEV; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "Enabled by kernel parameter: %s\n", yesno(i915_modparams.enable_ips)); @@ -1482,7 +1482,7 @@ static int i915_ips_status(struct seq_file *m, void *unused) seq_puts(m, "Currently: disabled\n"); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1561,7 +1561,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) seq_puts(m, "GPU freq (MHz)\tEffective CPU freq 
(MHz)\tEffective Ring freq (MHz)\n"); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { ia_freq = gpu_freq; sandybridge_pcode_read(dev_priv, @@ -1575,7 +1575,7 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused) ((ia_freq >> 0) & 0xff) * 100, ((ia_freq >> 8) & 0xff) * 100); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -1752,7 +1752,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) struct intel_uncore *uncore = &dev_priv->uncore; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "bit6 swizzle for X-tiling = %s\n", swizzle_string(dev_priv->mm.bit_6_swizzle_x)); @@ -1790,7 +1790,7 @@ static int i915_swizzle_info(struct seq_file *m, void *data) if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) seq_puts(m, "L-shaped memory detected\n"); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2303,7 +2303,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) if (!psr->sink_support) return 0; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&psr->lock); if (psr->enabled) @@ -2367,7 +2367,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) unlock: mutex_unlock(&psr->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2384,11 +2384,11 @@ i915_edp_psr_debug_set(void *data, u64 val) DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); ret = intel_psr_debug_set(dev_priv, val); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret; } @@ -2504,7 +2504,7 @@ static int i915_dmc_info(struct seq_file *m, void *unused) csr = &dev_priv->csr; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL)); seq_printf(m, "path: %s\n", csr->fw_path); @@ -2530,7 +2530,7 @@ out: seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE)); seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL)); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2814,7 +2814,7 @@ static int i915_display_info(struct seq_file *m, void *unused) struct drm_connector_list_iter conn_iter; intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "CRTC info\n"); seq_printf(m, "---------\n"); @@ -2863,7 +2863,7 @@ static int i915_display_info(struct seq_file *m, void *unused) drm_connector_list_iter_end(&conn_iter); mutex_unlock(&dev->mode_config.mutex); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -2876,7 +2876,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) enum intel_engine_id id; struct drm_printer p; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); seq_printf(m, "GT awake? 
%s [%d]\n", yesno(dev_priv->gt.awake), @@ -2888,7 +2888,7 @@ static int i915_engine_info(struct seq_file *m, void *unused) for_each_engine(engine, dev_priv, id) intel_engine_dump(engine, &p, "%s\n", engine->name); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return 0; } @@ -4051,7 +4051,8 @@ static int i915_forcewake_open(struct inode *inode, struct file *file) if (INTEL_GEN(i915) < 6) return 0; - file->private_data = (void *)(uintptr_t)intel_runtime_pm_get(i915); + file->private_data = + (void *)(uintptr_t)intel_runtime_pm_get(&i915->runtime_pm); intel_uncore_forcewake_user_get(&i915->uncore); return 0; @@ -4065,7 +4066,7 @@ static int i915_forcewake_release(struct inode *inode, struct file *file) return 0; intel_uncore_forcewake_user_put(&i915->uncore); - intel_runtime_pm_put(i915, + intel_runtime_pm_put(&i915->runtime_pm, (intel_wakeref_t)(uintptr_t)file->private_data); return 0; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7232361973fd..9804e194fa7d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -374,7 +374,7 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj, if (ret) return ret; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, PIN_MAPPABLE | PIN_NONFAULT | @@ -461,7 +461,7 @@ out_unpin: i915_vma_unpin(vma); } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -561,6 +561,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, { struct drm_i915_private *i915 = to_i915(obj->base.dev); struct i915_ggtt *ggtt = &i915->ggtt; + struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t wakeref; struct drm_mm_node node; struct dma_fence *fence; @@ -581,14 +582,14 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj, * This easily dwarfs any performance advantage from * using the cache bypass of indirect GGTT access. 
*/ - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(rpm); if (!wakeref) { ret = -EFAULT; goto out_unlock; } } else { /* No backing pages, no fallback, we must force GGTT access */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(rpm); } vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, @@ -684,7 +685,7 @@ out_unpin: i915_vma_unpin(vma); } out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); out_unlock: mutex_unlock(&i915->drm.struct_mutex); return ret; @@ -1174,7 +1175,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) GEM_TRACE("\n"); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL); /* @@ -1197,7 +1198,7 @@ void i915_gem_sanitize(struct drm_i915_private *i915) intel_gt_sanitize(i915, false); intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_lock(&i915->drm.struct_mutex); i915_gem_contexts_lost(i915); @@ -1815,7 +1816,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) * the objects as well, see i915_gem_freeze() */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); i915_gem_shrink(i915, -1UL, NULL, ~0); i915_gem_drain_freed_objects(i915); @@ -1826,7 +1827,7 @@ int i915_gem_freeze_late(struct drm_i915_private *i915) i915_gem_object_unlock(obj); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c index fc4cf908afc3..3b35bb114b14 100644 --- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c +++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c @@ -270,7 +270,7 @@ static int fence_update(struct i915_fence_reg *fence, * be cleared before we can use any other fences to ensure that * the new fences do not overlap the elided clears, confusing HW. 
*/ - wakeref = intel_runtime_pm_get_if_in_use(fence->i915); + wakeref = intel_runtime_pm_get_if_in_use(&fence->i915->runtime_pm); if (!wakeref) { GEM_BUG_ON(vma); return 0; @@ -284,7 +284,7 @@ static int fence_update(struct i915_fence_reg *fence, list_move_tail(&fence->link, &fence->i915->ggtt.fence_list); } - intel_runtime_pm_put(fence->i915, wakeref); + intel_runtime_pm_put(&fence->i915->runtime_pm, wakeref); return 0; } diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7be72388b052..057ea8303bce 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1825,7 +1825,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, unsigned int pde; bool flush = false; - wakeref = intel_runtime_pm_get(vm->i915); + wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm); spin_lock(&ppgtt->base.pd.lock); gen6_for_each_pde(pt, &ppgtt->base.pd, start, length, pde) { @@ -1868,12 +1868,12 @@ static int gen6_alloc_va_range(struct i915_address_space *vm, gen6_ggtt_invalidate(vm->i915); } - intel_runtime_pm_put(vm->i915, wakeref); + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); return 0; unwind_out: - intel_runtime_pm_put(vm->i915, wakeref); + intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref); gen6_ppgtt_clear_range(vm, from, start - from); return -ENOMEM; } diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index d92ddfada262..3d8162d28730 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -1375,7 +1375,7 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream) free_oa_buffer(dev_priv); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv, stream->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); if (stream->ctx) oa_put_render_ctx_id(stream); @@ -2112,7 +2112,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, * In our case we are expecting that taking pm + FORCEWAKE * references will effectively disable RC6. 
*/ - stream->wakeref = intel_runtime_pm_get(dev_priv); + stream->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL); ret = alloc_oa_buffer(dev_priv); @@ -2148,7 +2148,7 @@ err_oa_buf_alloc: put_oa_config(dev_priv, stream->oa_config); intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv, stream->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, stream->wakeref); err_config: if (stream->ctx) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index eb9c0e0e545c..507fcdb0f97e 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -171,7 +171,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) wakeref = 0; if (READ_ONCE(dev_priv->gt.awake)) - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return; @@ -207,7 +207,7 @@ engines_sample(struct drm_i915_private *dev_priv, unsigned int period_ns) } spin_unlock_irqrestore(&dev_priv->uncore.lock, flags); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } static void @@ -443,14 +443,15 @@ static u64 __get_rc6(struct drm_i915_private *i915) static u64 get_rc6(struct drm_i915_private *i915) { #if IS_ENABLED(CONFIG_PM) + struct intel_runtime_pm *rpm = &i915->runtime_pm; intel_wakeref_t wakeref; unsigned long flags; u64 val; - wakeref = intel_runtime_pm_get_if_in_use(i915); + wakeref = intel_runtime_pm_get_if_in_use(rpm); if (wakeref) { val = __get_rc6(i915); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); /* * If we are coming back from being runtime suspended we must @@ -469,8 +470,7 @@ static u64 get_rc6(struct drm_i915_private *i915) spin_unlock_irqrestore(&i915->pmu.lock, flags); } else { - struct pci_dev *pdev = i915->drm.pdev; - struct device *kdev = &pdev->dev; + struct device *kdev = rpm->kdev; /* * We are runtime suspended. 
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 3ef07b987d40..75acbf686ec9 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -264,7 +264,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, intel_wakeref_t wakeref; u32 freq; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { vlv_punit_get(dev_priv); @@ -276,7 +276,7 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, freq = intel_get_cagf(dev_priv, I915_READ(GEN6_RPSTAT1)); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return snprintf(buf, PAGE_SIZE, "%d\n", intel_gpu_freq(dev_priv, freq)); } @@ -364,7 +364,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, if (ret) return ret; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); val = intel_freq_opcode(dev_priv, val); @@ -392,7 +392,7 @@ static ssize_t gt_max_freq_mhz_store(struct device *kdev, unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } @@ -420,7 +420,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, if (ret) return ret; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&rps->lock); val = intel_freq_opcode(dev_priv, val); @@ -444,7 +444,7 @@ static ssize_t gt_min_freq_mhz_store(struct device *kdev, unlock: mutex_unlock(&rps->lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return ret ?: count; } diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e681ed99cdf2..0bf1e09acf22 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -2112,7 +2112,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, * intel_runtime_pm_put(), so it is correct to wrap only the * pin/unpin/fence and not more. 
*/ - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); i915_gem_object_lock(obj); atomic_inc(&dev_priv->gpu_error.pending_fb_pin); @@ -2169,7 +2169,7 @@ err: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); i915_gem_object_unlock(obj); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); return vma; } @@ -13927,7 +13927,7 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state) intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore); intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref); } - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); /* * Defer the cleanup of the old state to a separate worker to not @@ -14006,7 +14006,7 @@ static int intel_atomic_commit(struct drm_device *dev, struct drm_i915_private *dev_priv = to_i915(dev); int ret = 0; - intel_state->wakeref = intel_runtime_pm_get(dev_priv); + intel_state->wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); drm_atomic_state_get(state); i915_sw_fence_init(&intel_state->commit_ready, @@ -14044,7 +14044,7 @@ static int intel_atomic_commit(struct drm_device *dev, if (ret) { DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret); i915_sw_fence_commit(&intel_state->commit_ready); - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); return ret; } @@ -14056,7 +14056,7 @@ static int intel_atomic_commit(struct drm_device *dev, i915_sw_fence_commit(&intel_state->commit_ready); drm_atomic_helper_cleanup_planes(dev, state); - intel_runtime_pm_put(dev_priv, intel_state->wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, intel_state->wakeref); return ret; } dev_priv->wm.distrust_bios_wm = false; diff --git a/drivers/gpu/drm/i915/intel_display_power.c b/drivers/gpu/drm/i915/intel_display_power.c index 3e52453189ef..c672c8080a93 100644 --- a/drivers/gpu/drm/i915/intel_display_power.c +++ b/drivers/gpu/drm/i915/intel_display_power.c @@ -1644,7 +1644,7 @@ intel_display_power_grab_async_put_ref(struct drm_i915_private *dev_priv, goto out_verify; cancel_delayed_work(&power_domains->async_put_work); - intel_runtime_pm_put_raw(dev_priv, + intel_runtime_pm_put_raw(&dev_priv->runtime_pm, fetch_and_zero(&power_domains->async_put_wakeref)); out_verify: verify_async_put_domains_state(power_domains); @@ -1684,7 +1684,7 @@ intel_wakeref_t intel_display_power_get(struct drm_i915_private *dev_priv, enum intel_display_power_domain domain) { struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t wakeref = intel_runtime_pm_get(dev_priv); + intel_wakeref_t wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); mutex_lock(&power_domains->lock); __intel_display_power_get_domain(dev_priv, domain); @@ -1713,7 +1713,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref; bool is_enabled; - wakeref = intel_runtime_pm_get_if_in_use(dev_priv); + wakeref = intel_runtime_pm_get_if_in_use(&dev_priv->runtime_pm); if (!wakeref) return false; @@ -1729,7 +1729,7 @@ intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, mutex_unlock(&power_domains->lock); if (!is_enabled) { - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); wakeref = 0; } @@ -1786,7 +1786,7 @@ void intel_display_power_put_unchecked(struct drm_i915_private *dev_priv, enum intel_display_power_domain 
domain) { __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put_unchecked(dev_priv); + intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm); } static void @@ -1806,6 +1806,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) struct drm_i915_private *dev_priv = container_of(power_domains, struct drm_i915_private, power_domains); + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; enum intel_display_power_domain domain; intel_wakeref_t wakeref; @@ -1814,8 +1815,8 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) * wakeref to make the state checker happy about the HW access during * power well disabling. */ - assert_rpm_raw_wakeref_held(&dev_priv->runtime_pm); - wakeref = intel_runtime_pm_get(dev_priv); + assert_rpm_raw_wakeref_held(rpm); + wakeref = intel_runtime_pm_get(rpm); for_each_power_domain(domain, mask) { /* Clear before put, so put's sanity check is happy. */ @@ -1823,7 +1824,7 @@ release_async_put_domains(struct i915_power_domains *power_domains, u64 mask) __intel_display_power_put_domain(dev_priv, domain); } - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(rpm, wakeref); } static void @@ -1833,7 +1834,8 @@ intel_display_power_put_async_work(struct work_struct *work) container_of(work, struct drm_i915_private, power_domains.async_put_work.work); struct i915_power_domains *power_domains = &dev_priv->power_domains; - intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(dev_priv); + struct intel_runtime_pm *rpm = &dev_priv->runtime_pm; + intel_wakeref_t new_work_wakeref = intel_runtime_pm_get_raw(rpm); intel_wakeref_t old_work_wakeref = 0; mutex_lock(&power_domains->lock); @@ -1863,9 +1865,9 @@ out_verify: mutex_unlock(&power_domains->lock); if (old_work_wakeref) - intel_runtime_pm_put_raw(dev_priv, old_work_wakeref); + intel_runtime_pm_put_raw(rpm, old_work_wakeref); if (new_work_wakeref) - intel_runtime_pm_put_raw(dev_priv, new_work_wakeref); + intel_runtime_pm_put_raw(rpm, new_work_wakeref); } /** @@ -1883,7 +1885,8 @@ void __intel_display_power_put_async(struct drm_i915_private *i915, intel_wakeref_t wakeref) { struct i915_power_domains *power_domains = &i915->power_domains; - intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(i915); + struct intel_runtime_pm *rpm = &i915->runtime_pm; + intel_wakeref_t work_wakeref = intel_runtime_pm_get_raw(rpm); mutex_lock(&power_domains->lock); @@ -1910,9 +1913,9 @@ out_verify: mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(i915, work_wakeref); + intel_runtime_pm_put_raw(rpm, work_wakeref); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(rpm, wakeref); } /** @@ -1948,7 +1951,7 @@ out_verify: mutex_unlock(&power_domains->lock); if (work_wakeref) - intel_runtime_pm_put_raw(i915, work_wakeref); + intel_runtime_pm_put_raw(&i915->runtime_pm, work_wakeref); } /** @@ -1987,7 +1990,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, intel_wakeref_t wakeref) { __intel_display_power_put(dev_priv, domain); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } #endif @@ -4402,7 +4405,7 @@ void intel_power_domains_fini_hw(struct drm_i915_private *i915) intel_power_domains_verify_state(i915); /* Keep the power well enabled, but cancel its rpm wakeref. 
*/ - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } /** diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 0d3a6fa674e6..1edd44ee32b2 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -213,7 +213,7 @@ static int intelfb_create(struct drm_fb_helper *helper, } mutex_lock(&dev->struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); /* Pin the GGTT vma for our access via info->screen_base. * This also validates that any existing fb inherited from the @@ -272,7 +272,7 @@ static int intelfb_create(struct drm_fb_helper *helper, ifbdev->vma = vma; ifbdev->vma_flags = flags; - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev->struct_mutex); vga_switcheroo_client_fb_set(pdev, info); return 0; @@ -280,7 +280,7 @@ static int intelfb_create(struct drm_fb_helper *helper, out_unpin: intel_unpin_fb_vma(vma, flags); out_unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev->struct_mutex); return ret; } diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index ff9eb3c855d3..ea3de4acc850 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c @@ -230,7 +230,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) intel_wakeref_t wakeref; enum hpd_pin pin; - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); spin_lock_irq(&dev_priv->irq_lock); for_each_hpd_pin(pin) { @@ -263,7 +263,7 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work) dev_priv->display.hpd_irq_setup(dev_priv); spin_unlock_irq(&dev_priv->irq_lock); - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); } bool intel_encoder_hotplug(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 3d9ea3498679..502c54428570 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -369,7 +369,7 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm, /** * intel_runtime_pm_get_raw - grab a raw runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This is the unlocked version of intel_display_power_is_enabled() and should * only be used from error capture and recovery code where deadlocks are @@ -384,15 +384,14 @@ static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm, * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates * as True if the wakeref was acquired, or False otherwise. */ - -intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm) { - return __intel_runtime_pm_get(&i915->runtime_pm, false); + return __intel_runtime_pm_get(rpm, false); } /** * intel_runtime_pm_get - grab a runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function grabs a device-level runtime pm reference (mostly used for GEM * code to ensure the GTT or GT is on) and ensures that it is powered up. 
@@ -402,14 +401,14 @@ intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915) * * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm) { - return __intel_runtime_pm_get(&i915->runtime_pm, true); + return __intel_runtime_pm_get(rpm, true); } /** * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function grabs a device-level runtime pm reference if the device is * already in use and ensures that it is powered up. It is illegal to try @@ -421,10 +420,8 @@ intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915) * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates * as True if the wakeref was acquired, or False otherwise. */ -intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm *rpm = &i915->runtime_pm; - if (IS_ENABLED(CONFIG_PM)) { /* * In cases runtime PM is disabled by the RPM core and we get @@ -443,7 +440,7 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) /** * intel_runtime_pm_get_noresume - grab a runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function grabs a device-level runtime pm reference (mostly used for GEM * code to ensure the GTT or GT is on). @@ -460,10 +457,8 @@ intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915) * * Returns: the wakeref cookie to pass to intel_runtime_pm_put() */ -intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915) +intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm) { - struct intel_runtime_pm *rpm = &i915->runtime_pm; - assert_rpm_wakelock_held(rpm); pm_runtime_get_noresume(rpm->kdev); @@ -488,7 +483,7 @@ static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm, /** * intel_runtime_pm_put_raw - release a raw runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * @wref: wakeref acquired for the reference that is being released * * This function drops the device-level runtime pm reference obtained by @@ -496,14 +491,14 @@ static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm, * hardware block right away if this is the last reference. */ void -intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref) +intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref) { - __intel_runtime_pm_put(&i915->runtime_pm, wref, false); + __intel_runtime_pm_put(rpm, wref, false); } /** * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * * This function drops the device-level runtime pm reference obtained by * intel_runtime_pm_get() and might power down the corresponding @@ -513,24 +508,24 @@ intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref) * new code, as the correctness of its use cannot be checked. Always use * intel_runtime_pm_put() instead. 
*/ -void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915) +void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm) { - __intel_runtime_pm_put(&i915->runtime_pm, -1, true); + __intel_runtime_pm_put(rpm, -1, true); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) /** * intel_runtime_pm_put - release a runtime pm reference - * @i915: i915 device instance + * @rpm: the intel_runtime_pm structure * @wref: wakeref acquired for the reference that is being released * * This function drops the device-level runtime pm reference obtained by * intel_runtime_pm_get() and might power down the corresponding * hardware block right away if this is the last reference. */ -void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) +void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref) { - __intel_runtime_pm_put(&i915->runtime_pm, wref, true); + __intel_runtime_pm_put(rpm, wref, true); } #endif diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h index 0890e698f196..f6445ca5bbf1 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.h +++ b/drivers/gpu/drm/i915/intel_runtime_pm.h @@ -174,30 +174,30 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm); void intel_runtime_pm_disable(struct intel_runtime_pm *rpm); void intel_runtime_pm_cleanup(struct intel_runtime_pm *rpm); -intel_wakeref_t intel_runtime_pm_get(struct drm_i915_private *i915); -intel_wakeref_t intel_runtime_pm_get_if_in_use(struct drm_i915_private *i915); -intel_wakeref_t intel_runtime_pm_get_noresume(struct drm_i915_private *i915); -intel_wakeref_t intel_runtime_pm_get_raw(struct drm_i915_private *i915); +intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm); +intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm); +intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm); +intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm); #define with_intel_runtime_pm(i915, wf) \ - for ((wf) = intel_runtime_pm_get(i915); (wf); \ - intel_runtime_pm_put((i915), (wf)), (wf) = 0) + for ((wf) = intel_runtime_pm_get(&(i915)->runtime_pm); (wf); \ + intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0) #define with_intel_runtime_pm_if_in_use(i915, wf) \ - for ((wf) = intel_runtime_pm_get_if_in_use(i915); (wf); \ - intel_runtime_pm_put((i915), (wf)), (wf) = 0) + for ((wf) = intel_runtime_pm_get_if_in_use(&(i915)->runtime_pm); (wf); \ + intel_runtime_pm_put(&(i915)->runtime_pm, (wf)), (wf) = 0) -void intel_runtime_pm_put_unchecked(struct drm_i915_private *i915); +void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) -void intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref); +void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref); #else static inline void -intel_runtime_pm_put(struct drm_i915_private *i915, intel_wakeref_t wref) +intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref) { - intel_runtime_pm_put_unchecked(i915); + intel_runtime_pm_put_unchecked(rpm); } #endif -void intel_runtime_pm_put_raw(struct drm_i915_private *i915, intel_wakeref_t wref); +void intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref); #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm, diff --git a/drivers/gpu/drm/i915/intel_wakeref.c b/drivers/gpu/drm/i915/intel_wakeref.c index 
b6c7167ce154..b677ae893d6f 100644 --- a/drivers/gpu/drm/i915/intel_wakeref.c +++ b/drivers/gpu/drm/i915/intel_wakeref.c @@ -9,14 +9,14 @@ static void rpm_get(struct drm_i915_private *i915, struct intel_wakeref *wf) { - wf->wakeref = intel_runtime_pm_get(i915); + wf->wakeref = intel_runtime_pm_get(&i915->runtime_pm); } static void rpm_put(struct drm_i915_private *i915, struct intel_wakeref *wf) { intel_wakeref_t wakeref = fetch_and_zero(&wf->wakeref); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); GEM_BUG_ON(!wakeref); } @@ -86,7 +86,7 @@ static void wakeref_auto_timeout(struct timer_list *t) wakeref = fetch_and_zero(&wf->wakeref); spin_unlock_irqrestore(&wf->lock, flags); - intel_runtime_pm_put(wf->i915, wakeref); + intel_runtime_pm_put(&wf->i915->runtime_pm, wakeref); } void intel_wakeref_auto_init(struct intel_wakeref_auto *wf, @@ -116,7 +116,7 @@ void intel_wakeref_auto(struct intel_wakeref_auto *wf, unsigned long timeout) spin_lock_irqsave(&wf->lock, flags); if (!refcount_inc_not_zero(&wf->count)) { GEM_BUG_ON(wf->wakeref); - wf->wakeref = intel_runtime_pm_get_if_in_use(wf->i915); + wf->wakeref = intel_runtime_pm_get_if_in_use(&wf->i915->runtime_pm); refcount_set(&wf->count, 1); } spin_unlock_irqrestore(&wf->lock, flags); diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c index cc1ca4be1a00..c0b3537a5fa6 100644 --- a/drivers/gpu/drm/i915/selftests/i915_active.c +++ b/drivers/gpu/drm/i915/selftests/i915_active.c @@ -97,7 +97,7 @@ static int live_active_wait(void *arg) /* Check that we get a callback when requests retire upon waiting */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); err = __live_active_setup(i915, &active); @@ -111,7 +111,7 @@ static int live_active_wait(void *arg) if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -126,7 +126,7 @@ static int live_active_retire(void *arg) /* Check that we get a callback when requests are indirectly retired */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); err = __live_active_setup(i915, &active); @@ -140,7 +140,7 @@ static int live_active_retire(void *arg) } i915_active_fini(&active.base); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c index 83643929416c..23a54da47ca5 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem.c @@ -63,7 +63,7 @@ static void simulate_hibernate(struct drm_i915_private *i915) { intel_wakeref_t wakeref; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); /* * As a final sting in the tail, invalidate stolen. 
Under a real S4, @@ -74,7 +74,7 @@ static void simulate_hibernate(struct drm_i915_private *i915) */ trash_stolen(i915); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static int pm_prepare(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c index 71c1363ad536..e5f67474dcb4 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c @@ -404,7 +404,7 @@ static int igt_evict_contexts(void *arg) return 0; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); /* Reserve a block so that we know we have enough to fit a few rq */ memset(&hole, 0, sizeof(hole)); @@ -515,7 +515,7 @@ out_locked: } if (drm_mm_node_allocated(&hole)) drm_mm_remove_node(&hole); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index 2093d08a7569..1a60b9fe8221 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -295,9 +295,9 @@ static int lowlevel_hole(struct drm_i915_private *i915, mock_vma.node.size = BIT_ULL(size); mock_vma.node.start = addr; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); vm->insert_entries(vm, &mock_vma, I915_CACHE_NONE, 0); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); } count = n; @@ -1171,7 +1171,7 @@ static int igt_ggtt_page(void *arg) if (err) goto out_unpin; - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for (n = 0; n < count; n++) { u64 offset = tmp.start + n * PAGE_SIZE; @@ -1218,7 +1218,7 @@ static int igt_ggtt_page(void *arg) kfree(order); out_remove: ggtt->vm.clear_range(&ggtt->vm, tmp.start, tmp.size); - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); drm_mm_remove_node(&tmp); out_unpin: i915_gem_object_unpin_pages(obj); diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index 11278bac3a24..dd4e72eafc6c 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -537,7 +537,7 @@ static int live_nop_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for_each_engine(engine, i915, id) { struct i915_request *request = NULL; @@ -597,7 +597,7 @@ static int live_nop_request(void *arg) } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -682,7 +682,7 @@ static int live_empty_request(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); batch = empty_batch(i915); if (IS_ERR(batch)) { @@ -746,7 +746,7 @@ out_batch: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -839,7 +839,7 @@ static int live_all_engines(void *arg) */ 
mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); err = igt_live_test_begin(&t, i915, __func__, ""); if (err) @@ -919,7 +919,7 @@ out_request: i915_vma_unpin(batch); i915_vma_put(batch); out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -942,7 +942,7 @@ static int live_sequential_engines(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); err = igt_live_test_begin(&t, i915, __func__, ""); if (err) @@ -1048,7 +1048,7 @@ out_request: i915_request_put(request[id]); } out_unlock: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; } @@ -1113,7 +1113,7 @@ static int live_breadcrumbs_smoketest(void *arg) * On real hardware this time. */ - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); file = mock_file(i915); if (IS_ERR(file)) { @@ -1220,7 +1220,7 @@ out_threads: out_file: mock_file_free(i915, file); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return ret; } diff --git a/drivers/gpu/drm/i915/selftests/i915_timeline.c b/drivers/gpu/drm/i915/selftests/i915_timeline.c index acb2cc5136b7..724bf3650b3e 100644 --- a/drivers/gpu/drm/i915/selftests/i915_timeline.c +++ b/drivers/gpu/drm/i915/selftests/i915_timeline.c @@ -515,7 +515,7 @@ static int live_hwsp_engine(void *arg) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); count = 0; for_each_engine(engine, i915, id) { @@ -558,7 +558,7 @@ out: i915_timeline_put(tl); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); kvfree(timelines); @@ -591,7 +591,7 @@ static int live_hwsp_alternate(void *arg) return -ENOMEM; mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); count = 0; for (n = 0; n < NUM_TIMELINES; n++) { @@ -634,7 +634,7 @@ out: i915_timeline_put(tl); } - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); kvfree(timelines); @@ -658,7 +658,7 @@ static int live_hwsp_wrap(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); tl = i915_timeline_create(i915, NULL); if (IS_ERR(tl)) { @@ -749,7 +749,7 @@ out: out_free: i915_timeline_put(tl); out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; @@ -771,7 +771,7 @@ static int live_hwsp_recycle(void *arg) */ mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); count = 0; for_each_engine(engine, i915, id) { @@ -825,7 +825,7 @@ static int live_hwsp_recycle(void *arg) out: if (igt_flush_test(i915, I915_WAIT_LOCKED)) err = -EIO; - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); return err; diff --git a/drivers/gpu/drm/i915/selftests/i915_vma.c b/drivers/gpu/drm/i915/selftests/i915_vma.c 
index a166d9405a94..fbc79b14823a 100644 --- a/drivers/gpu/drm/i915/selftests/i915_vma.c +++ b/drivers/gpu/drm/i915/selftests/i915_vma.c @@ -873,7 +873,7 @@ static int igt_vma_remapped_gtt(void *arg) mutex_lock(&i915->drm.struct_mutex); - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for (t = types; *t; t++) { for (p = planes; p->width; p++) { @@ -965,7 +965,7 @@ static int igt_vma_remapped_gtt(void *arg) } out: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); mutex_unlock(&i915->drm.struct_mutex); i915_gem_object_put(obj); diff --git a/drivers/gpu/drm/i915/selftests/intel_guc.c b/drivers/gpu/drm/i915/selftests/intel_guc.c index 7fd0321e0947..6ca8584cd64c 100644 --- a/drivers/gpu/drm/i915/selftests/intel_guc.c +++ b/drivers/gpu/drm/i915/selftests/intel_guc.c @@ -144,7 +144,7 @@ static int igt_guc_clients(void *args) GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); guc = &dev_priv->guc; if (!guc) { @@ -227,7 +227,7 @@ out: guc_clients_create(guc); guc_clients_enable(guc); unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } @@ -247,7 +247,7 @@ static int igt_guc_doorbells(void *arg) GEM_BUG_ON(!HAS_GUC(dev_priv)); mutex_lock(&dev_priv->drm.struct_mutex); - wakeref = intel_runtime_pm_get(dev_priv); + wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm); guc = &dev_priv->guc; if (!guc) { @@ -340,7 +340,7 @@ out: guc_client_free(clients[i]); } unlock: - intel_runtime_pm_put(dev_priv, wakeref); + intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref); mutex_unlock(&dev_priv->drm.struct_mutex); return err; } diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index e0d7ebecb215..86815c6072a1 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -176,7 +176,7 @@ static int live_forcewake_ops(void *arg) return 0; } - wakeref = intel_runtime_pm_get(i915); + wakeref = intel_runtime_pm_get(&i915->runtime_pm); for_each_fw_domain(domain, uncore, tmp) { smp_store_mb(domain->active, false); @@ -247,7 +247,7 @@ static int live_forcewake_ops(void *arg) } out_rpm: - intel_runtime_pm_put(i915, wakeref); + intel_runtime_pm_put(&i915->runtime_pm, wakeref); return err; } -- 2.11.0
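
For reviewers, a minimal sketch of the resulting calling convention, assuming a caller in i915 code that starts from a struct drm_i915_private pointer. The function below is hypothetical and not part of the patch; only the intel_runtime_pm_*() interfaces and the with_intel_runtime_pm() macro come from this series, and the macro still takes the i915 pointer as shown in the intel_runtime_pm.h hunk above.

/*
 * Illustrative sketch only (not part of the patch): a hypothetical caller
 * using the updated interface. Assumes the usual i915 driver context
 * ("i915_drv.h" / "intel_runtime_pm.h" already included).
 */
static void example_poke_hw(struct drm_i915_private *i915)
{
	struct intel_runtime_pm *rpm = &i915->runtime_pm;
	intel_wakeref_t wakeref;

	/* Explicit get/put now operate on the intel_runtime_pm structure. */
	wakeref = intel_runtime_pm_get(rpm);
	/* ... access the hardware ... */
	intel_runtime_pm_put(rpm, wakeref);

	/*
	 * The with_intel_runtime_pm() helper keeps taking the i915 pointer
	 * and dereferences ->runtime_pm internally, so callers of the macro
	 * are unchanged by this patch.
	 */
	with_intel_runtime_pm(i915, wakeref) {
		/* ... access the hardware ... */
	}
}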