drm/amdkfd: refactor runtime pm for baco
author     Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
           Wed, 22 Jan 2020 00:11:03 +0000 (19:11 -0500)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 12 Feb 2020 21:00:54 +0000 (16:00 -0500)
So far the kfd driver has implemented the same routines for runtime and
system-wide suspend and resume (s2idle or mem). During system-wide suspend
the kfd acquires an atomic lock that prevents any further user processes
from creating queues and interacting with the kfd driver and the amd gpu.
This mechanism creates a problem when the amdgpu device is runtime
suspended with BACO enabled: any application that relies on the kfd driver
fails to load, because the driver reports a locked kfd device while the
gpu is only runtime suspended.
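
For reference, the "atomic lock" mentioned above is a global suspend
counter rather than a mutex. A standalone sketch of that pattern (not the
kernel code; C11 atomics and illustrative function names stand in for the
kernel's atomic_t helpers):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int kfd_locked;

static bool kfd_is_locked(void)
{
        /* user-facing entry points bail out while any device is suspended */
        return atomic_load(&kfd_locked) > 0;
}

static void suspend_one_device(void)
{
        /* the first device to suspend evicts all user queues */
        if (atomic_fetch_add(&kfd_locked, 1) + 1 == 1)
                ; /* kfd_suspend_all_processes() in the real driver */
}

static void resume_one_device(void)
{
        /* the last device to resume lets user processes back in */
        if (atomic_fetch_sub(&kfd_locked, 1) - 1 == 0)
                ; /* kfd_resume_all_processes() in the real driver */
}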

However, in an ideal case, when the gpu is runtime suspended the kfd
driver should be able to:

 - auto resume the amdgpu driver whenever a client requests compute service
 - prevent runtime suspend of amdgpu while kfd is in use

This change refactors the amdgpu and amdkfd drivers to support BACO and
runtime power management.
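
In sketch form, the refactor takes a runtime-pm reference the first time a
process binds to a device and drops it when the per-device data is
destroyed, so the GPU auto-resumes on demand and cannot autosuspend while
compute work is bound. The snippet below is a simplified illustration of
that pattern, not the driver code: struct pdd_like and the two function
names are invented for the example, while the pm_runtime_* calls are the
standard kernel runtime-pm API.

#include <linux/pm_runtime.h>

struct pdd_like {
        struct device *dev;     /* the GPU's struct device */
        bool runtime_inuse;     /* one runtime-pm reference per process/device pair */
};

static int bind_takes_rpm_ref(struct pdd_like *pdd)
{
        int err;

        if (!pdd->runtime_inuse) {
                /* auto-resume the GPU (e.g. out of BACO) and keep it awake */
                err = pm_runtime_get_sync(pdd->dev);
                if (err < 0) {
                        /* get_sync bumps the usage count even on failure */
                        pm_runtime_put_noidle(pdd->dev);
                        return err;
                }
                pdd->runtime_inuse = true;
        }
        return 0;
}

static void drop_rpm_ref(struct pdd_like *pdd)
{
        if (pdd->runtime_inuse) {
                /* report availability for autosuspend again */
                pm_runtime_mark_last_busy(pdd->dev);
                pm_runtime_put_autosuspend(pdd->dev);
                pdd->runtime_inuse = false;
        }
}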

Reviewed-by: Oak Zeng <oak.zeng@amd.com>
Reviewed-by: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Rajneesh Bhardwaj <rajneesh.bhardwaj@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdkfd/kfd_device.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8609287..314c4a2 100644
@@ -178,18 +178,18 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
 }
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
 {
        if (adev->kfd.dev)
-               kgd2kfd_suspend(adev->kfd.dev);
+               kgd2kfd_suspend(adev->kfd.dev, run_pm);
 }
 
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
 {
        int r = 0;
 
        if (adev->kfd.dev)
-               r = kgd2kfd_resume(adev->kfd.dev);
+               r = kgd2kfd_resume(adev->kfd.dev, run_pm);
 
        return r;
 }
@@ -713,11 +713,11 @@ void kgd2kfd_exit(void)
 {
 }
 
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
 }
 
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
        return 0;
 }
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 47b0f29..9e8db70 100644
@@ -122,8 +122,8 @@ struct amdkfd_process_info {
 int amdgpu_amdkfd_init(void);
 void amdgpu_amdkfd_fini(void);
 
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
 void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
                        const void *ih_ring_entry);
 void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -249,8 +249,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
                         struct drm_device *ddev,
                         const struct kgd2kfd_shared_resources *gpu_resources);
 void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd);
-int kgd2kfd_resume(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
 int kgd2kfd_pre_reset(struct kfd_dev *kfd);
 int kgd2kfd_post_reset(struct kfd_dev *kfd);
 void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 31fe28e..9d5100f 100644
@@ -3341,7 +3341,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
                }
        }
 
-       amdgpu_amdkfd_suspend(adev);
+       amdgpu_amdkfd_suspend(adev, !fbcon);
 
        amdgpu_ras_suspend(adev);
 
@@ -3425,7 +3425,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
                        }
                }
        }
-       r = amdgpu_amdkfd_resume(adev);
+       r = amdgpu_amdkfd_resume(adev, !fbcon);
        if (r)
                return r;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 2a9e401..a3d7ec0 100644
@@ -710,7 +710,7 @@ out:
 void kgd2kfd_device_exit(struct kfd_dev *kfd)
 {
        if (kfd->init_complete) {
-               kgd2kfd_suspend(kfd);
+               kgd2kfd_suspend(kfd, false);
                device_queue_manager_uninit(kfd->dqm);
                kfd_interrupt_exit(kfd);
                kfd_topology_remove_device(kfd);
@@ -731,7 +731,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
 
        kfd->dqm->ops.pre_reset(kfd->dqm);
 
-       kgd2kfd_suspend(kfd);
+       kgd2kfd_suspend(kfd, false);
 
        kfd_signal_reset_event(kfd);
        return 0;
@@ -765,21 +765,23 @@ bool kfd_is_locked(void)
        return  (atomic_read(&kfd_locked) > 0);
 }
 
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
 {
        if (!kfd->init_complete)
                return;
 
-       /* For first KFD device suspend all the KFD processes */
-       if (atomic_inc_return(&kfd_locked) == 1)
-               kfd_suspend_all_processes();
+       /* for runtime suspend, skip locking kfd */
+       if (!run_pm) {
+               /* For first KFD device suspend all the KFD processes */
+               if (atomic_inc_return(&kfd_locked) == 1)
+                       kfd_suspend_all_processes();
+       }
 
        kfd->dqm->ops.stop(kfd->dqm);
-
        kfd_iommu_suspend(kfd);
 }
 
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
 {
        int ret, count;
 
@@ -790,10 +792,13 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
        if (ret)
                return ret;
 
-       count = atomic_dec_return(&kfd_locked);
-       WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
-       if (count == 0)
-               ret = kfd_resume_all_processes();
+       /* for runtime resume, skip unlocking kfd */
+       if (!run_pm) {
+               count = atomic_dec_return(&kfd_locked);
+               WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+               if (count == 0)
+                       ret = kfd_resume_all_processes();
+       }
 
        return ret;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 9a06eb6..f326e31 100644
@@ -650,6 +650,7 @@ struct kfd_process_device {
         * function.
         */
        bool already_dequeued;
+       bool runtime_inuse;
 
        /* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
        enum kfd_pdd_bound bound;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 98dcbb9..22abdbc 100644
@@ -31,6 +31,7 @@
 #include <linux/compat.h>
 #include <linux/mman.h>
 #include <linux/file.h>
+#include <linux/pm_runtime.h>
 #include "amdgpu_amdkfd.h"
 #include "amdgpu.h"
 
@@ -527,6 +528,16 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
                kfree(pdd->qpd.doorbell_bitmap);
                idr_destroy(&pdd->alloc_idr);
 
+               /*
+                * before destroying pdd, make sure to report availability
+                * for auto suspend
+                */
+               if (pdd->runtime_inuse) {
+                       pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
+                       pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
+                       pdd->runtime_inuse = false;
+               }
+
                kfree(pdd);
        }
 }
@@ -844,6 +855,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
        pdd->process = p;
        pdd->bound = PDD_UNBOUND;
        pdd->already_dequeued = false;
+       pdd->runtime_inuse = false;
        list_add(&pdd->per_device_list, &p->per_device_data);
 
        /* Init idr used for memory handle translation */
@@ -933,15 +945,41 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
                return ERR_PTR(-ENOMEM);
        }
 
+       /*
+        * signal runtime-pm system to auto resume and prevent
+        * further runtime suspend once device pdd is created until
+        * pdd is destroyed.
+        */
+       if (!pdd->runtime_inuse) {
+               err = pm_runtime_get_sync(dev->ddev->dev);
+               if (err < 0)
+                       return ERR_PTR(err);
+       }
+
        err = kfd_iommu_bind_process_to_device(pdd);
        if (err)
-               return ERR_PTR(err);
+               goto out;
 
        err = kfd_process_device_init_vm(pdd, NULL);
        if (err)
-               return ERR_PTR(err);
+               goto out;
+
+       /*
+        * make sure that runtime_usage counter is incremented just once
+        * per pdd
+        */
+       pdd->runtime_inuse = true;
 
        return pdd;
+
+out:
+       /* balance runpm reference count and exit with error */
+       if (!pdd->runtime_inuse) {
+               pm_runtime_mark_last_busy(dev->ddev->dev);
+               pm_runtime_put_autosuspend(dev->ddev->dev);
+       }
+
+       return ERR_PTR(err);
 }
 
 struct kfd_process_device *kfd_get_first_process_device_data(