OSDN Git Service

drm/amdgpu: add [en/dis]able_kgq() functions
Author: Alex Deucher <alexander.deucher@amd.com>
Author date: Mon, 24 Apr 2023 18:24:20 +0000 (14:24 -0400)
Committer: Alex Deucher <alexander.deucher@amd.com>
Commit date: Fri, 9 Jun 2023 13:28:28 +0000 (09:28 -0400)
To replace the IP specific variants which are largely
duplicate.

Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h

index 66b9740..90f5d30 100644 (file)
@@ -519,6 +519,39 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id)
        return r;
 }
 
+int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id)
+{
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+       struct amdgpu_ring *kiq_ring = &kiq->ring;
+       int i, r = 0;
+       int j;
+
+       if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+               return -EINVAL;
+
+       spin_lock(&kiq->ring_lock);
+       if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+               if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
+                                               adev->gfx.num_gfx_rings)) {
+                       spin_unlock(&kiq->ring_lock);
+                       return -ENOMEM;
+               }
+
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+                       j = i + xcc_id * adev->gfx.num_gfx_rings;
+                       kiq->pmf->kiq_unmap_queues(kiq_ring,
+                                                  &adev->gfx.gfx_ring[i],
+                                                  PREEMPT_QUEUES, 0, 0);
+               }
+       }
+
+       if (adev->gfx.kiq[0].ring.sched.ready && !adev->job_hang)
+               r = amdgpu_ring_test_helper(kiq_ring);
+       spin_unlock(&kiq->ring_lock);
+
+       return r;
+}
+
 int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
                                        int queue_bit)
 {
@@ -590,6 +623,41 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
        return r;
 }
 
+int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
+{
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq[xcc_id];
+       struct amdgpu_ring *kiq_ring = &kiq->ring;
+       int r, i, j;
+
+       if (!kiq->pmf || !kiq->pmf->kiq_map_queues)
+               return -EINVAL;
+
+       spin_lock(&kiq->ring_lock);
+       /* No need to map kcq on the slave */
+       if (amdgpu_gfx_is_master_xcc(adev, xcc_id)) {
+               r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
+                                               adev->gfx.num_gfx_rings);
+               if (r) {
+                       DRM_ERROR("Failed to lock KIQ (%d).\n", r);
+                       spin_unlock(&adev->gfx.kiq[0].ring_lock);
+                       return r;
+               }
+
+               for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
+                       j = i + xcc_id * adev->gfx.num_gfx_rings;
+                       kiq->pmf->kiq_map_queues(kiq_ring,
+                                                &adev->gfx.gfx_ring[i]);
+               }
+       }
+
+       r = amdgpu_ring_test_helper(kiq_ring);
+       spin_unlock(&kiq->ring_lock);
+       if (r)
+               DRM_ERROR("KCQ enable failed\n");
+
+       return r;
+}
+
 /* amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
  *
  * @adev: amdgpu_device pointer
index f1a2ce7..2755f00 100644 (file)
@@ -433,6 +433,8 @@ int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
 void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id);
 int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id);
 int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id);
+int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id);
 
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);
 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev);