drm/amdgpu: cleanup GMC v9 TLB invalidation
author		Christian König <christian.koenig@amd.com>
		Thu, 25 Oct 2018 08:49:07 +0000 (10:49 +0200)
committer	Alex Deucher <alexander.deucher@amd.com>
		Mon, 5 Nov 2018 20:49:40 +0000 (15:49 -0500)
Move the KIQ register write/wait handling into amdgpu_virt.c and drop the
MMIO fallback that was used when the KIQ submission failed (see the
call-site sketch below).

Signed-off-by: Christian König <christian.koenig@amd.com>
Reviewed-by: Emily Deng <Emily.Deng@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
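
For context, the new helper ends up being called from the GMC v9 TLB flush
path as shown in the gmc_v9_0.c hunk further down; a minimal sketch of that
call site (hub, eng, tmp and vmid are the flush-path locals visible in the
diff):

	/* Write the invalidate request register and wait for the ack
	 * register through the KIQ; there is no MMIO fallback on
	 * failure any more. */
	uint32_t req = hub->vm_inv_eng0_req + eng;
	uint32_t ack = hub->vm_inv_eng0_ack + eng;

	amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp, 1 << vmid);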
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index ff88763..cfee747 100644
@@ -132,6 +132,46 @@ failed_kiq_write:
        pr_err("failed to write reg:%x\n", reg);
 }
 
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+                                       uint32_t reg0, uint32_t reg1,
+                                       uint32_t ref, uint32_t mask)
+{
+       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+       struct amdgpu_ring *ring = &kiq->ring;
+       signed long r, cnt = 0;
+       unsigned long flags;
+       uint32_t seq;
+
+       spin_lock_irqsave(&kiq->ring_lock, flags);
+       amdgpu_ring_alloc(ring, 32);
+       amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
+                                           ref, mask);
+       amdgpu_fence_emit_polling(ring, &seq);
+       amdgpu_ring_commit(ring);
+       spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+       /* don't wait anymore for IRQ context */
+       if (r < 1 && in_interrupt())
+               goto failed_kiq;
+
+       might_sleep();
+       while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+
+               msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+               r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+       }
+
+       if (cnt > MAX_KIQ_REG_TRY)
+               goto failed_kiq;
+
+       return;
+
+failed_kiq:
+       pr_err("failed to write reg %x wait reg %x\n", reg0, reg1);
+}
+
 /**
  * amdgpu_virt_request_full_gpu() - request full gpu access
  * @amdgpu:    amdgpu device.
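
The helper added above reuses the submit-and-poll pattern of the existing
amdgpu_virt_kiq_rreg()/amdgpu_virt_kiq_wreg() helpers in this file: emit
the packet under the KIQ ring lock, then poll the fence outside it. An
annotated sketch of that pattern (the MAX_KIQ_REG_* constants are
driver-wide limits defined elsewhere in amdgpu, not in this diff):

	/* Submission must hold the KIQ ring lock, but only briefly. */
	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);		/* reserve packet space */
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1, ref, mask);
	amdgpu_fence_emit_polling(ring, &seq);	/* fence to poll on */
	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	/* Poll outside the lock: bail out after one timeout in IRQ
	 * context, otherwise sleep and retry up to MAX_KIQ_REG_TRY
	 * times before reporting failure. */
	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);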
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index cf46dfb..0728fbc 100644
@@ -278,6 +278,9 @@ bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev);
 void amdgpu_virt_init_setting(struct amdgpu_device *adev);
 uint32_t amdgpu_virt_kiq_rreg(struct amdgpu_device *adev, uint32_t reg);
 void amdgpu_virt_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v);
+void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
+                                       uint32_t reg0, uint32_t reg1,
+                                       uint32_t ref, uint32_t mask);
 int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init);
 int amdgpu_virt_reset_gpu(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 6cedf7e..4845b6a 100644
@@ -312,48 +312,6 @@ static uint32_t gmc_v9_0_get_invalidate_req(unsigned int vmid,
        return req;
 }
 
-static signed long  amdgpu_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
-                                                 uint32_t reg0, uint32_t reg1,
-                                                 uint32_t ref, uint32_t mask)
-{
-       signed long r, cnt = 0;
-       unsigned long flags;
-       uint32_t seq;
-       struct amdgpu_kiq *kiq = &adev->gfx.kiq;
-       struct amdgpu_ring *ring = &kiq->ring;
-
-       spin_lock_irqsave(&kiq->ring_lock, flags);
-
-       amdgpu_ring_alloc(ring, 32);
-       amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
-                                           ref, mask);
-       amdgpu_fence_emit_polling(ring, &seq);
-       amdgpu_ring_commit(ring);
-       spin_unlock_irqrestore(&kiq->ring_lock, flags);
-
-       r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-
-       /* don't wait anymore for IRQ context */
-       if (r < 1 && in_interrupt())
-               goto failed_kiq;
-
-       might_sleep();
-
-       while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
-               msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
-               r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
-       }
-
-       if (cnt > MAX_KIQ_REG_TRY)
-               goto failed_kiq;
-
-       return 0;
-
-failed_kiq:
-       pr_err("failed to invalidate tlb with kiq\n");
-       return r;
-}
-
 /*
  * GART
  * VMID 0 is the physical GPU addresses as used by the kernel.
@@ -375,7 +333,6 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
 {
        const unsigned eng = 17;
        unsigned i, j;
-       int r;
 
        for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
                struct amdgpu_vmhub *hub = &adev->vmhub[i];
@@ -384,10 +341,12 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev,
                if (adev->gfx.kiq.ring.sched.ready &&
                    (amdgpu_sriov_runtime(adev) || !amdgpu_sriov_vf(adev)) &&
                    !adev->in_gpu_reset) {
-                       r = amdgpu_kiq_reg_write_reg_wait(adev, hub->vm_inv_eng0_req + eng,
-                               hub->vm_inv_eng0_ack + eng, tmp, 1 << vmid);
-                       if (!r)
-                               continue;
+                       uint32_t req = hub->vm_inv_eng0_req + eng;
+                       uint32_t ack = hub->vm_inv_eng0_ack + eng;
+
+                       amdgpu_virt_kiq_reg_write_reg_wait(adev, req, ack, tmp,
+                                                          1 << vmid);
+                       continue;
                }
 
                spin_lock(&adev->gmc.invalidate_lock);
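
The hunk ends in the non-KIQ path: on bare metal or during GPU reset the
flush falls through to a direct MMIO request/ack poll under
invalidate_lock. A sketch of that remaining path, assuming the surrounding
gmc_v9_0.c code that this hunk does not show (the
WREG32_NO_KIQ/RREG32_NO_KIQ accessors and the usec_timeout bound are taken
from the wider driver, not from this diff):

	/* Direct MMIO invalidation: write the request, then poll the
	 * ack register until this VMID's bit is set or we time out. */
	spin_lock(&adev->gmc.invalidate_lock);
	WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, tmp);
	for (j = 0; j < adev->usec_timeout; j++) {
		if (RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng) & (1 << vmid))
			break;
		udelay(1);
	}
	spin_unlock(&adev->gmc.invalidate_lock);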