
drm/amdgpu: implement lru amdgpu_queue_mgr policy for compute v4
author     Andres Rodriguez <andresx7@gmail.com>
           Mon, 6 Mar 2017 21:27:55 +0000 (16:27 -0500)
committer  Alex Deucher <alexander.deucher@amd.com>
           Wed, 31 May 2017 20:49:02 +0000 (16:49 -0400)
Use an LRU policy to map usermode rings to HW compute queues.

Most compute clients use one queue, and usually the first queue
available. This results in poor pipe/queue work distribution when
multiple compute apps are running. In most cases pipe 0 queue 0 is
the only queue that gets used.

In order to better distribute work across multiple HW queues, we adopt
a policy to map the usermode ring ids to the LRU HW queue.

This fixes the common case where multi-app compute workloads all end up
sharing a single HW queue, even though 7 other queues are available.
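
For illustration only (not part of this commit): a minimal, standalone C
sketch of the same policy. It models the ring LRU as a circular doubly
linked list, with simplified helpers standing in for the kernel's list.h
and no locking. Lookup scans from the head (least recently used) and a
match is moved to the tail (most recently used), so successive clients
spread across pipe/queue pairs.

/*
 * Minimal userspace sketch of the LRU ring policy (illustrative only;
 * the kernel code below uses list.h and a spinlock instead).
 */
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct node { struct node *prev, *next; };

struct ring {
        struct node lru;        /* position in the LRU list */
        int type;               /* ring type, e.g. compute */
        int pipe, queue;        /* identity, for printing */
};

static void list_init(struct node *h) { h->prev = h->next = h; }

static void list_del(struct node *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
}

static void list_add_tail(struct node *n, struct node *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

/* Moving an entry to the tail marks it most recently used. */
static void list_move_tail(struct node *n, struct node *h)
{
        list_del(n);
        list_add_tail(n, h);
}

/* Scan from the head: the first matching entry is the LRU one. */
static struct ring *lru_get(struct node *head, int type)
{
        struct node *n;

        for (n = head->next; n != head; n = n->next) {
                struct ring *r = container_of(n, struct ring, lru);

                if (r->type == type) {
                        list_move_tail(&r->lru, head);
                        return r;
                }
        }
        return NULL;
}

int main(void)
{
        struct ring rings[] = {
                { .type = 1, .pipe = 0, .queue = 0 },
                { .type = 1, .pipe = 0, .queue = 1 },
                { .type = 1, .pipe = 1, .queue = 0 },
                { .type = 1, .pipe = 1, .queue = 1 },
        };
        struct node head;
        int i;

        list_init(&head);
        for (i = 0; i < 4; i++)
                list_add_tail(&rings[i].lru, &head);

        /* Successive clients land on different pipe/queue pairs. */
        for (i = 0; i < 6; i++) {
                struct ring *r = lru_get(&head, 1);

                printf("client %d -> pipe %d queue %d\n", i, r->pipe, r->queue);
        }
        return 0;
}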

v2: use ring->funcs->type instead of ring->hw_ip
v3: remove amdgpu_queue_mapper_funcs
v4: change ring_lru_list_lock to spinlock, grab only once in lru_get()

Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu.h
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h

diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index aad1d7b..96cbe02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1656,6 +1656,9 @@ struct amdgpu_device {
        /* link all gtt */
        spinlock_t                      gtt_list_lock;
        struct list_head                gtt_list;
+       /* keep an lru list of rings by HW IP */
+       struct list_head                ring_lru_list;
+       spinlock_t                      ring_lru_list_lock;
 
        /* record hw reset is performed */
        bool has_hw_reset;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index e731c48..cce94d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2073,6 +2073,9 @@ int amdgpu_device_init(struct amdgpu_device *adev,
        INIT_LIST_HEAD(&adev->gtt_list);
        spin_lock_init(&adev->gtt_list_lock);
 
+       INIT_LIST_HEAD(&adev->ring_lru_list);
+       spin_lock_init(&adev->ring_lru_list_lock);
+
        INIT_DELAYED_WORK(&adev->late_init_work, amdgpu_late_init_func_handler);
 
        if (adev->asic_type >= CHIP_BONAIRE) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index c13a553..4073f07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -100,6 +100,40 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
        return amdgpu_update_cached_map(mapper, ring, *out_ring);
 }
 
+static enum amdgpu_ring_type amdgpu_hw_ip_to_ring_type(int hw_ip)
+{
+       switch (hw_ip) {
+       case AMDGPU_HW_IP_GFX:
+               return AMDGPU_RING_TYPE_GFX;
+       case AMDGPU_HW_IP_COMPUTE:
+               return AMDGPU_RING_TYPE_COMPUTE;
+       case AMDGPU_HW_IP_DMA:
+               return AMDGPU_RING_TYPE_SDMA;
+       case AMDGPU_HW_IP_UVD:
+               return AMDGPU_RING_TYPE_UVD;
+       case AMDGPU_HW_IP_VCE:
+               return AMDGPU_RING_TYPE_VCE;
+       default:
+               DRM_ERROR("Invalid HW IP specified %d\n", hw_ip);
+               return -1;
+       }
+}
+
+static int amdgpu_lru_map(struct amdgpu_device *adev,
+                         struct amdgpu_queue_mapper *mapper,
+                         int user_ring,
+                         struct amdgpu_ring **out_ring)
+{
+       int r;
+       int ring_type = amdgpu_hw_ip_to_ring_type(mapper->hw_ip);
+
+       r = amdgpu_ring_lru_get(adev, ring_type, out_ring);
+       if (r)
+               return r;
+
+       return amdgpu_update_cached_map(mapper, user_ring, *out_ring);
+}
+
 /**
  * amdgpu_queue_mgr_init - init an amdgpu_queue_mgr struct
  *
@@ -230,7 +264,6 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
 
        switch (mapper->hw_ip) {
        case AMDGPU_HW_IP_GFX:
-       case AMDGPU_HW_IP_COMPUTE:
        case AMDGPU_HW_IP_DMA:
        case AMDGPU_HW_IP_UVD:
        case AMDGPU_HW_IP_VCE:
@@ -239,6 +272,9 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
        case AMDGPU_HW_IP_VCN_ENC:
                r = amdgpu_identity_map(adev, mapper, ring, out_ring);
                break;
+       case AMDGPU_HW_IP_COMPUTE:
+               r = amdgpu_lru_map(adev, mapper, ring, out_ring);
+               break;
        default:
                *out_ring = NULL;
                r = -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index 7d95435..f1076e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -135,6 +135,8 @@ void amdgpu_ring_commit(struct amdgpu_ring *ring)
 
        if (ring->funcs->end_use)
                ring->funcs->end_use(ring);
+
+       amdgpu_ring_lru_touch(ring->adev, ring);
 }
 
 /**
@@ -283,6 +285,8 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
        }
 
        ring->max_dw = max_dw;
+       INIT_LIST_HEAD(&ring->lru_list);
+       amdgpu_ring_lru_touch(adev, ring);
 
        if (amdgpu_debugfs_ring_init(adev, ring)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
@@ -327,6 +331,65 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
        ring->adev->rings[ring->idx] = NULL;
 }
 
+static void amdgpu_ring_lru_touch_locked(struct amdgpu_device *adev,
+                                        struct amdgpu_ring *ring)
+{
+       /* list_move_tail handles the case where ring isn't part of the list */
+       list_move_tail(&ring->lru_list, &adev->ring_lru_list);
+}
+
+/**
+ * amdgpu_ring_lru_get - get the least recently used ring for a HW IP block
+ *
+ * @adev: amdgpu_device pointer
+ * @type: amdgpu_ring_type enum
+ * @ring: output ring
+ *
+ * Retrieve the amdgpu_ring structure for the least recently used ring of
+ * a specific IP block (all asics).
+ * Returns 0 on success, error on failure.
+ */
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+                       struct amdgpu_ring **ring)
+{
+       struct amdgpu_ring *entry;
+
+       /* List is sorted in LRU order, find first entry corresponding
+        * to the desired HW IP */
+       *ring = NULL;
+       spin_lock(&adev->ring_lru_list_lock);
+       list_for_each_entry(entry, &adev->ring_lru_list, lru_list) {
+               if (entry->funcs->type == type) {
+                       *ring = entry;
+                       amdgpu_ring_lru_touch_locked(adev, *ring);
+                       break;
+               }
+       }
+       spin_unlock(&adev->ring_lru_list_lock);
+
+       if (!*ring) {
+               DRM_ERROR("Ring LRU contains no entries for ring type:%d\n", type);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/**
+ * amdgpu_ring_lru_touch - mark a ring as recently being used
+ *
+ * @adev: amdgpu_device pointer
+ * @ring: ring to touch
+ *
+ * Move @ring to the tail of the lru list
+ */
+void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring)
+{
+       spin_lock(&adev->ring_lru_list_lock);
+       amdgpu_ring_lru_touch_locked(adev, ring);
+       spin_unlock(&adev->ring_lru_list_lock);
+}
+
 /*
  * Debugfs info
  */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 334307e..577528a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -154,6 +154,7 @@ struct amdgpu_ring {
        const struct amdgpu_ring_funcs  *funcs;
        struct amdgpu_fence_driver      fence_drv;
        struct amd_gpu_scheduler        sched;
+       struct list_head                lru_list;
 
        struct amdgpu_bo        *ring_obj;
        volatile uint32_t       *ring;
@@ -200,6 +201,9 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
                     unsigned ring_size, struct amdgpu_irq_src *irq_src,
                     unsigned irq_type);
 void amdgpu_ring_fini(struct amdgpu_ring *ring);
+int amdgpu_ring_lru_get(struct amdgpu_device *adev, int type,
+                       struct amdgpu_ring **ring);
+void amdgpu_ring_lru_touch(struct amdgpu_device *adev, struct amdgpu_ring *ring);
 static inline void amdgpu_ring_clear_ring(struct amdgpu_ring *ring)
 {
        int i = 0;
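
Usage note (illustrative, not part of this commit): with the declarations
above, a hypothetical in-kernel caller holding an adev pointer could pick
the least recently used compute ring as follows. In this commit the only
such caller is amdgpu_lru_map() in amdgpu_queue_mgr.c, and
amdgpu_ring_commit() re-touches the LRU entry after every submission.

        /* Hypothetical caller sketch; error handling abbreviated. */
        struct amdgpu_ring *ring;
        int r;

        r = amdgpu_ring_lru_get(adev, AMDGPU_RING_TYPE_COMPUTE, &ring);
        if (r)
                return r;       /* no compute ring registered in the LRU */

        /* ... write commands to the ring ... */

        amdgpu_ring_commit(ring);       /* also calls amdgpu_ring_lru_touch() */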