OSDN Git Service

drm/ttm: optionally move duplicates to a separate list
author: Christian König <christian.koenig@amd.com>
Wed, 3 Dec 2014 14:46:48 +0000 (15:46 +0100)
committer: Alex Deucher <alexander.deucher@amd.com>
Wed, 3 Dec 2014 23:26:52 +0000 (18:26 -0500)
This patch adds an optional list_head parameter to ttm_eu_reserve_buffers.
If specified, duplicates in the execbuf list are no longer reported as errors;
they are moved to this list instead.

Reviewed-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/qxl/qxl_release.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
include/drm/ttm/ttm_execbuf_util.h

index 446e71c..d9b2568 100644 (file)
@@ -264,7 +264,8 @@ int qxl_release_reserve_list(struct qxl_release *release, bool no_intr)
        if (list_is_singular(&release->bos))
                return 0;
 
-       ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos, !no_intr);
+       ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos,
+                                    !no_intr, NULL);
        if (ret)
                return ret;
 
index 6162bd2..fe48f22 100644 (file)
@@ -564,7 +564,7 @@ static void radeon_gem_va_update_vm(struct radeon_device *rdev,
        if (!vm_bos)
                return;
 
-       r = ttm_eu_reserve_buffers(&ticket, &list, true);
+       r = ttm_eu_reserve_buffers(&ticket, &list, true, NULL);
        if (r)
                goto error_free;
 
index 4ab0747..a4a3ac8 100644 (file)
@@ -508,7 +508,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
        u64 bytes_moved = 0, initial_bytes_moved;
        u64 bytes_moved_threshold = radeon_bo_get_threshold_for_moves(rdev);
 
-       r = ttm_eu_reserve_buffers(ticket, head, true);
+       r = ttm_eu_reserve_buffers(ticket, head, true, NULL);
        if (unlikely(r != 0)) {
                return r;
        }
index 8ce508e..3820ae9 100644 (file)
@@ -93,7 +93,8 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  */
 
 int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-                          struct list_head *list, bool intr)
+                          struct list_head *list, bool intr,
+                          struct list_head *dups)
 {
        struct ttm_bo_global *glob;
        struct ttm_validate_buffer *entry;
@@ -117,6 +118,13 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
                        __ttm_bo_unreserve(bo);
 
                        ret = -EBUSY;
+
+               } else if (ret == -EALREADY && dups) {
+                       struct ttm_validate_buffer *safe = entry;
+                       entry = list_prev_entry(entry, head);
+                       list_del(&safe->head);
+                       list_add(&safe->head, dups);
+                       continue;
                }
 
                if (!ret) {
index 596cd6d..33176d0 100644 (file)
@@ -2487,7 +2487,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
        if (unlikely(ret != 0))
                goto out_err_nores;
 
-       ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, true);
+       ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
+                                    true, NULL);
        if (unlikely(ret != 0))
                goto out_err;
 
@@ -2677,7 +2678,8 @@ void __vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
        query_val.shared = false;
        list_add_tail(&query_val.head, &validate_list);
 
-       ret = ttm_eu_reserve_buffers(&ticket, &validate_list, false);
+       ret = ttm_eu_reserve_buffers(&ticket, &validate_list,
+                                    false, NULL);
        if (unlikely(ret != 0)) {
                vmw_execbuf_unpin_panic(dev_priv);
                goto out_no_reserve;
index 026de7c..210ef15 100644 (file)
@@ -1222,7 +1222,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        val_buf->shared = false;
        list_add_tail(&val_buf->head, &val_list);
-       ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible);
+       ret = ttm_eu_reserve_buffers(NULL, &val_list, interruptible, NULL);
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
index 4604417..b620c31 100644 (file)
@@ -68,6 +68,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  *           non-blocking reserves should be tried.
  * @list:    thread private list of ttm_validate_buffer structs.
  * @intr:    should the wait be interruptible
+ * @dups:    [out] optional list of duplicates.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
  * If the function returns 0, all buffers are marked as "unfenced",
@@ -83,6 +84,11 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  * calling process receives a signal while waiting. In that case, no
  * buffers on the list will be reserved upon return.
  *
+ * If dups is non NULL all buffers already reserved by the current thread
+ * (e.g. duplicates) are added to this list, otherwise -EALREADY is returned
+ * on the first already reserved buffer and all buffers from the list are
+ * unreserved again.
+ *
  * Buffers reserved by this function should be unreserved by
  * a call to either ttm_eu_backoff_reservation() or
  * ttm_eu_fence_buffer_objects() when command submission is complete or
@@ -90,7 +96,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
  */
 
 extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
-                                 struct list_head *list, bool intr);
+                                 struct list_head *list, bool intr,
+                                 struct list_head *dups);
 
 /**
  * function ttm_eu_fence_buffer_objects.