
drm/i915/selftests: Always flush before unpinning after writing
Author:     Chris Wilson <chris@chris-wilson.co.uk>
AuthorDate: Mon, 11 May 2020 14:13:03 +0000 (15:13 +0100)
Commit:     Chris Wilson <chris@chris-wilson.co.uk>
CommitDate: Mon, 11 May 2020 15:50:04 +0000 (16:50 +0100)
Be consistent and, even when we know we used a WC mapping, flush the mapped
object after writing into it. The flush understands the mapping type and
will only clflush if !I915_MAP_WC, but it always inserts a wmb [sfence]
so that we can be sure that all writes are visible.

v2: Add the unconditional wmb so we know that we always flush the
writes to memory/HW at that point.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20200511141304.599-1-chris@chris-wilson.co.uk
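
For reference, a minimal sketch of the ordering this patch enforces, assuming an
object written through a WC mapping (the emit_and_flush() wrapper, its obj/gt
parameters and the single-dword batch are illustrative, not code from this
series): write through the mapping, flush while still mapped, unpin, and only
then issue the chipset flush.

static int emit_and_flush(struct drm_i915_gem_object *obj, struct intel_gt *gt)
{
        u32 *cmd;

        cmd = i915_gem_object_pin_map(obj, I915_MAP_WC);
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);

        *cmd = MI_BATCH_BUFFER_END;

        /*
         * Flush while the mapping is still alive: this always inserts a
         * wmb() and, for !I915_MAP_WC mappings, clflushes the dirty range.
         */
        i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);

        /* Only after the CPU-side flush do we push the writes to the GT. */
        intel_gt_chipset_flush(gt);

        return 0;
}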
drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
drivers/gpu/drm/i915/gem/i915_gem_pages.c
drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
drivers/gpu/drm/i915/gt/selftest_ring_submission.c
drivers/gpu/drm/i915/gt/selftest_rps.c
drivers/gpu/drm/i915/selftests/i915_request.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 2fc7737..f457d71 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -78,10 +78,12 @@ struct i915_vma *intel_emit_vma_fill_blt(struct intel_context *ce,
        } while (rem);
 
        *cmd = MI_BATCH_BUFFER_END;
-       intel_gt_chipset_flush(ce->vm->gt);
 
+       i915_gem_object_flush_map(pool->obj);
        i915_gem_object_unpin_map(pool->obj);
 
+       intel_gt_chipset_flush(ce->vm->gt);
+
        batch = i915_vma_instance(pool->obj, ce->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
@@ -289,10 +291,12 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
        } while (rem);
 
        *cmd = MI_BATCH_BUFFER_END;
-       intel_gt_chipset_flush(ce->vm->gt);
 
+       i915_gem_object_flush_map(pool->obj);
        i915_gem_object_unpin_map(pool->obj);
 
+       intel_gt_chipset_flush(ce->vm->gt);
+
        batch = i915_vma_instance(pool->obj, ce->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 5d855fc..af9e48e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -391,6 +391,7 @@ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
        GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
                                     offset, size, obj->base.size));
 
+       wmb(); /* let all previous writes be visible to coherent partners */
        obj->mm.dirty = true;
 
        if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
index 3f6079e..87d7d8a 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_coherency.c
@@ -158,6 +158,8 @@ static int wc_set(struct context *ctx, unsigned long offset, u32 v)
                return PTR_ERR(map);
 
        map[offset / sizeof(*map)] = v;
+
+       __i915_gem_object_flush_map(ctx->obj, offset, sizeof(*map));
        i915_gem_object_unpin_map(ctx->obj);
 
        return 0;
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 8b3925b..e21b502 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -84,6 +84,7 @@ igt_emit_store_dw(struct i915_vma *vma,
        }
        *cmd = MI_BATCH_BUFFER_END;
 
+       i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
 
        intel_gt_chipset_flush(vma->vm->gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
index 9995faa..3350e7c 100644
--- a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -54,6 +54,8 @@ static struct i915_vma *create_wally(struct intel_engine_cs *engine)
        *cs++ = STACK_MAGIC;
 
        *cs++ = MI_BATCH_BUFFER_END;
+
+       i915_gem_object_flush_map(obj);
        i915_gem_object_unpin_map(obj);
 
        vma->private = intel_context_create(engine); /* dummy residuals */
diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c
index bfa1a15..6275d69 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rps.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rps.c
@@ -727,6 +727,7 @@ int live_rps_frequency_cs(void *arg)
 
 err_vma:
                *cancel = MI_BATCH_BUFFER_END;
+               i915_gem_object_flush_map(vma->obj);
                i915_gem_object_unpin_map(vma->obj);
                i915_vma_unpin(vma);
                i915_vma_put(vma);
@@ -868,6 +869,7 @@ int live_rps_frequency_srm(void *arg)
 
 err_vma:
                *cancel = MI_BATCH_BUFFER_END;
+               i915_gem_object_flush_map(vma->obj);
                i915_gem_object_unpin_map(vma->obj);
                i915_vma_unpin(vma);
                i915_vma_put(vma);
diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c
index ffdfcb3..6014e8d 100644
--- a/drivers/gpu/drm/i915/selftests/i915_request.c
+++ b/drivers/gpu/drm/i915/selftests/i915_request.c
@@ -816,10 +816,12 @@ static int recursive_batch_resolve(struct i915_vma *batch)
                return PTR_ERR(cmd);
 
        *cmd = MI_BATCH_BUFFER_END;
-       intel_gt_chipset_flush(batch->vm->gt);
 
+       __i915_gem_object_flush_map(batch->obj, 0, sizeof(*cmd));
        i915_gem_object_unpin_map(batch->obj);
 
+       intel_gt_chipset_flush(batch->vm->gt);
+
        return 0;
 }
 
@@ -1060,9 +1062,12 @@ out_request:
                                              I915_MAP_WC);
                if (!IS_ERR(cmd)) {
                        *cmd = MI_BATCH_BUFFER_END;
-                       intel_gt_chipset_flush(engine->gt);
 
+                       __i915_gem_object_flush_map(request[idx]->batch->obj,
+                                                   0, sizeof(*cmd));
                        i915_gem_object_unpin_map(request[idx]->batch->obj);
+
+                       intel_gt_chipset_flush(engine->gt);
                }
 
                i915_vma_put(request[idx]->batch);