
drm/i915: Eliminate dual personality of i915_scratch_offset
author Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Fri, 21 Jun 2019 07:08:11 +0000 (08:08 +0100)
committer Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Fri, 21 Jun 2019 12:49:00 +0000 (13:49 +0100)
The scratch vma lives under gt, but the API used to work on i915. Make this
consistent by renaming the function to intel_gt_scratch_offset and making it
take struct intel_gt.

v2:
 * Move to intel_gt. (Chris)

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Link: https://patchwork.freedesktop.org/patch/msgid/20190621070811.7006-33-tvrtko.ursulin@linux.intel.com
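
In short: the helper moves from i915_drv.h into gt/intel_gt.h and now takes the
struct intel_gt that owns the scratch vma, so callers pass engine->gt (or
rq->engine->gt) instead of the whole device. A before/after sketch assembled
from the hunks below; it is illustrative only, the types and macros come from
the i915 headers:

	/* Before: defined in i915_drv.h, reached the scratch vma via the device. */
	static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
	{
		return i915_ggtt_offset(i915->gt.scratch);
	}

	/* After: defined in gt/intel_gt.h, takes the GT that owns the scratch vma. */
	static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt)
	{
		return i915_ggtt_offset(gt->scratch);
	}

	/* Typical call-site update, e.g. in the gen6+ render flush paths: */
	u32 scratch_addr =
		intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
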
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_gt.c
drivers/gpu/drm/i915/gt/intel_gt.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gpu_error.c

drivers/gpu/drm/i915/gt/intel_engine_cs.c
index e30212e..4961f74 100644 (file)
@@ -734,7 +734,7 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
        struct measure_breadcrumb *frame;
        int dw = -ENOMEM;
 
-       GEM_BUG_ON(!engine->i915->gt.scratch);
+       GEM_BUG_ON(!engine->gt->scratch);
 
        frame = kzalloc(sizeof(*frame), GFP_KERNEL);
        if (!frame)
drivers/gpu/drm/i915/gt/intel_gt.c
index e22ee3e..8cca6b2 100644 (file)
@@ -203,3 +203,41 @@ void intel_gt_chipset_flush(struct intel_gt *gt)
        if (INTEL_GEN(gt->i915) < 6)
                intel_gtt_chipset_flush();
 }
+
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
+{
+       struct drm_i915_private *i915 = gt->i915;
+       struct drm_i915_gem_object *obj;
+       struct i915_vma *vma;
+       int ret;
+
+       obj = i915_gem_object_create_stolen(i915, size);
+       if (!obj)
+               obj = i915_gem_object_create_internal(i915, size);
+       if (IS_ERR(obj)) {
+               DRM_ERROR("Failed to allocate scratch page\n");
+               return PTR_ERR(obj);
+       }
+
+       vma = i915_vma_instance(obj, &gt->ggtt->vm, NULL);
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto err_unref;
+       }
+
+       ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+       if (ret)
+               goto err_unref;
+
+       gt->scratch = vma;
+       return 0;
+
+err_unref:
+       i915_gem_object_put(obj);
+       return ret;
+}
+
+void intel_gt_fini_scratch(struct intel_gt *gt)
+{
+       i915_vma_unpin_and_release(&gt->scratch, 0);
+}
drivers/gpu/drm/i915/gt/intel_gt.h
index 29cd15b..cf3c6ce 100644 (file)
@@ -21,4 +21,12 @@ void intel_gt_clear_error_registers(struct intel_gt *gt,
 void intel_gt_flush_ggtt_writes(struct intel_gt *gt);
 void intel_gt_chipset_flush(struct intel_gt *gt);
 
+int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size);
+void intel_gt_fini_scratch(struct intel_gt *gt);
+
+static inline u32 intel_gt_scratch_offset(const struct intel_gt *gt)
+{
+       return i915_ggtt_offset(gt->scratch);
+}
+
 #endif /* __INTEL_GT_H__ */
drivers/gpu/drm/i915/gt/intel_lrc.c
index 3abcec3..b3e0e25 100644 (file)
 
 #include "gem/i915_gem_context.h"
 
+#include "gt/intel_gt.h"
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
 #include "i915_vgpu.h"
@@ -1756,7 +1757,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
        /* NB no one else is allowed to scribble over scratch + 256! */
        *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
        *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-       *batch++ = i915_scratch_offset(engine->i915) + 256;
+       *batch++ = intel_gt_scratch_offset(engine->gt) + 256;
        *batch++ = 0;
 
        *batch++ = MI_LOAD_REGISTER_IMM(1);
@@ -1770,7 +1771,7 @@ gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
 
        *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
        *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
-       *batch++ = i915_scratch_offset(engine->i915) + 256;
+       *batch++ = intel_gt_scratch_offset(engine->gt) + 256;
        *batch++ = 0;
 
        return batch;
@@ -1807,7 +1808,7 @@ static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
                                       PIPE_CONTROL_GLOBAL_GTT_IVB |
                                       PIPE_CONTROL_CS_STALL |
                                       PIPE_CONTROL_QW_WRITE,
-                                      i915_scratch_offset(engine->i915) +
+                                      intel_gt_scratch_offset(engine->gt) +
                                       2 * CACHELINE_BYTES);
 
        *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
@@ -2501,7 +2502,7 @@ static int gen8_emit_flush_render(struct i915_request *request,
 {
        struct intel_engine_cs *engine = request->engine;
        u32 scratch_addr =
-               i915_scratch_offset(engine->i915) + 2 * CACHELINE_BYTES;
+               intel_gt_scratch_offset(engine->gt) + 2 * CACHELINE_BYTES;
        bool vf_flush_wa = false, dc_flush_wa = false;
        u32 *cs, flags = 0;
        int len;
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index aa483bb..d65b8cb 100644 (file)
@@ -33,6 +33,8 @@
 
 #include "gem/i915_gem_context.h"
 
+#include "gt/intel_gt.h"
+
 #include "i915_drv.h"
 #include "i915_gem_render_state.h"
 #include "i915_trace.h"
@@ -75,7 +77,7 @@ gen2_render_ring_flush(struct i915_request *rq, u32 mode)
        *cs++ = cmd;
        while (num_store_dw--) {
                *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
-               *cs++ = i915_scratch_offset(rq->i915);
+               *cs++ = intel_gt_scratch_offset(rq->engine->gt);
                *cs++ = 0;
        }
        *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
@@ -148,7 +150,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
         */
        if (mode & EMIT_INVALIDATE) {
                *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-               *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+                       PIPE_CONTROL_GLOBAL_GTT;
                *cs++ = 0;
                *cs++ = 0;
 
@@ -156,7 +159,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
                        *cs++ = MI_FLUSH;
 
                *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
-               *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+               *cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+                       PIPE_CONTROL_GLOBAL_GTT;
                *cs++ = 0;
                *cs++ = 0;
        }
@@ -208,7 +212,8 @@ gen4_render_ring_flush(struct i915_request *rq, u32 mode)
 static int
 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 {
-       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr =
+               intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
        u32 *cs;
 
        cs = intel_ring_begin(rq, 6);
@@ -241,7 +246,8 @@ gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
 static int
 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr =
+               intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
        u32 *cs, flags = 0;
        int ret;
 
@@ -299,7 +305,8 @@ static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
 
        *cs++ = GFX_OP_PIPE_CONTROL(4);
        *cs++ = PIPE_CONTROL_QW_WRITE;
-       *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
+       *cs++ = intel_gt_scratch_offset(rq->engine->gt) |
+               PIPE_CONTROL_GLOBAL_GTT;
        *cs++ = 0;
 
        /* Finally we can flush and with it emit the breadcrumb */
@@ -342,7 +349,8 @@ gen7_render_ring_cs_stall_wa(struct i915_request *rq)
 static int
 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
 {
-       u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
+       u32 scratch_addr =
+               intel_gt_scratch_offset(rq->engine->gt) + 2 * CACHELINE_BYTES;
        u32 *cs, flags = 0;
 
        /*
@@ -1071,9 +1079,9 @@ i830_emit_bb_start(struct i915_request *rq,
                   u64 offset, u32 len,
                   unsigned int dispatch_flags)
 {
-       u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
+       u32 *cs, cs_offset = intel_gt_scratch_offset(rq->engine->gt);
 
-       GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
+       GEM_BUG_ON(rq->engine->gt->scratch->size < I830_WA_SIZE);
 
        cs = intel_ring_begin(rq, 6);
        if (IS_ERR(cs))
@@ -1513,7 +1521,7 @@ static int flush_pd_dir(struct i915_request *rq)
        /* Stall until the page table load is complete */
        *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
        *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine->mmio_base));
-       *cs++ = i915_scratch_offset(rq->i915);
+       *cs++ = intel_gt_scratch_offset(rq->engine->gt);
        *cs++ = MI_NOOP;
 
        intel_ring_advance(rq, cs);
@@ -1629,7 +1637,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
                        /* Insert a delay before the next switch! */
                        *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
                        *cs++ = i915_mmio_reg_offset(last_reg);
-                       *cs++ = i915_scratch_offset(rq->i915);
+                       *cs++ = intel_gt_scratch_offset(rq->engine->gt);
                        *cs++ = MI_NOOP;
                }
                *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
drivers/gpu/drm/i915/i915_drv.h
index 89a21fa..4077d15 100644 (file)
@@ -2783,11 +2783,6 @@ static inline int intel_hws_csb_write_index(struct drm_i915_private *i915)
                return I915_HWS_CSB_WRITE_INDEX;
 }
 
-static inline u32 i915_scratch_offset(const struct drm_i915_private *i915)
-{
-       return i915_ggtt_offset(i915->gt.scratch);
-}
-
 static inline enum i915_map_type
 i915_coherent_map_type(struct drm_i915_private *i915)
 {
drivers/gpu/drm/i915/i915_gem.c
index 6e07127..8dff3b8 100644 (file)
@@ -1424,39 +1424,12 @@ err_active:
 static int
 i915_gem_init_scratch(struct drm_i915_private *i915, unsigned int size)
 {
-       struct drm_i915_gem_object *obj;
-       struct i915_vma *vma;
-       int ret;
-
-       obj = i915_gem_object_create_stolen(i915, size);
-       if (!obj)
-               obj = i915_gem_object_create_internal(i915, size);
-       if (IS_ERR(obj)) {
-               DRM_ERROR("Failed to allocate scratch page\n");
-               return PTR_ERR(obj);
-       }
-
-       vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
-               goto err_unref;
-       }
-
-       ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
-       if (ret)
-               goto err_unref;
-
-       i915->gt.scratch = vma;
-       return 0;
-
-err_unref:
-       i915_gem_object_put(obj);
-       return ret;
+       return intel_gt_init_scratch(&i915->gt, size);
 }
 
 static void i915_gem_fini_scratch(struct drm_i915_private *i915)
 {
-       i915_vma_unpin_and_release(&i915->gt.scratch, 0);
+       intel_gt_fini_scratch(&i915->gt);
 }
 
 static int intel_engines_verify_workarounds(struct drm_i915_private *i915)
drivers/gpu/drm/i915/i915_gpu_error.c
index 330a25c..59f5b02 100644 (file)
@@ -1441,8 +1441,8 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
                        if (HAS_BROKEN_CS_TLB(i915))
                                ee->wa_batchbuffer =
-                                       i915_error_object_create(i915,
-                                                                i915->gt.scratch);
+                                 i915_error_object_create(i915,
+                                                          engine->gt->scratch);
                        request_record_user_bo(request, ee);
 
                        ee->ctx =