drm/i915/gvt: Move request alloc to dispatch_workload path only
author    fred gao <fred.gao@intel.com>
          Tue, 14 Nov 2017 09:09:35 +0000 (17:09 +0800)
committer Zhenyu Wang <zhenyuw@linux.intel.com>
          Tue, 28 Nov 2017 09:24:20 +0000 (17:24 +0800)
Previously, performance was improved by auditing and shadowing the workload
ahead of vGPU scheduling. However, there is a case where further requests are
allocated in submit_context before the previous request has been added, so the
timeline already holds a later seqno than that of the request being added.

This patch moves the request allocation into the dispatch_workload path,
the same place where the request is added.

This fixes the kernel BUG triggered by the (timeline->seqno !=
request->fence.seqno) check in add_request (see the sketch below).

Fixes: 89ea20b930cb ("drm/i915/gvt: Factor out scan and shadow from workload dispatch")
Signed-off-by: Chuanxiao Dong <chuanxiao.dong@intel.com>
Signed-off-by: fred gao <fred.gao@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
(cherry picked from commit f2880e04f3a5419366926182fc97a3c2e4fd8f2a)
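
The failure mode can be shown with a minimal, self-contained model. This is
not the real i915 request API; it only assumes, as the message above
describes, that allocating a request reserves the next seqno from a shared
per-engine timeline and that adding a request checks the timeline's current
seqno against the request's fence seqno. Allocating a second request before
the first one is added then trips the check:

/*
 * Minimal model of the seqno ordering bug (hypothetical types and
 * helpers, not the i915 implementation).
 */
#include <assert.h>
#include <stdlib.h>

struct timeline { unsigned int seqno; };

struct request {
	struct timeline *tl;
	unsigned int fence_seqno;
};

static struct request *request_alloc(struct timeline *tl)
{
	struct request *rq = malloc(sizeof(*rq));

	rq->tl = tl;
	rq->fence_seqno = ++tl->seqno;	/* reserve the next seqno */
	return rq;
}

static void add_request(struct request *rq)
{
	/* mirrors the (timeline->seqno != request->fence.seqno) check */
	assert(rq->tl->seqno == rq->fence_seqno);
	/* ... queue the request for execution ... */
}

int main(void)
{
	struct timeline tl = { 0 };
	struct request *a = request_alloc(&tl);	/* fence_seqno == 1 */
	struct request *b = request_alloc(&tl);	/* fence_seqno == 2 */

	add_request(a);		/* fires: tl.seqno is 2, a expects 1 */
	add_request(b);
	free(a);
	free(b);
	return 0;
}

Allocating the request in the dispatch_workload path, immediately before it
is added, leaves no window for another allocation to bump the timeline seqno
in between.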

drivers/gpu/drm/i915/gvt/execlist.c
drivers/gpu/drm/i915/gvt/scheduler.c
drivers/gpu/drm/i915/gvt/scheduler.h

diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index 4427be1..940cdaa 100644
@@ -496,6 +496,12 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
                goto err_unpin_mm;
        }
 
+       ret = intel_gvt_generate_request(workload);
+       if (ret) {
+               gvt_vgpu_err("fail to generate request\n");
+               goto err_unpin_mm;
+       }
+
        ret = prepare_shadow_batch_buffer(workload);
        if (ret) {
                gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index f031234..3ac1dc9 100644
@@ -254,7 +254,6 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        struct i915_gem_context *shadow_ctx = workload->vgpu->shadow_ctx;
        struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
        struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-       struct drm_i915_gem_request *rq;
        struct intel_vgpu *vgpu = workload->vgpu;
        struct intel_ring *ring;
        int ret;
@@ -300,6 +299,26 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        ret = populate_shadow_context(workload);
        if (ret)
                goto err_unpin;
+       workload->shadowed = true;
+       return 0;
+
+err_unpin:
+       engine->context_unpin(engine, shadow_ctx);
+err_shadow:
+       release_shadow_wa_ctx(&workload->wa_ctx);
+err_scan:
+       return ret;
+}
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
+{
+       int ring_id = workload->ring_id;
+       struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+       struct intel_engine_cs *engine = dev_priv->engine[ring_id];
+       struct drm_i915_gem_request *rq;
+       struct intel_vgpu *vgpu = workload->vgpu;
+       struct i915_gem_context *shadow_ctx = vgpu->shadow_ctx;
+       int ret;
 
        rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
        if (IS_ERR(rq)) {
@@ -314,14 +333,11 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
        ret = copy_workload_to_ring_buffer(workload);
        if (ret)
                goto err_unpin;
-       workload->shadowed = true;
        return 0;
 
 err_unpin:
        engine->context_unpin(engine, shadow_ctx);
-err_shadow:
        release_shadow_wa_ctx(&workload->wa_ctx);
-err_scan:
        return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 2d694f6..b9f8722 100644
@@ -142,4 +142,7 @@ int intel_vgpu_init_gvt_context(struct intel_vgpu *vgpu);
 void intel_vgpu_clean_gvt_context(struct intel_vgpu *vgpu);
 
 void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
+
+int intel_gvt_generate_request(struct intel_vgpu_workload *workload);
+
 #endif