From b20c0d5ce1047ba03a6709a07f31f4d7178de35c Mon Sep 17 00:00:00 2001
From: Zhi Wang
Date: Wed, 7 Feb 2018 18:12:15 +0800
Subject: [PATCH] drm/i915/gvt: Update PDPs after a vGPU mm object is pinned.

The PDPs of a shadow page table are only valid after the vGPU mm object
is pinned, so the PDPs in the shadow context should be updated at that
point.

Signed-off-by: Zhi Wang
Signed-off-by: Zhenyu Wang
---
 drivers/gpu/drm/i915/gvt/scheduler.c | 28 +++++++++++++++++++++++++---
 1 file changed, 25 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9b92b4e25a20..1127bd77fc6e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,29 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+
+	if (WARN_ON(!workload->shadow_mm))
+		return;
+
+	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+		return;
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap(page);
+	set_context_pdp_root_pointer(shadow_ring_context,
+			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+	kunmap(page);
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -112,9 +135,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	}
 #undef COPY_REG
 
-	set_context_pdp_root_pointer(shadow_ring_context,
-			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
-
 	intel_gvt_hypervisor_read_gpa(vgpu,
 			workload->ring_context_gpa +
 			sizeof(*shadow_ring_context),
@@ -509,6 +529,8 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		return ret;
 	}
 
+	update_shadow_pdps(workload);
+
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
 		gvt_vgpu_err("fail to vgpu sync oos pages\n");
-- 
2.11.0
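
Note: the sketch below is not part of the patch and is not kernel code. It is a
minimal, stand-alone C illustration of the ordering guard the patch introduces
in update_shadow_pdps(): derived state (here standing in for the shadow PDPs)
is populated when the backing object is pinned and is only read once the pin
count is non-zero, mirroring the WARN_ON(!atomic_read(&...->pincount)) check.
All names and values (mm_obj, pin_mm, update_pdps) are illustrative and do not
exist in the i915/gvt code.

/* Build with: cc -std=c11 -o pdp_sketch pdp_sketch.c */
#include <stdatomic.h>
#include <stdio.h>

struct mm_obj {
	atomic_int pincount;
	unsigned long long shadow_pdps[4];	/* valid only while pinned */
};

static void pin_mm(struct mm_obj *mm)
{
	if (atomic_fetch_add(&mm->pincount, 1) == 0) {
		/* First pin: the derived state becomes valid here. */
		for (int i = 0; i < 4; i++)
			mm->shadow_pdps[i] = 0x1000ull * (i + 1);
	}
}

static int update_pdps(struct mm_obj *mm)
{
	/* Mirrors the pincount guard: refuse to read unpinned state. */
	if (atomic_load(&mm->pincount) == 0) {
		fprintf(stderr, "mm not pinned, PDPs not valid yet\n");
		return -1;
	}
	for (int i = 0; i < 4; i++)
		printf("pdp[%d] = 0x%llx\n", i, mm->shadow_pdps[i]);
	return 0;
}

int main(void)
{
	struct mm_obj mm = { .pincount = 0 };

	update_pdps(&mm);	/* rejected: nothing is pinned yet */
	pin_mm(&mm);
	update_pdps(&mm);	/* PDPs are now valid and can be copied */
	return 0;
}

This is why the patch moves the set_context_pdp_root_pointer() call out of
populate_shadow_context() and into prepare_workload(), after the shadow mm has
been pinned: only then do the shadow PDPs hold meaningful values.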