/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
*    Zhi Wang <zhi.a.wang@intel.com>
*
* Contributors:
*    Ping Gao <ping.a.gao@intel.com>
*    Tina Zhang <tina.zhang@intel.com>
*    Changbin Du <changbin.du@intel.com>
*    Min He <min.he@intel.com>
*    Bing Niu <bing.niu@intel.com>
*    Zhenyu Wang <zhenyuw@linux.intel.com>
*/
36 #include <linux/kthread.h>
38 #include "gem/i915_gem_pm.h"
39 #include "gt/intel_context.h"
40 #include "gt/intel_execlists_submission.h"
41 #include "gt/intel_gt_regs.h"
42 #include "gt/intel_lrc.h"
43 #include "gt/intel_ring.h"
#include "i915_drv.h"
#include "i915_gem_gtt.h"
#include "i915_perf_oa_regs.h"
#include "gvt.h"
50 #define RING_CTX_OFF(x) \
51 offsetof(struct execlist_ring_context, x)
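/*
* The execlist ring context stores its PDP root pointers from PDP3 down
* to PDP0, so the shadow pdp array is copied in reverse order below
* (pdp[7 - i]).
*/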
53 static void set_context_pdp_root_pointer(
54 struct execlist_ring_context *ring_context,
59 for (i = 0; i < 8; i++)
60 ring_context->pdps[i].val = pdp[7 - i];
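/*
* Refresh the PDP root pointers in the shadow ring context from the
* pinned shadow ppgtt of this workload.
*/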
63 static void update_shadow_pdps(struct intel_vgpu_workload *workload)
65 struct execlist_ring_context *shadow_ring_context;
66 struct intel_context *ctx = workload->req->context;
68 if (WARN_ON(!workload->shadow_mm))
71 if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
74 shadow_ring_context = (struct execlist_ring_context *)ctx->lrc_reg_state;
75 set_context_pdp_root_pointer(shadow_ring_context,
76 (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
/*
* when populating the shadow ctx from the guest, we should not override
* OA-related registers, so that they will not be clobbered by the guest's
* OA configs. This makes it possible to capture OA data on the host for
* both the host and guests.
*/
84 static void sr_oa_regs(struct intel_vgpu_workload *workload,
85 u32 *reg_state, bool save)
87 struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
88 u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
89 u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
92 i915_mmio_reg_offset(EU_PERF_CNTL0),
93 i915_mmio_reg_offset(EU_PERF_CNTL1),
94 i915_mmio_reg_offset(EU_PERF_CNTL2),
95 i915_mmio_reg_offset(EU_PERF_CNTL3),
96 i915_mmio_reg_offset(EU_PERF_CNTL4),
97 i915_mmio_reg_offset(EU_PERF_CNTL5),
98 i915_mmio_reg_offset(EU_PERF_CNTL6),
101 if (workload->engine->id != RCS0)
105 workload->oactxctrl = reg_state[ctx_oactxctrl + 1];
107 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
108 u32 state_offset = ctx_flexeu0 + i * 2;
110 workload->flex_mmio[i] = reg_state[state_offset + 1];
113 reg_state[ctx_oactxctrl] =
114 i915_mmio_reg_offset(GEN8_OACTXCONTROL);
115 reg_state[ctx_oactxctrl + 1] = workload->oactxctrl;
117 for (i = 0; i < ARRAY_SIZE(workload->flex_mmio); i++) {
118 u32 state_offset = ctx_flexeu0 + i * 2;
119 u32 mmio = flex_mmio[i];
121 reg_state[state_offset] = mmio;
122 reg_state[state_offset + 1] = workload->flex_mmio[i];
127 static int populate_shadow_context(struct intel_vgpu_workload *workload)
129 struct intel_vgpu *vgpu = workload->vgpu;
130 struct intel_gvt *gvt = vgpu->gvt;
131 struct intel_context *ctx = workload->req->context;
132 struct execlist_ring_context *shadow_ring_context;
135 unsigned long context_gpa, context_page_num;
136 unsigned long gpa_base; /* first gpa of consecutive GPAs */
137 unsigned long gpa_size; /* size of consecutive GPAs */
138 struct intel_vgpu_submission *s = &vgpu->submission;
141 int ring_id = workload->engine->id;
144 GEM_BUG_ON(!intel_context_is_pinned(ctx));
146 context_base = (void *) ctx->lrc_reg_state -
147 (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
149 shadow_ring_context = (void *) ctx->lrc_reg_state;
151 sr_oa_regs(workload, (u32 *)shadow_ring_context, true);
152 #define COPY_REG(name) \
153 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
154 + RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
155 #define COPY_REG_MASKED(name) {\
156 intel_gvt_read_gpa(vgpu, workload->ring_context_gpa \
157 + RING_CTX_OFF(name.val),\
158 &shadow_ring_context->name.val, 4);\
159 shadow_ring_context->name.val |= 0xffff << 16;\
162 COPY_REG_MASKED(ctx_ctrl);
163 COPY_REG(ctx_timestamp);
165 if (workload->engine->id == RCS0) {
166 COPY_REG(bb_per_ctx_ptr);
167 COPY_REG(rcs_indirect_ctx);
168 COPY_REG(rcs_indirect_ctx_offset);
169 } else if (workload->engine->id == BCS0)
170 intel_gvt_read_gpa(vgpu,
171 workload->ring_context_gpa +
172 BCS_TILE_REGISTER_VAL_OFFSET,
173 (void *)shadow_ring_context +
174 BCS_TILE_REGISTER_VAL_OFFSET, 4);
176 #undef COPY_REG_MASKED
/* don't copy the Ring Context (the first 0x50 dwords);
* only copy the Engine Context part from the guest.
*/
181 intel_gvt_read_gpa(vgpu,
182 workload->ring_context_gpa +
184 (void *)shadow_ring_context +
186 I915_GTT_PAGE_SIZE - RING_CTX_SIZE);
188 sr_oa_regs(workload, (u32 *)shadow_ring_context, false);
190 gvt_dbg_sched("ring %s workload lrca %x, ctx_id %x, ctx gpa %llx",
191 workload->engine->name, workload->ctx_desc.lrca,
192 workload->ctx_desc.context_id,
193 workload->ring_context_gpa);
/* only need to ensure this context is not pinned/unpinned during the
* period from the last submission to this submission.
* Upon reaching this function, the currently submitted context is not
* supposed to get unpinned. If a misbehaving guest driver ever does
* this, it would corrupt itself.
*/
201 if (s->last_ctx[ring_id].valid &&
202 (s->last_ctx[ring_id].lrca ==
203 workload->ctx_desc.lrca) &&
204 (s->last_ctx[ring_id].ring_context_gpa ==
205 workload->ring_context_gpa))
208 s->last_ctx[ring_id].lrca = workload->ctx_desc.lrca;
209 s->last_ctx[ring_id].ring_context_gpa = workload->ring_context_gpa;
211 if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val) || skip)
214 s->last_ctx[ring_id].valid = false;
215 context_page_num = workload->engine->context_size;
216 context_page_num = context_page_num >> PAGE_SHIFT;
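/* the render engine context on Broadwell is treated as 19 pages, overriding the size derived from engine->context_size */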
218 if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
219 context_page_num = 19;
/* find consecutive GPAs from the gma until the first non-consecutive GPA;
* read from the consecutive GPAs into the dst virtual address
*/
225 for (i = 2; i < context_page_num; i++) {
226 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
227 (u32)((workload->ctx_desc.lrca + i) <<
228 I915_GTT_PAGE_SHIFT));
229 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
230 gvt_vgpu_err("Invalid guest context descriptor\n");
235 gpa_base = context_gpa;
236 dst = context_base + (i << I915_GTT_PAGE_SHIFT);
237 } else if (context_gpa != gpa_base + gpa_size)
240 gpa_size += I915_GTT_PAGE_SIZE;
242 if (i == context_page_num - 1)
248 intel_gvt_read_gpa(vgpu, gpa_base, dst, gpa_size);
249 gpa_base = context_gpa;
250 gpa_size = I915_GTT_PAGE_SIZE;
251 dst = context_base + (i << I915_GTT_PAGE_SHIFT);
253 ret = intel_gvt_scan_engine_context(workload);
255 gvt_vgpu_err("invalid cmd found in guest context pages\n");
258 s->last_ctx[ring_id].valid = true;
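/*
* Shadow contexts created by GVT are marked for forced single submission
* (see intel_vgpu_setup_submission()), so this flag distinguishes
* GVT-owned requests from host i915 requests.
*/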
262 static inline bool is_gvt_request(struct i915_request *rq)
264 return intel_context_force_single_submission(rq->context);
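/*
* Snapshot the engine's INSTDONE and ACTHD registers into the vGPU's
* virtual registers when its context is scheduled out or preempted.
*/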
267 static void save_ring_hw_state(struct intel_vgpu *vgpu,
268 const struct intel_engine_cs *engine)
270 struct intel_uncore *uncore = engine->uncore;
273 reg = RING_INSTDONE(engine->mmio_base);
274 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
275 intel_uncore_read(uncore, reg);
277 reg = RING_ACTHD(engine->mmio_base);
278 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
279 intel_uncore_read(uncore, reg);
281 reg = RING_ACTHD_UDW(engine->mmio_base);
282 vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
283 intel_uncore_read(uncore, reg);
286 static int shadow_context_status_change(struct notifier_block *nb,
287 unsigned long action, void *data)
289 struct i915_request *rq = data;
290 struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
291 shadow_ctx_notifier_block[rq->engine->id]);
292 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
293 enum intel_engine_id ring_id = rq->engine->id;
294 struct intel_vgpu_workload *workload;
297 if (!is_gvt_request(rq)) {
298 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
299 if (action == INTEL_CONTEXT_SCHEDULE_IN &&
300 scheduler->engine_owner[ring_id]) {
301 /* Switch ring from vGPU to host. */
302 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
304 scheduler->engine_owner[ring_id] = NULL;
306 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
311 workload = scheduler->current_workload[ring_id];
312 if (unlikely(!workload))
316 case INTEL_CONTEXT_SCHEDULE_IN:
317 spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
318 if (workload->vgpu != scheduler->engine_owner[ring_id]) {
319 /* Switch ring from host to vGPU or vGPU to vGPU. */
320 intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
321 workload->vgpu, rq->engine);
322 scheduler->engine_owner[ring_id] = workload->vgpu;
324 gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
325 ring_id, workload->vgpu->id);
326 spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
327 atomic_set(&workload->shadow_ctx_active, 1);
329 case INTEL_CONTEXT_SCHEDULE_OUT:
330 save_ring_hw_state(workload->vgpu, rq->engine);
331 atomic_set(&workload->shadow_ctx_active, 0);
333 case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
334 save_ring_hw_state(workload->vgpu, rq->engine);
340 wake_up(&workload->shadow_ctx_status_wq);
345 shadow_context_descriptor_update(struct intel_context *ce,
346 struct intel_vgpu_workload *workload)
348 u64 desc = ce->lrc.desc;
/*
* Update the addressing-mode field in bits 0-11 of the context
* descriptor, which holds flags like GEN8_CTX_* cached in desc_template.
*/
354 desc &= ~(0x3ull << GEN8_CTX_ADDRESSING_MODE_SHIFT);
355 desc |= (u64)workload->ctx_desc.addressing_mode <<
356 GEN8_CTX_ADDRESSING_MODE_SHIFT;
361 static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
363 struct intel_vgpu *vgpu = workload->vgpu;
364 struct i915_request *req = workload->req;
365 void *shadow_ring_buffer_va;
369 if (GRAPHICS_VER(req->engine->i915) == 9 && is_inhibit_context(req->context))
370 intel_vgpu_restore_inhibit_context(vgpu, req);
/*
* To track whether a request has started on HW, we can emit a
* breadcrumb at the beginning of the request and check its
* timeline's HWSP to see if the breadcrumb has advanced past the
* start of this request. A request must carry the init breadcrumb
* whenever its timeline has has_init_breadcrumb set, otherwise the
* scheduler may see a wrong state for it during reset. Since requests
* from GVT always set the has_init_breadcrumb flag, emit the init
* breadcrumb for every request here.
*/
382 if (req->engine->emit_init_breadcrumb) {
383 err = req->engine->emit_init_breadcrumb(req);
385 gvt_vgpu_err("fail to emit init breadcrumb\n");
390 /* allocate shadow ring buffer */
391 cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
393 gvt_vgpu_err("fail to alloc size =%ld shadow ring buffer\n",
398 shadow_ring_buffer_va = workload->shadow_ring_buffer_va;
400 /* get shadow ring buffer va */
401 workload->shadow_ring_buffer_va = cs;
403 memcpy(cs, shadow_ring_buffer_va,
406 cs += workload->rb_len / sizeof(u32);
407 intel_ring_advance(workload->req, cs);
412 static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
414 if (!wa_ctx->indirect_ctx.obj)
417 i915_gem_object_lock(wa_ctx->indirect_ctx.obj, NULL);
418 i915_gem_object_unpin_map(wa_ctx->indirect_ctx.obj);
419 i915_gem_object_unlock(wa_ctx->indirect_ctx.obj);
420 i915_gem_object_put(wa_ctx->indirect_ctx.obj);
422 wa_ctx->indirect_ctx.obj = NULL;
423 wa_ctx->indirect_ctx.shadow_va = NULL;
426 static void set_dma_address(struct i915_page_directory *pd, dma_addr_t addr)
428 struct scatterlist *sg = pd->pt.base->mm.pages->sgl;
430 /* This is not a good idea */
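/*
* Overwriting the sg DMA address of the page directory's backing page
* makes i915 load the context's PDP/PML4 roots with the shadow page
* tables that GVT built for the guest.
*/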
431 sg->dma_address = addr;
434 static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
435 struct intel_context *ce)
437 struct intel_vgpu_mm *mm = workload->shadow_mm;
438 struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(ce->vm);
441 if (mm->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
442 set_dma_address(ppgtt->pd, mm->ppgtt_mm.shadow_pdps[0]);
444 for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
445 struct i915_page_directory * const pd =
446 i915_pd_entry(ppgtt->pd, i);
/* skip for now, as the current i915 ppgtt allocation won't allocate a
* top-level pdp for a non-4-level table; this doesn't affect the
* shadow ppgtt */
453 set_dma_address(pd, mm->ppgtt_mm.shadow_pdps[i]);
459 intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
461 struct intel_vgpu *vgpu = workload->vgpu;
462 struct intel_vgpu_submission *s = &vgpu->submission;
463 struct i915_request *rq;
468 rq = i915_request_create(s->shadow[workload->engine->id]);
470 gvt_vgpu_err("fail to allocate gem request\n");
474 workload->req = i915_request_get(rq);
/**
* intel_gvt_scan_and_shadow_workload - audit the workload by scanning and
* shadowing it as well, including the ring buffer, wa_ctx and ctx.
* @workload: an abstract entity for each execlist submission.
*
* This function is called before the workload is submitted to i915, to make
* sure the content of the workload is valid.
*/
486 int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
488 struct intel_vgpu *vgpu = workload->vgpu;
489 struct intel_vgpu_submission *s = &vgpu->submission;
492 lockdep_assert_held(&vgpu->vgpu_lock);
494 if (workload->shadow)
497 if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
498 shadow_context_descriptor_update(s->shadow[workload->engine->id],
501 ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
505 if (workload->engine->id == RCS0 &&
506 workload->wa_ctx.indirect_ctx.size) {
507 ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
512 workload->shadow = true;
516 release_shadow_wa_ctx(&workload->wa_ctx);
520 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload);
522 static int prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
524 struct intel_gvt *gvt = workload->vgpu->gvt;
525 const int gmadr_bytes = gvt->device_info.gmadr_bytes_in_cmd;
526 struct intel_vgpu_shadow_bb *bb;
527 struct i915_gem_ww_ctx ww;
530 list_for_each_entry(bb, &workload->shadow_bb, list) {
/* For a privileged batch buffer that is not wa_ctx, bb_start_cmd_va
* only points into ring_scan_buffer, not the real ring address
* allocated later in copy_workload_to_ring_buffer. Please note that
* shadow_ring_buffer_va is made to point at the real ring buffer va
* in copy_workload_to_ring_buffer.
*/
539 bb->bb_start_cmd_va = workload->shadow_ring_buffer_va
/*
* For a non-privileged bb, scan & shadow is only for
* debugging purposes, so the content of the shadow bb
* is the same as the original bb. Therefore,
* rather than switching to the shadow bb's gma
* address, we directly use the original batch buffer's
* gma address and send the original bb to hardware.
*/
552 i915_gem_ww_ctx_init(&ww, false);
554 i915_gem_object_lock(bb->obj, &ww);
556 bb->vma = i915_gem_object_ggtt_pin_ww(bb->obj, &ww,
558 if (IS_ERR(bb->vma)) {
559 ret = PTR_ERR(bb->vma);
560 if (ret == -EDEADLK) {
561 ret = i915_gem_ww_ctx_backoff(&ww);
568 /* relocate shadow batch buffer */
569 bb->bb_start_cmd_va[1] = i915_ggtt_offset(bb->vma);
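/* with 8-byte graphics addresses, MI_BATCH_BUFFER_START carries a second
* address dword; the GGTT offset fits in the low dword, so clear the upper one
*/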
570 if (gmadr_bytes == 8)
571 bb->bb_start_cmd_va[2] = 0;
573 ret = i915_vma_move_to_active(bb->vma, workload->req,
574 __EXEC_OBJECT_NO_REQUEST_AWAIT);
578 /* No one is going to touch shadow bb from now on. */
579 i915_gem_object_flush_map(bb->obj);
580 i915_gem_ww_ctx_fini(&ww);
585 i915_gem_ww_ctx_fini(&ww);
586 release_shadow_batch_buffer(workload);
590 static void update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
592 struct intel_vgpu_workload *workload =
593 container_of(wa_ctx, struct intel_vgpu_workload, wa_ctx);
594 struct i915_request *rq = workload->req;
595 struct execlist_ring_context *shadow_ring_context =
596 (struct execlist_ring_context *)rq->context->lrc_reg_state;
598 shadow_ring_context->bb_per_ctx_ptr.val =
599 (shadow_ring_context->bb_per_ctx_ptr.val &
600 (~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
601 shadow_ring_context->rcs_indirect_ctx.val =
602 (shadow_ring_context->rcs_indirect_ctx.val &
603 (~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
606 static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
608 struct i915_vma *vma;
609 unsigned char *per_ctx_va =
610 (unsigned char *)wa_ctx->indirect_ctx.shadow_va +
611 wa_ctx->indirect_ctx.size;
612 struct i915_gem_ww_ctx ww;
615 if (wa_ctx->indirect_ctx.size == 0)
618 i915_gem_ww_ctx_init(&ww, false);
620 i915_gem_object_lock(wa_ctx->indirect_ctx.obj, &ww);
622 vma = i915_gem_object_ggtt_pin_ww(wa_ctx->indirect_ctx.obj, &ww, NULL,
623 0, CACHELINE_BYTES, 0);
626 if (ret == -EDEADLK) {
627 ret = i915_gem_ww_ctx_backoff(&ww);
634 i915_gem_ww_ctx_fini(&ww);
/* FIXME: we are not tracking our pinned VMA, leaving it
* up to the core to fix up the stray pin_count upon free.
*/
641 wa_ctx->indirect_ctx.shadow_gma = i915_ggtt_offset(vma);
643 wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
644 memset(per_ctx_va, 0, CACHELINE_BYTES);
646 update_wa_ctx_2_shadow_ctx(wa_ctx);
650 static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
652 vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
656 static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
658 struct intel_vgpu_shadow_bb *bb, *pos;
660 if (list_empty(&workload->shadow_bb))
663 bb = list_first_entry(&workload->shadow_bb,
664 struct intel_vgpu_shadow_bb, list);
666 list_for_each_entry_safe(bb, pos, &workload->shadow_bb, list) {
668 i915_gem_object_lock(bb->obj, NULL);
669 if (bb->va && !IS_ERR(bb->va))
670 i915_gem_object_unpin_map(bb->obj);
672 if (bb->vma && !IS_ERR(bb->vma))
673 i915_vma_unpin(bb->vma);
675 i915_gem_object_unlock(bb->obj);
676 i915_gem_object_put(bb->obj);
684 intel_vgpu_shadow_mm_pin(struct intel_vgpu_workload *workload)
686 struct intel_vgpu *vgpu = workload->vgpu;
687 struct intel_vgpu_mm *m;
690 ret = intel_vgpu_pin_mm(workload->shadow_mm);
692 gvt_vgpu_err("fail to vgpu pin mm\n");
696 if (workload->shadow_mm->type != INTEL_GVT_MM_PPGTT ||
697 !workload->shadow_mm->ppgtt_mm.shadowed) {
698 intel_vgpu_unpin_mm(workload->shadow_mm);
699 gvt_vgpu_err("workload shadow ppgtt isn't ready\n");
703 if (!list_empty(&workload->lri_shadow_mm)) {
704 list_for_each_entry(m, &workload->lri_shadow_mm,
706 ret = intel_vgpu_pin_mm(m);
708 list_for_each_entry_from_reverse(m,
709 &workload->lri_shadow_mm,
711 intel_vgpu_unpin_mm(m);
712 gvt_vgpu_err("LRI shadow ppgtt fail to pin\n");
719 intel_vgpu_unpin_mm(workload->shadow_mm);
725 intel_vgpu_shadow_mm_unpin(struct intel_vgpu_workload *workload)
727 struct intel_vgpu_mm *m;
729 if (!list_empty(&workload->lri_shadow_mm)) {
730 list_for_each_entry(m, &workload->lri_shadow_mm,
732 intel_vgpu_unpin_mm(m);
734 intel_vgpu_unpin_mm(workload->shadow_mm);
737 static int prepare_workload(struct intel_vgpu_workload *workload)
739 struct intel_vgpu *vgpu = workload->vgpu;
740 struct intel_vgpu_submission *s = &vgpu->submission;
743 ret = intel_vgpu_shadow_mm_pin(workload);
745 gvt_vgpu_err("fail to pin shadow mm\n");
749 update_shadow_pdps(workload);
751 set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
753 ret = intel_vgpu_sync_oos_pages(workload->vgpu);
755 gvt_vgpu_err("fail to vgpu sync oos pages\n");
759 ret = intel_vgpu_flush_post_shadow(workload->vgpu);
761 gvt_vgpu_err("fail to flush post shadow\n");
765 ret = copy_workload_to_ring_buffer(workload);
767 gvt_vgpu_err("fail to generate request\n");
771 ret = prepare_shadow_batch_buffer(workload);
773 gvt_vgpu_err("fail to prepare_shadow_batch_buffer\n");
777 ret = prepare_shadow_wa_ctx(&workload->wa_ctx);
779 gvt_vgpu_err("fail to prepare_shadow_wa_ctx\n");
780 goto err_shadow_batch;
783 if (workload->prepare) {
784 ret = workload->prepare(workload);
786 goto err_shadow_wa_ctx;
791 release_shadow_wa_ctx(&workload->wa_ctx);
793 release_shadow_batch_buffer(workload);
795 intel_vgpu_shadow_mm_unpin(workload);
799 static int dispatch_workload(struct intel_vgpu_workload *workload)
801 struct intel_vgpu *vgpu = workload->vgpu;
802 struct i915_request *rq;
805 gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
806 workload->engine->name, workload);
808 mutex_lock(&vgpu->vgpu_lock);
810 ret = intel_gvt_workload_req_alloc(workload);
814 ret = intel_gvt_scan_and_shadow_workload(workload);
818 ret = populate_shadow_context(workload);
820 release_shadow_wa_ctx(&workload->wa_ctx);
824 ret = prepare_workload(workload);
/* We might still need to add a request with a
* clean ctx in order to retire it properly.
*/
830 rq = fetch_and_zero(&workload->req);
831 i915_request_put(rq);
834 if (!IS_ERR_OR_NULL(workload->req)) {
835 gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
836 workload->engine->name, workload->req);
837 i915_request_add(workload->req);
838 workload->dispatched = true;
842 workload->status = ret;
843 mutex_unlock(&vgpu->vgpu_lock);
847 static struct intel_vgpu_workload *
848 pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
850 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
851 struct intel_vgpu_workload *workload = NULL;
853 mutex_lock(&gvt->sched_lock);
/* no current vgpu / will be scheduled out / no workload */
859 if (!scheduler->current_vgpu) {
860 gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
864 if (scheduler->need_reschedule) {
865 gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
869 if (!scheduler->current_vgpu->active ||
870 list_empty(workload_q_head(scheduler->current_vgpu, engine)))
/*
* still have a current workload; maybe the workload dispatcher
* failed to submit it for some reason, so resubmit it.
*/
877 if (scheduler->current_workload[engine->id]) {
878 workload = scheduler->current_workload[engine->id];
879 gvt_dbg_sched("ring %s still have current workload %p\n",
880 engine->name, workload);
/*
* pick a workload as the current workload;
* once the current workload is set, the scheduling policy routines
* will wait until the current workload is finished when trying to
* schedule out a vgpu.
*/
890 scheduler->current_workload[engine->id] =
891 list_first_entry(workload_q_head(scheduler->current_vgpu,
893 struct intel_vgpu_workload, list);
895 workload = scheduler->current_workload[engine->id];
897 gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
899 atomic_inc(&workload->vgpu->submission.running_workload_num);
901 mutex_unlock(&gvt->sched_lock);
905 static void update_guest_pdps(struct intel_vgpu *vgpu,
906 u64 ring_context_gpa, u32 pdp[8])
911 gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
913 for (i = 0; i < 8; i++)
914 intel_gvt_write_gpa(vgpu, gpa + i * 8, &pdp[7 - i], 4);
917 static __maybe_unused bool
918 check_shadow_context_ppgtt(struct execlist_ring_context *c, struct intel_vgpu_mm *m)
920 if (m->ppgtt_mm.root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
921 u64 shadow_pdp = c->pdps[7].val | (u64) c->pdps[6].val << 32;
923 if (shadow_pdp != m->ppgtt_mm.shadow_pdps[0]) {
924 gvt_dbg_mm("4-level context ppgtt not match LRI command\n");
929 /* see comment in LRI handler in cmd_parser.c */
930 gvt_dbg_mm("invalid shadow mm type\n");
935 static void update_guest_context(struct intel_vgpu_workload *workload)
937 struct i915_request *rq = workload->req;
938 struct intel_vgpu *vgpu = workload->vgpu;
939 struct execlist_ring_context *shadow_ring_context;
940 struct intel_context *ctx = workload->req->context;
943 unsigned long context_gpa, context_page_num;
944 unsigned long gpa_base; /* first gpa of consecutive GPAs */
945 unsigned long gpa_size; /* size of consecutive GPAs*/
951 gvt_dbg_sched("ring id %d workload lrca %x\n", rq->engine->id,
952 workload->ctx_desc.lrca);
954 GEM_BUG_ON(!intel_context_is_pinned(ctx));
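/*
* Report the guest ring as drained: the head written back to the guest
* catches up with the tail, carrying over the wrap count (bumped when the
* tail wrapped past the previous head).
*/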
956 head = workload->rb_head;
957 tail = workload->rb_tail;
958 wrap_count = workload->guest_rb_head >> RB_HEAD_WRAP_CNT_OFF;
961 if (wrap_count == RB_HEAD_WRAP_CNT_MAX)
967 head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
969 ring_base = rq->engine->mmio_base;
970 vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
971 vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
973 context_page_num = rq->engine->context_size;
974 context_page_num = context_page_num >> PAGE_SHIFT;
976 if (IS_BROADWELL(rq->engine->i915) && rq->engine->id == RCS0)
977 context_page_num = 19;
979 context_base = (void *) ctx->lrc_reg_state -
980 (LRC_STATE_PN << I915_GTT_PAGE_SHIFT);
/* find consecutive GPAs from the gma until the first non-consecutive GPA;
* write to the consecutive GPAs from the src virtual address
*/
986 for (i = 2; i < context_page_num; i++) {
987 context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
988 (u32)((workload->ctx_desc.lrca + i) <<
989 I915_GTT_PAGE_SHIFT));
990 if (context_gpa == INTEL_GVT_INVALID_ADDR) {
991 gvt_vgpu_err("invalid guest context descriptor\n");
996 gpa_base = context_gpa;
997 src = context_base + (i << I915_GTT_PAGE_SHIFT);
998 } else if (context_gpa != gpa_base + gpa_size)
1001 gpa_size += I915_GTT_PAGE_SIZE;
1003 if (i == context_page_num - 1)
1009 intel_gvt_write_gpa(vgpu, gpa_base, src, gpa_size);
1010 gpa_base = context_gpa;
1011 gpa_size = I915_GTT_PAGE_SIZE;
1012 src = context_base + (i << I915_GTT_PAGE_SHIFT);
1015 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa +
1016 RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);
1018 shadow_ring_context = (void *) ctx->lrc_reg_state;
1020 if (!list_empty(&workload->lri_shadow_mm)) {
1021 struct intel_vgpu_mm *m = list_last_entry(&workload->lri_shadow_mm,
1022 struct intel_vgpu_mm,
1024 GEM_BUG_ON(!check_shadow_context_ppgtt(shadow_ring_context, m));
1025 update_guest_pdps(vgpu, workload->ring_context_gpa,
1026 (void *)m->ppgtt_mm.guest_pdps);
1029 #define COPY_REG(name) \
1030 intel_gvt_write_gpa(vgpu, workload->ring_context_gpa + \
1031 RING_CTX_OFF(name.val), &shadow_ring_context->name.val, 4)
1034 COPY_REG(ctx_timestamp);
1038 intel_gvt_write_gpa(vgpu,
1039 workload->ring_context_gpa +
1040 sizeof(*shadow_ring_context),
1041 (void *)shadow_ring_context +
1042 sizeof(*shadow_ring_context),
1043 I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
1046 void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
1047 intel_engine_mask_t engine_mask)
1049 struct intel_vgpu_submission *s = &vgpu->submission;
1050 struct intel_engine_cs *engine;
1051 struct intel_vgpu_workload *pos, *n;
1052 intel_engine_mask_t tmp;
/* free the unsubmitted workloads in the queues. */
1055 for_each_engine_masked(engine, vgpu->gvt->gt, engine_mask, tmp) {
1056 list_for_each_entry_safe(pos, n,
1057 &s->workload_q_head[engine->id], list) {
1058 list_del_init(&pos->list);
1059 intel_vgpu_destroy_workload(pos);
1061 clear_bit(engine->id, s->shadow_ctx_desc_updated);
1065 static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
1067 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1068 struct intel_vgpu_workload *workload =
1069 scheduler->current_workload[ring_id];
1070 struct intel_vgpu *vgpu = workload->vgpu;
1071 struct intel_vgpu_submission *s = &vgpu->submission;
1072 struct i915_request *rq = workload->req;
1075 mutex_lock(&vgpu->vgpu_lock);
1076 mutex_lock(&gvt->sched_lock);
/* For a workload with a request, we need to wait for the context
* switch to make sure the request is completed.
* For a workload without a request, complete the workload directly.
*/
1083 wait_event(workload->shadow_ctx_status_wq,
1084 !atomic_read(&workload->shadow_ctx_active));
/* If this request caused a GPU hang, req->fence.error will
* be set to -EIO. Propagate -EIO to the workload status so
* that a request which caused a GPU hang does not trigger a
* context switch interrupt to the guest.
*/
1091 if (likely(workload->status == -EINPROGRESS)) {
1092 if (workload->req->fence.error == -EIO)
1093 workload->status = -EIO;
1095 workload->status = 0;
1098 if (!workload->status &&
1099 !(vgpu->resetting_eng & BIT(ring_id))) {
1100 update_guest_context(workload);
1102 for_each_set_bit(event, workload->pending_events,
1103 INTEL_GVT_EVENT_MAX)
1104 intel_vgpu_trigger_virtual_event(vgpu, event);
1107 i915_request_put(fetch_and_zero(&workload->req));
1110 gvt_dbg_sched("ring id %d complete workload %p status %d\n",
1111 ring_id, workload, workload->status);
1113 scheduler->current_workload[ring_id] = NULL;
1115 list_del_init(&workload->list);
1117 if (workload->status || vgpu->resetting_eng & BIT(ring_id)) {
/* if workload->status is not successful, the HW GPU hit a
* hang or something went wrong with i915/GVT, and GVT won't
* inject a context switch interrupt to the guest.
* To the guest this error is effectively a vGPU hang, so we
* should emulate a vGPU hang accordingly. If there are pending
* workloads which were already submitted by the guest, we should
* clean them up like the HW GPU does.
*
* if we are in the middle of an engine reset, the pending
* workloads won't be submitted to the HW GPU and will be
* cleaned up later during the reset process, so doing
* the workload cleanup here doesn't have any impact.
*/
1131 intel_vgpu_clean_workloads(vgpu, BIT(ring_id));
1134 workload->complete(workload);
1136 intel_vgpu_shadow_mm_unpin(workload);
1137 intel_vgpu_destroy_workload(workload);
1139 atomic_dec(&s->running_workload_num);
1140 wake_up(&scheduler->workload_complete_wq);
1142 if (gvt->scheduler.need_reschedule)
1143 intel_gvt_request_service(gvt, INTEL_GVT_REQUEST_EVENT_SCHED);
1145 mutex_unlock(&gvt->sched_lock);
1146 mutex_unlock(&vgpu->vgpu_lock);
1149 static int workload_thread(void *arg)
1151 struct intel_engine_cs *engine = arg;
1152 const bool need_force_wake = GRAPHICS_VER(engine->i915) >= 9;
1153 struct intel_gvt *gvt = engine->i915->gvt;
1154 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1155 struct intel_vgpu_workload *workload = NULL;
1156 struct intel_vgpu *vgpu = NULL;
1158 DEFINE_WAIT_FUNC(wait, woken_wake_function);
1160 gvt_dbg_core("workload thread for ring %s started\n", engine->name);
1162 while (!kthread_should_stop()) {
1163 intel_wakeref_t wakeref;
1165 add_wait_queue(&scheduler->waitq[engine->id], &wait);
1167 workload = pick_next_workload(gvt, engine);
1170 wait_woken(&wait, TASK_INTERRUPTIBLE,
1171 MAX_SCHEDULE_TIMEOUT);
1172 } while (!kthread_should_stop());
1173 remove_wait_queue(&scheduler->waitq[engine->id], &wait);
1178 gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
1179 engine->name, workload,
1180 workload->vgpu->id);
1182 wakeref = intel_runtime_pm_get(engine->uncore->rpm);
1184 gvt_dbg_sched("ring %s will dispatch workload %p\n",
1185 engine->name, workload);
1187 if (need_force_wake)
1188 intel_uncore_forcewake_get(engine->uncore,
/*
* Update the vReg of the vGPU which submitted this
* workload. The vGPU may use these registers for checking
* the context state. The values come from the GPU commands
* in this workload.
*/
1196 update_vreg_in_ctx(workload);
1198 ret = dispatch_workload(workload);
1201 vgpu = workload->vgpu;
1202 gvt_vgpu_err("fail to dispatch workload, skip\n");
1206 gvt_dbg_sched("ring %s wait workload %p\n",
1207 engine->name, workload);
1208 i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
1211 gvt_dbg_sched("will complete workload %p, status: %d\n",
1212 workload, workload->status);
1214 complete_current_workload(gvt, engine->id);
1216 if (need_force_wake)
1217 intel_uncore_forcewake_put(engine->uncore,
1220 intel_runtime_pm_put(engine->uncore->rpm, wakeref);
1221 if (ret && (vgpu_is_vm_unhealthy(ret)))
1222 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1227 void intel_gvt_wait_vgpu_idle(struct intel_vgpu *vgpu)
1229 struct intel_vgpu_submission *s = &vgpu->submission;
1230 struct intel_gvt *gvt = vgpu->gvt;
1231 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1233 if (atomic_read(&s->running_workload_num)) {
1234 gvt_dbg_sched("wait vgpu idle\n");
1236 wait_event(scheduler->workload_complete_wq,
1237 !atomic_read(&s->running_workload_num));
1241 void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
1243 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1244 struct intel_engine_cs *engine;
1245 enum intel_engine_id i;
1247 gvt_dbg_core("clean workload scheduler\n");
1249 for_each_engine(engine, gvt->gt, i) {
1250 atomic_notifier_chain_unregister(
1251 &engine->context_status_notifier,
1252 &gvt->shadow_ctx_notifier_block[i]);
1253 kthread_stop(scheduler->thread[i]);
1257 int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
1259 struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
1260 struct intel_engine_cs *engine;
1261 enum intel_engine_id i;
1264 gvt_dbg_core("init workload scheduler\n");
1266 init_waitqueue_head(&scheduler->workload_complete_wq);
1268 for_each_engine(engine, gvt->gt, i) {
1269 init_waitqueue_head(&scheduler->waitq[i]);
1271 scheduler->thread[i] = kthread_run(workload_thread, engine,
1272 "gvt:%s", engine->name);
1273 if (IS_ERR(scheduler->thread[i])) {
1274 gvt_err("fail to create workload thread\n");
1275 ret = PTR_ERR(scheduler->thread[i]);
1279 gvt->shadow_ctx_notifier_block[i].notifier_call =
1280 shadow_context_status_change;
1281 atomic_notifier_chain_register(&engine->context_status_notifier,
1282 &gvt->shadow_ctx_notifier_block[i]);
1288 intel_gvt_clean_workload_scheduler(gvt);
1293 i915_context_ppgtt_root_restore(struct intel_vgpu_submission *s,
1294 struct i915_ppgtt *ppgtt)
1298 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1299 set_dma_address(ppgtt->pd, s->i915_context_pml4);
1301 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1302 struct i915_page_directory * const pd =
1303 i915_pd_entry(ppgtt->pd, i);
1305 set_dma_address(pd, s->i915_context_pdps[i]);
/**
* intel_vgpu_clean_submission - free submission-related resource for vGPU
*
* This function is called when a vGPU is being destroyed.
*/
1317 void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
1319 struct intel_vgpu_submission *s = &vgpu->submission;
1320 struct intel_engine_cs *engine;
1321 enum intel_engine_id id;
1323 intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
1325 i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
1326 for_each_engine(engine, vgpu->gvt->gt, id)
1327 intel_context_put(s->shadow[id]);
1329 kmem_cache_destroy(s->workloads);
/**
* intel_vgpu_reset_submission - reset submission-related resource for vGPU
* @engine_mask: engines expected to be reset
*
* This function is called when a vGPU is being reset.
*/
1341 void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
1342 intel_engine_mask_t engine_mask)
1344 struct intel_vgpu_submission *s = &vgpu->submission;
1349 intel_vgpu_clean_workloads(vgpu, engine_mask);
1350 s->ops->reset(vgpu, engine_mask);
1354 i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
1355 struct i915_ppgtt *ppgtt)
1359 if (i915_vm_is_4lvl(&ppgtt->vm)) {
1360 s->i915_context_pml4 = px_dma(ppgtt->pd);
1362 for (i = 0; i < GEN8_3LVL_PDPES; i++) {
1363 struct i915_page_directory * const pd =
1364 i915_pd_entry(ppgtt->pd, i);
1366 s->i915_context_pdps[i] = px_dma(pd);
/**
* intel_vgpu_setup_submission - setup submission-related resource for vGPU
*
* This function is called when a vGPU is being created.
*
* Returns:
* Zero on success, negative error code if failed.
*/
1381 int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
1383 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1384 struct intel_vgpu_submission *s = &vgpu->submission;
1385 struct intel_engine_cs *engine;
1386 struct i915_ppgtt *ppgtt;
1387 enum intel_engine_id i;
1390 ppgtt = i915_ppgtt_create(to_gt(i915), I915_BO_ALLOC_PM_EARLY);
1392 return PTR_ERR(ppgtt);
1394 i915_context_ppgtt_root_save(s, ppgtt);
1396 for_each_engine(engine, vgpu->gvt->gt, i) {
1397 struct intel_context *ce;
1399 INIT_LIST_HEAD(&s->workload_q_head[i]);
1400 s->shadow[i] = ERR_PTR(-EINVAL);
1402 ce = intel_context_create(engine);
1405 goto out_shadow_ctx;
1408 i915_vm_put(ce->vm);
1409 ce->vm = i915_vm_get(&ppgtt->vm);
1410 intel_context_set_single_submission(ce);
1412 /* Max ring buffer size */
1413 if (!intel_uc_wants_guc_submission(&engine->gt->uc))
1414 ce->ring_size = SZ_2M;
1419 bitmap_zero(s->shadow_ctx_desc_updated, I915_NUM_ENGINES);
1421 s->workloads = kmem_cache_create_usercopy("gvt-g_vgpu_workload",
1422 sizeof(struct intel_vgpu_workload), 0,
1424 offsetof(struct intel_vgpu_workload, rb_tail),
1425 sizeof_field(struct intel_vgpu_workload, rb_tail),
1428 if (!s->workloads) {
1430 goto out_shadow_ctx;
1433 atomic_set(&s->running_workload_num, 0);
1434 bitmap_zero(s->tlb_handle_pending, I915_NUM_ENGINES);
1436 memset(s->last_ctx, 0, sizeof(s->last_ctx));
1438 i915_vm_put(&ppgtt->vm);
1442 i915_context_ppgtt_root_restore(s, ppgtt);
1443 for_each_engine(engine, vgpu->gvt->gt, i) {
1444 if (IS_ERR(s->shadow[i]))
1447 intel_context_put(s->shadow[i]);
1449 i915_vm_put(&ppgtt->vm);
/**
* intel_vgpu_select_submission_ops - select virtual submission interface
* @engine_mask: either ALL_ENGINES or target engine mask
* @interface: expected vGPU virtual submission interface
*
* This function is called when the guest configures the submission interface.
*
* Returns:
* Zero on success, negative error code if failed.
*/
1465 int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
1466 intel_engine_mask_t engine_mask,
1467 unsigned int interface)
1469 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
1470 struct intel_vgpu_submission *s = &vgpu->submission;
1471 const struct intel_vgpu_submission_ops *ops[] = {
1472 [INTEL_VGPU_EXECLIST_SUBMISSION] =
1473 &intel_vgpu_execlist_submission_ops,
1477 if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
1480 if (drm_WARN_ON(&i915->drm,
1481 interface == 0 && engine_mask != ALL_ENGINES))
1485 s->ops->clean(vgpu, engine_mask);
1487 if (interface == 0) {
1489 s->virtual_submission_interface = 0;
1491 gvt_dbg_core("vgpu%d: remove submission ops\n", vgpu->id);
1495 ret = ops[interface]->init(vgpu, engine_mask);
1499 s->ops = ops[interface];
1500 s->virtual_submission_interface = interface;
1503 gvt_dbg_core("vgpu%d: activate ops [ %s ]\n",
1504 vgpu->id, s->ops->name);
/**
* intel_vgpu_destroy_workload - destroy a vGPU workload
* @workload: workload to destroy
*
* This function is called when destroying a vGPU workload.
*/
1516 void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
1518 struct intel_vgpu_submission *s = &workload->vgpu->submission;
1520 intel_context_unpin(s->shadow[workload->engine->id]);
1521 release_shadow_batch_buffer(workload);
1522 release_shadow_wa_ctx(&workload->wa_ctx);
1524 if (!list_empty(&workload->lri_shadow_mm)) {
1525 struct intel_vgpu_mm *m, *mm;
1526 list_for_each_entry_safe(m, mm, &workload->lri_shadow_mm,
1528 list_del(&m->ppgtt_mm.link);
1529 intel_vgpu_mm_put(m);
1533 GEM_BUG_ON(!list_empty(&workload->lri_shadow_mm));
1534 if (workload->shadow_mm)
1535 intel_vgpu_mm_put(workload->shadow_mm);
1537 kmem_cache_free(s->workloads, workload);
1540 static struct intel_vgpu_workload *
1541 alloc_workload(struct intel_vgpu *vgpu)
1543 struct intel_vgpu_submission *s = &vgpu->submission;
1544 struct intel_vgpu_workload *workload;
1546 workload = kmem_cache_zalloc(s->workloads, GFP_KERNEL);
1548 return ERR_PTR(-ENOMEM);
1550 INIT_LIST_HEAD(&workload->list);
1551 INIT_LIST_HEAD(&workload->shadow_bb);
1552 INIT_LIST_HEAD(&workload->lri_shadow_mm);
1554 init_waitqueue_head(&workload->shadow_ctx_status_wq);
1555 atomic_set(&workload->shadow_ctx_active, 0);
1557 workload->status = -EINPROGRESS;
1558 workload->vgpu = vgpu;
1563 #define RING_CTX_OFF(x) \
1564 offsetof(struct execlist_ring_context, x)
1566 static void read_guest_pdps(struct intel_vgpu *vgpu,
1567 u64 ring_context_gpa, u32 pdp[8])
1572 gpa = ring_context_gpa + RING_CTX_OFF(pdps[0].val);
1574 for (i = 0; i < 8; i++)
1575 intel_gvt_read_gpa(vgpu,
1576 gpa + i * 8, &pdp[7 - i], 4);
1579 static int prepare_mm(struct intel_vgpu_workload *workload)
1581 struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
1582 struct intel_vgpu_mm *mm;
1583 struct intel_vgpu *vgpu = workload->vgpu;
1584 enum intel_gvt_gtt_type root_entry_type;
1585 u64 pdps[GVT_RING_CTX_NR_PDPS];
1587 switch (desc->addressing_mode) {
1588 case 1: /* legacy 32-bit */
1589 root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
1591 case 3: /* legacy 64-bit */
1592 root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
1595 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
1599 read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
1601 mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
1605 workload->shadow_mm = mm;
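/* two context descriptors refer to the same guest context only when both the context_id and the LRCA match */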
1609 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
1610 ((a)->lrca == (b)->lrca))
/**
* intel_vgpu_create_workload - create a vGPU workload
* @engine: the engine
* @desc: a guest context descriptor
*
* This function is called when creating a vGPU workload.
*
* Returns:
* struct intel_vgpu_workload * on success, or an ERR_PTR-encoded negative
* error code on failure.
*/
1625 struct intel_vgpu_workload *
1626 intel_vgpu_create_workload(struct intel_vgpu *vgpu,
1627 const struct intel_engine_cs *engine,
1628 struct execlist_ctx_descriptor_format *desc)
1630 struct intel_vgpu_submission *s = &vgpu->submission;
1631 struct list_head *q = workload_q_head(vgpu, engine);
1632 struct intel_vgpu_workload *last_workload = NULL;
1633 struct intel_vgpu_workload *workload = NULL;
1634 u64 ring_context_gpa;
1635 u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
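/* the ring context (register state) lives in the page following the per-process HWSP, hence lrca + 1 */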
1639 ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
1640 (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
1641 if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
1642 gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
1643 return ERR_PTR(-EINVAL);
1646 intel_gvt_read_gpa(vgpu, ring_context_gpa +
1647 RING_CTX_OFF(ring_header.val), &head, 4);
1649 intel_gvt_read_gpa(vgpu, ring_context_gpa +
1650 RING_CTX_OFF(ring_tail.val), &tail, 4);
1654 head &= RB_HEAD_OFF_MASK;
1655 tail &= RB_TAIL_OFF_MASK;
1657 list_for_each_entry_reverse(last_workload, q, list) {
1659 if (same_context(&last_workload->ctx_desc, desc)) {
1660 gvt_dbg_el("ring %s cur workload == last\n",
1662 gvt_dbg_el("ctx head %x real head %lx\n", head,
1663 last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
*/
1668 head = last_workload->rb_tail;
1673 gvt_dbg_el("ring %s begin a new workload\n", engine->name);
1675 /* record some ring buffer register values for scan and shadow */
1676 intel_gvt_read_gpa(vgpu, ring_context_gpa +
1677 RING_CTX_OFF(rb_start.val), &start, 4);
1678 intel_gvt_read_gpa(vgpu, ring_context_gpa +
1679 RING_CTX_OFF(rb_ctrl.val), &ctl, 4);
1680 intel_gvt_read_gpa(vgpu, ring_context_gpa +
1681 RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
1683 if (!intel_gvt_ggtt_validate_range(vgpu, start,
1684 _RING_CTL_BUF_SIZE(ctl))) {
1685 gvt_vgpu_err("context contain invalid rb at: 0x%x\n", start);
1686 return ERR_PTR(-EINVAL);
1689 workload = alloc_workload(vgpu);
1690 if (IS_ERR(workload))
1693 workload->engine = engine;
1694 workload->ctx_desc = *desc;
1695 workload->ring_context_gpa = ring_context_gpa;
1696 workload->rb_head = head;
1697 workload->guest_rb_head = guest_head;
1698 workload->rb_tail = tail;
1699 workload->rb_start = start;
1700 workload->rb_ctl = ctl;
1702 if (engine->id == RCS0) {
1703 intel_gvt_read_gpa(vgpu, ring_context_gpa +
1704 RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
1705 intel_gvt_read_gpa(vgpu, ring_context_gpa +
1706 RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
1708 workload->wa_ctx.indirect_ctx.guest_gma =
1709 indirect_ctx & INDIRECT_CTX_ADDR_MASK;
1710 workload->wa_ctx.indirect_ctx.size =
1711 (indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
1714 if (workload->wa_ctx.indirect_ctx.size != 0) {
1715 if (!intel_gvt_ggtt_validate_range(vgpu,
1716 workload->wa_ctx.indirect_ctx.guest_gma,
1717 workload->wa_ctx.indirect_ctx.size)) {
1718 gvt_vgpu_err("invalid wa_ctx at: 0x%lx\n",
1719 workload->wa_ctx.indirect_ctx.guest_gma);
1720 kmem_cache_free(s->workloads, workload);
1721 return ERR_PTR(-EINVAL);
1725 workload->wa_ctx.per_ctx.guest_gma =
1726 per_ctx & PER_CTX_ADDR_MASK;
1727 workload->wa_ctx.per_ctx.valid = per_ctx & 1;
1728 if (workload->wa_ctx.per_ctx.valid) {
1729 if (!intel_gvt_ggtt_validate_range(vgpu,
1730 workload->wa_ctx.per_ctx.guest_gma,
1732 gvt_vgpu_err("invalid per_ctx at: 0x%lx\n",
1733 workload->wa_ctx.per_ctx.guest_gma);
1734 kmem_cache_free(s->workloads, workload);
1735 return ERR_PTR(-EINVAL);
1740 gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
1741 workload, engine->name, head, tail, start, ctl);
1743 ret = prepare_mm(workload);
1745 kmem_cache_free(s->workloads, workload);
1746 return ERR_PTR(ret);
/* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow.
*/
1752 if (list_empty(q)) {
1753 intel_wakeref_t wakeref;
1755 with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
1756 ret = intel_gvt_scan_and_shadow_workload(workload);
1760 if (vgpu_is_vm_unhealthy(ret))
1761 enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
1762 intel_vgpu_destroy_workload(workload);
1763 return ERR_PTR(ret);
1766 ret = intel_context_pin(s->shadow[engine->id]);
1768 intel_vgpu_destroy_workload(workload);
1769 return ERR_PTR(ret);
/**
* intel_vgpu_queue_workload - queue a vGPU workload
* @workload: the workload to queue
*/
1779 void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
1781 list_add_tail(&workload->list,
1782 workload_q_head(workload->vgpu, workload->engine));
1783 intel_gvt_kick_schedule(workload->vgpu->gvt);
1784 wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);