4 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
13 * The above copyright notice and this permission notice (including the next
14 * paragraph) shall be included in all copies or substantial portions of the
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
18 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
20 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
21 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
22 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
26 * Zhi Wang <zhi.a.wang@intel.com>
27 * Zhenyu Wang <zhenyuw@linux.intel.com>
28 * Xiao Zheng <xiao.zheng@intel.com>
31 * Min He <min.he@intel.com>
32 * Bing Niu <bing.niu@intel.com>
38 #include "i915_pvinfo.h"
41 #include "gt/intel_gt_regs.h"
43 #if defined(VERBOSE_DEBUG)
44 #define gvt_vdbg_mm(fmt, args...) gvt_dbg_mm(fmt, ##args)
46 #define gvt_vdbg_mm(fmt, args...)
49 static bool enable_out_of_sync = false;
50 static int preallocated_oos_pages = 8192;
52 static bool intel_gvt_is_valid_gfn(struct intel_vgpu *vgpu, unsigned long gfn)
54 struct kvm *kvm = vgpu->vfio_device.kvm;
58 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
61 idx = srcu_read_lock(&kvm->srcu);
62 ret = kvm_is_visible_gfn(kvm, gfn);
63 srcu_read_unlock(&kvm->srcu, idx);
69 * validate a gm address and related range size,
70 * translate it to host gm address
72 bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
75 return vgpu_gmadr_is_valid(vgpu, addr);
77 if (vgpu_gmadr_is_aperture(vgpu, addr) &&
78 vgpu_gmadr_is_aperture(vgpu, addr + size - 1))
80 else if (vgpu_gmadr_is_hidden(vgpu, addr) &&
81 vgpu_gmadr_is_hidden(vgpu, addr + size - 1))
84 gvt_dbg_mm("Invalid ggtt range at 0x%llx, size: 0x%x\n",
89 /* translate a guest gmadr to host gmadr */
90 int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
92 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
94 if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
95 "invalid guest gmadr %llx\n", g_addr))
98 if (vgpu_gmadr_is_aperture(vgpu, g_addr))
99 *h_addr = vgpu_aperture_gmadr_base(vgpu)
100 + (g_addr - vgpu_aperture_offset(vgpu));
102 *h_addr = vgpu_hidden_gmadr_base(vgpu)
103 + (g_addr - vgpu_hidden_offset(vgpu));
107 /* translate a host gmadr to guest gmadr */
108 int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
110 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
112 if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
113 "invalid host gmadr %llx\n", h_addr))
116 if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
117 *g_addr = vgpu_aperture_gmadr_base(vgpu)
118 + (h_addr - gvt_aperture_gmadr_base(vgpu->gvt));
120 *g_addr = vgpu_hidden_gmadr_base(vgpu)
121 + (h_addr - gvt_hidden_gmadr_base(vgpu->gvt));
125 int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
126 unsigned long *h_index)
131 ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
136 *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
140 int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
141 unsigned long *g_index)
146 ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
151 *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
155 #define gtt_type_is_entry(type) \
156 (type > GTT_TYPE_INVALID && type < GTT_TYPE_PPGTT_ENTRY \
157 && type != GTT_TYPE_PPGTT_PTE_ENTRY \
158 && type != GTT_TYPE_PPGTT_ROOT_ENTRY)
160 #define gtt_type_is_pt(type) \
161 (type >= GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX)
163 #define gtt_type_is_pte_pt(type) \
164 (type == GTT_TYPE_PPGTT_PTE_PT)
166 #define gtt_type_is_root_pointer(type) \
167 (gtt_type_is_entry(type) && type > GTT_TYPE_PPGTT_ROOT_ENTRY)
169 #define gtt_init_entry(e, t, p, v) do { \
172 memcpy(&(e)->val64, &v, sizeof(v)); \
176 * Mappings between GTT_TYPE* enumerations.
177 * The following information can be looked up for a given type:
178 * - type of next level page table
179 * - type of entry inside this level page table
180 * - type of entry with PSE set
182 * If the given type doesn't carry such information, GTT_TYPE_INVALID
183 * is returned. For example, an L4 root entry has no PSE bit, so asking
184 * an L4 root entry type for its PSE type yields GTT_TYPE_INVALID, and
185 * a PTE page table has no next-level page table type, so asking for
186 * that yields GTT_TYPE_INVALID as well (see the hedged walk sketch
187 * after the accessor helpers below). This is useful when traversing a
191 struct gtt_type_table_entry {
198 #define GTT_TYPE_TABLE_ENTRY(type, e_type, cpt_type, npt_type, pse_type) \
200 .entry_type = e_type, \
201 .pt_type = cpt_type, \
202 .next_pt_type = npt_type, \
203 .pse_entry_type = pse_type, \
206 static const struct gtt_type_table_entry gtt_type_table[] = {
207 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
208 GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
210 GTT_TYPE_PPGTT_PML4_PT,
212 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_PT,
213 GTT_TYPE_PPGTT_PML4_ENTRY,
214 GTT_TYPE_PPGTT_PML4_PT,
215 GTT_TYPE_PPGTT_PDP_PT,
217 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PML4_ENTRY,
218 GTT_TYPE_PPGTT_PML4_ENTRY,
219 GTT_TYPE_PPGTT_PML4_PT,
220 GTT_TYPE_PPGTT_PDP_PT,
222 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_PT,
223 GTT_TYPE_PPGTT_PDP_ENTRY,
224 GTT_TYPE_PPGTT_PDP_PT,
225 GTT_TYPE_PPGTT_PDE_PT,
226 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
227 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
228 GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
230 GTT_TYPE_PPGTT_PDE_PT,
231 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
232 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDP_ENTRY,
233 GTT_TYPE_PPGTT_PDP_ENTRY,
234 GTT_TYPE_PPGTT_PDP_PT,
235 GTT_TYPE_PPGTT_PDE_PT,
236 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
237 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_PT,
238 GTT_TYPE_PPGTT_PDE_ENTRY,
239 GTT_TYPE_PPGTT_PDE_PT,
240 GTT_TYPE_PPGTT_PTE_PT,
241 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
242 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PDE_ENTRY,
243 GTT_TYPE_PPGTT_PDE_ENTRY,
244 GTT_TYPE_PPGTT_PDE_PT,
245 GTT_TYPE_PPGTT_PTE_PT,
246 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
247 /* We treat the IPS bit as 'PSE' at the PTE level. */
248 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_PT,
249 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
250 GTT_TYPE_PPGTT_PTE_PT,
252 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
253 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_4K_ENTRY,
254 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
255 GTT_TYPE_PPGTT_PTE_PT,
257 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
258 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_64K_ENTRY,
259 GTT_TYPE_PPGTT_PTE_4K_ENTRY,
260 GTT_TYPE_PPGTT_PTE_PT,
262 GTT_TYPE_PPGTT_PTE_64K_ENTRY),
263 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_2M_ENTRY,
264 GTT_TYPE_PPGTT_PDE_ENTRY,
265 GTT_TYPE_PPGTT_PDE_PT,
267 GTT_TYPE_PPGTT_PTE_2M_ENTRY),
268 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_PTE_1G_ENTRY,
269 GTT_TYPE_PPGTT_PDP_ENTRY,
270 GTT_TYPE_PPGTT_PDP_PT,
272 GTT_TYPE_PPGTT_PTE_1G_ENTRY),
273 GTT_TYPE_TABLE_ENTRY(GTT_TYPE_GGTT_PTE,
280 static inline int get_next_pt_type(int type)
282 return gtt_type_table[type].next_pt_type;
285 static inline int get_entry_type(int type)
287 return gtt_type_table[type].entry_type;
290 static inline int get_pse_type(int type)
292 return gtt_type_table[type].pse_entry_type;
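/*
 * Hedged sketch (illustration only; example_next_entry_type is not part of
 * the original driver): how the type table above is consulted when
 * descending one level of the page-table hierarchy, e.g. while shadowing
 * or invalidating an entry.
 */
static inline int example_next_entry_type(int cur_entry_type)
{
	/* The page table type that an entry of cur_entry_type points to. */
	int next_pt = get_next_pt_type(cur_entry_type);

	/* The type of the entries stored inside that next-level table. */
	return gtt_type_is_pt(next_pt) ? get_entry_type(next_pt) :
					 GTT_TYPE_INVALID;
}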
295 static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
297 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
302 static void ggtt_invalidate(struct intel_gt *gt)
304 mmio_hw_access_pre(gt);
305 intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
306 mmio_hw_access_post(gt);
309 static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
311 void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
316 static inline int gtt_get_entry64(void *pt,
317 struct intel_gvt_gtt_entry *e,
318 unsigned long index, bool hypervisor_access, unsigned long gpa,
319 struct intel_vgpu *vgpu)
321 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
324 if (WARN_ON(info->gtt_entry_size != 8))
327 if (hypervisor_access) {
328 ret = intel_gvt_read_gpa(vgpu, gpa +
329 (index << info->gtt_entry_size_shift),
334 e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
336 e->val64 = *((u64 *)pt + index);
341 static inline int gtt_set_entry64(void *pt,
342 struct intel_gvt_gtt_entry *e,
343 unsigned long index, bool hypervisor_access, unsigned long gpa,
344 struct intel_vgpu *vgpu)
346 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
349 if (WARN_ON(info->gtt_entry_size != 8))
352 if (hypervisor_access) {
353 ret = intel_gvt_write_gpa(vgpu, gpa +
354 (index << info->gtt_entry_size_shift),
359 write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
361 *((u64 *)pt + index) = e->val64;
368 #define ADDR_1G_MASK GENMASK_ULL(GTT_HAW - 1, 30)
369 #define ADDR_2M_MASK GENMASK_ULL(GTT_HAW - 1, 21)
370 #define ADDR_64K_MASK GENMASK_ULL(GTT_HAW - 1, 16)
371 #define ADDR_4K_MASK GENMASK_ULL(GTT_HAW - 1, 12)
373 #define GTT_SPTE_FLAG_MASK GENMASK_ULL(62, 52)
374 #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* split 64K gtt entry */
376 #define GTT_64K_PTE_STRIDE 16
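/*
 * Hedged example (illustration only; example_2m_entry_to_pfn is not part
 * of the original driver): extracting the page frame number from a 2MB
 * entry with the masks above. Only bits [GTT_HAW-1:21] of a 2MB entry
 * carry the address, so the low flag/offset bits are masked off before
 * shifting down to a 4KB page frame number, mirroring gen8_gtt_get_pfn()
 * below.
 */
static inline unsigned long example_2m_entry_to_pfn(u64 val64)
{
	return (val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
}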
378 static unsigned long gen8_gtt_get_pfn(struct intel_gvt_gtt_entry *e)
382 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY)
383 pfn = (e->val64 & ADDR_1G_MASK) >> PAGE_SHIFT;
384 else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY)
385 pfn = (e->val64 & ADDR_2M_MASK) >> PAGE_SHIFT;
386 else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY)
387 pfn = (e->val64 & ADDR_64K_MASK) >> PAGE_SHIFT;
389 pfn = (e->val64 & ADDR_4K_MASK) >> PAGE_SHIFT;
393 static void gen8_gtt_set_pfn(struct intel_gvt_gtt_entry *e, unsigned long pfn)
395 if (e->type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
396 e->val64 &= ~ADDR_1G_MASK;
397 pfn &= (ADDR_1G_MASK >> PAGE_SHIFT);
398 } else if (e->type == GTT_TYPE_PPGTT_PTE_2M_ENTRY) {
399 e->val64 &= ~ADDR_2M_MASK;
400 pfn &= (ADDR_2M_MASK >> PAGE_SHIFT);
401 } else if (e->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY) {
402 e->val64 &= ~ADDR_64K_MASK;
403 pfn &= (ADDR_64K_MASK >> PAGE_SHIFT);
405 e->val64 &= ~ADDR_4K_MASK;
406 pfn &= (ADDR_4K_MASK >> PAGE_SHIFT);
409 e->val64 |= (pfn << PAGE_SHIFT);
412 static bool gen8_gtt_test_pse(struct intel_gvt_gtt_entry *e)
414 return !!(e->val64 & _PAGE_PSE);
417 static void gen8_gtt_clear_pse(struct intel_gvt_gtt_entry *e)
419 if (gen8_gtt_test_pse(e)) {
421 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
422 e->val64 &= ~_PAGE_PSE;
423 e->type = GTT_TYPE_PPGTT_PDE_ENTRY;
425 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
426 e->type = GTT_TYPE_PPGTT_PDP_ENTRY;
427 e->val64 &= ~_PAGE_PSE;
435 static bool gen8_gtt_test_ips(struct intel_gvt_gtt_entry *e)
437 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
440 return !!(e->val64 & GEN8_PDE_IPS_64K);
443 static void gen8_gtt_clear_ips(struct intel_gvt_gtt_entry *e)
445 if (GEM_WARN_ON(e->type != GTT_TYPE_PPGTT_PDE_ENTRY))
448 e->val64 &= ~GEN8_PDE_IPS_64K;
451 static bool gen8_gtt_test_present(struct intel_gvt_gtt_entry *e)
454 * i915 writes the PDP root pointer registers without the present bit
455 * set, and that also works, so we need to treat a root pointer entry
458 if (e->type == GTT_TYPE_PPGTT_ROOT_L3_ENTRY
459 || e->type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
460 return (e->val64 != 0);
462 return (e->val64 & GEN8_PAGE_PRESENT);
465 static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
467 e->val64 &= ~GEN8_PAGE_PRESENT;
470 static void gtt_entry_set_present(struct intel_gvt_gtt_entry *e)
472 e->val64 |= GEN8_PAGE_PRESENT;
475 static bool gen8_gtt_test_64k_splited(struct intel_gvt_gtt_entry *e)
477 return !!(e->val64 & GTT_SPTE_FLAG_64K_SPLITED);
480 static void gen8_gtt_set_64k_splited(struct intel_gvt_gtt_entry *e)
482 e->val64 |= GTT_SPTE_FLAG_64K_SPLITED;
485 static void gen8_gtt_clear_64k_splited(struct intel_gvt_gtt_entry *e)
487 e->val64 &= ~GTT_SPTE_FLAG_64K_SPLITED;
491 * Per-platform GMA routines.
493 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
495 unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);
497 trace_gma_index(__func__, gma, x);
501 #define DEFINE_PPGTT_GMA_TO_INDEX(prefix, ename, exp) \
502 static unsigned long prefix##_gma_to_##ename##_index(unsigned long gma) \
504 unsigned long x = (exp); \
505 trace_gma_index(__func__, gma, x); \
509 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pte, (gma >> 12 & 0x1ff));
510 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pde, (gma >> 21 & 0x1ff));
511 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3));
512 DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff));
513 DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff));
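/*
 * Hedged worked example (illustration only): for a gen8 4-level PPGTT,
 * the macros above decompose a graphics memory address into four 9-bit
 * indices above the 4KB page offset:
 *
 *	pml4 index = gma >> 39 & 0x1ff
 *	pdp  index = gma >> 30 & 0x1ff	(l4_pdp; l3_pdp uses only 2 bits)
 *	pde  index = gma >> 21 & 0x1ff
 *	pte  index = gma >> 12 & 0x1ff
 */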
515 static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = {
516 .get_entry = gtt_get_entry64,
517 .set_entry = gtt_set_entry64,
518 .clear_present = gtt_entry_clear_present,
519 .set_present = gtt_entry_set_present,
520 .test_present = gen8_gtt_test_present,
521 .test_pse = gen8_gtt_test_pse,
522 .clear_pse = gen8_gtt_clear_pse,
523 .clear_ips = gen8_gtt_clear_ips,
524 .test_ips = gen8_gtt_test_ips,
525 .clear_64k_splited = gen8_gtt_clear_64k_splited,
526 .set_64k_splited = gen8_gtt_set_64k_splited,
527 .test_64k_splited = gen8_gtt_test_64k_splited,
528 .get_pfn = gen8_gtt_get_pfn,
529 .set_pfn = gen8_gtt_set_pfn,
532 static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = {
533 .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index,
534 .gma_to_pte_index = gen8_gma_to_pte_index,
535 .gma_to_pde_index = gen8_gma_to_pde_index,
536 .gma_to_l3_pdp_index = gen8_gma_to_l3_pdp_index,
537 .gma_to_l4_pdp_index = gen8_gma_to_l4_pdp_index,
538 .gma_to_pml4_index = gen8_gma_to_pml4_index,
541 /* Update the entry type according to the PSE and IPS bits. */
542 static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops,
543 struct intel_gvt_gtt_entry *entry, bool ips)
545 switch (entry->type) {
546 case GTT_TYPE_PPGTT_PDE_ENTRY:
547 case GTT_TYPE_PPGTT_PDP_ENTRY:
548 if (pte_ops->test_pse(entry))
549 entry->type = get_pse_type(entry->type);
551 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
553 entry->type = get_pse_type(entry->type);
556 GEM_BUG_ON(!gtt_type_is_entry(entry->type));
559 GEM_BUG_ON(entry->type == GTT_TYPE_INVALID);
565 static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm,
566 struct intel_gvt_gtt_entry *entry, unsigned long index,
569 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
571 GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT);
573 entry->type = mm->ppgtt_mm.root_entry_type;
574 pte_ops->get_entry(guest ? mm->ppgtt_mm.guest_pdps :
575 mm->ppgtt_mm.shadow_pdps,
576 entry, index, false, 0, mm->vgpu);
577 update_entry_type_for_real(pte_ops, entry, false);
580 static inline void ppgtt_get_guest_root_entry(struct intel_vgpu_mm *mm,
581 struct intel_gvt_gtt_entry *entry, unsigned long index)
583 _ppgtt_get_root_entry(mm, entry, index, true);
586 static inline void ppgtt_get_shadow_root_entry(struct intel_vgpu_mm *mm,
587 struct intel_gvt_gtt_entry *entry, unsigned long index)
589 _ppgtt_get_root_entry(mm, entry, index, false);
592 static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm,
593 struct intel_gvt_gtt_entry *entry, unsigned long index,
596 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
598 pte_ops->set_entry(guest ? mm->ppgtt_mm.guest_pdps :
599 mm->ppgtt_mm.shadow_pdps,
600 entry, index, false, 0, mm->vgpu);
603 static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm,
604 struct intel_gvt_gtt_entry *entry, unsigned long index)
606 _ppgtt_set_root_entry(mm, entry, index, false);
609 static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm,
610 struct intel_gvt_gtt_entry *entry, unsigned long index)
612 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
614 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
616 entry->type = GTT_TYPE_GGTT_PTE;
617 pte_ops->get_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
621 static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm,
622 struct intel_gvt_gtt_entry *entry, unsigned long index)
624 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
626 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
628 pte_ops->set_entry(mm->ggtt_mm.virtual_ggtt, entry, index,
632 static void ggtt_get_host_entry(struct intel_vgpu_mm *mm,
633 struct intel_gvt_gtt_entry *entry, unsigned long index)
635 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
637 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
639 pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu);
642 static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
643 struct intel_gvt_gtt_entry *entry, unsigned long index)
645 const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
646 unsigned long offset = index;
648 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
650 if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
651 offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
652 mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
653 } else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
654 offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
655 mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
658 pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
662 * PPGTT shadow page table helpers.
664 static inline int ppgtt_spt_get_entry(
665 struct intel_vgpu_ppgtt_spt *spt,
666 void *page_table, int type,
667 struct intel_gvt_gtt_entry *e, unsigned long index,
670 struct intel_gvt *gvt = spt->vgpu->gvt;
671 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
674 e->type = get_entry_type(type);
676 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
679 ret = ops->get_entry(page_table, e, index, guest,
680 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
685 update_entry_type_for_real(ops, e, guest ?
686 spt->guest_page.pde_ips : false);
688 gvt_vdbg_mm("read ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
689 type, e->type, index, e->val64);
693 static inline int ppgtt_spt_set_entry(
694 struct intel_vgpu_ppgtt_spt *spt,
695 void *page_table, int type,
696 struct intel_gvt_gtt_entry *e, unsigned long index,
699 struct intel_gvt *gvt = spt->vgpu->gvt;
700 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
702 if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
705 gvt_vdbg_mm("set ppgtt entry, spt type %d, entry type %d, index %lu, value %llx\n",
706 type, e->type, index, e->val64);
708 return ops->set_entry(page_table, e, index, guest,
709 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
713 #define ppgtt_get_guest_entry(spt, e, index) \
714 ppgtt_spt_get_entry(spt, NULL, \
715 spt->guest_page.type, e, index, true)
717 #define ppgtt_set_guest_entry(spt, e, index) \
718 ppgtt_spt_set_entry(spt, NULL, \
719 spt->guest_page.type, e, index, true)
721 #define ppgtt_get_shadow_entry(spt, e, index) \
722 ppgtt_spt_get_entry(spt, spt->shadow_page.vaddr, \
723 spt->shadow_page.type, e, index, false)
725 #define ppgtt_set_shadow_entry(spt, e, index) \
726 ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
727 spt->shadow_page.type, e, index, false)
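/*
 * Illustrative note (not from the original file): each intel_vgpu_ppgtt_spt
 * pairs a write-protected guest page table page (guest_page, addressed by
 * gfn) with the shadow page actually walked by hardware (shadow_page,
 * addressed by mfn). The macros above access either side through the same
 * pte_ops, differing only in the backing page and the "guest" flag.
 */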
729 static void *alloc_spt(gfp_t gfp_mask)
731 struct intel_vgpu_ppgtt_spt *spt;
733 spt = kzalloc(sizeof(*spt), gfp_mask);
737 spt->shadow_page.page = alloc_page(gfp_mask);
738 if (!spt->shadow_page.page) {
745 static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
747 __free_page(spt->shadow_page.page);
751 static int detach_oos_page(struct intel_vgpu *vgpu,
752 struct intel_vgpu_oos_page *oos_page);
754 static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
756 struct device *kdev = spt->vgpu->gvt->gt->i915->drm.dev;
758 trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
760 dma_unmap_page(kdev, spt->shadow_page.mfn << I915_GTT_PAGE_SHIFT, 4096,
763 radix_tree_delete(&spt->vgpu->gtt.spt_tree, spt->shadow_page.mfn);
765 if (spt->guest_page.gfn) {
766 if (spt->guest_page.oos_page)
767 detach_oos_page(spt->vgpu, spt->guest_page.oos_page);
769 intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);
772 list_del_init(&spt->post_shadow_list);
776 static void ppgtt_free_all_spt(struct intel_vgpu *vgpu)
778 struct intel_vgpu_ppgtt_spt *spt, *spn;
779 struct radix_tree_iter iter;
784 radix_tree_for_each_slot(slot, &vgpu->gtt.spt_tree, &iter, 0) {
785 spt = radix_tree_deref_slot(slot);
786 list_move(&spt->post_shadow_list, &all_spt);
790 list_for_each_entry_safe(spt, spn, &all_spt, post_shadow_list)
794 static int ppgtt_handle_guest_write_page_table_bytes(
795 struct intel_vgpu_ppgtt_spt *spt,
796 u64 pa, void *p_data, int bytes);
798 static int ppgtt_write_protection_handler(
799 struct intel_vgpu_page_track *page_track,
800 u64 gpa, void *data, int bytes)
802 struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
806 if (bytes != 4 && bytes != 8)
809 ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
815 /* Find a spt by guest gfn. */
816 static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
817 struct intel_vgpu *vgpu, unsigned long gfn)
819 struct intel_vgpu_page_track *track;
821 track = intel_vgpu_find_page_track(vgpu, gfn);
822 if (track && track->handler == ppgtt_write_protection_handler)
823 return track->priv_data;
828 /* Find the spt by shadow page mfn. */
829 static inline struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_mfn(
830 struct intel_vgpu *vgpu, unsigned long mfn)
832 return radix_tree_lookup(&vgpu->gtt.spt_tree, mfn);
835 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
837 /* Allocate shadow page table without guest page. */
838 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
839 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
841 struct device *kdev = vgpu->gvt->gt->i915->drm.dev;
842 struct intel_vgpu_ppgtt_spt *spt = NULL;
847 spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
849 if (reclaim_one_ppgtt_mm(vgpu->gvt))
852 gvt_vgpu_err("fail to allocate ppgtt shadow page\n");
853 return ERR_PTR(-ENOMEM);
857 atomic_set(&spt->refcount, 1);
858 INIT_LIST_HEAD(&spt->post_shadow_list);
863 spt->shadow_page.type = type;
864 daddr = dma_map_page(kdev, spt->shadow_page.page,
865 0, 4096, DMA_BIDIRECTIONAL);
866 if (dma_mapping_error(kdev, daddr)) {
867 gvt_vgpu_err("fail to map dma addr\n");
871 spt->shadow_page.vaddr = page_address(spt->shadow_page.page);
872 spt->shadow_page.mfn = daddr >> I915_GTT_PAGE_SHIFT;
874 ret = radix_tree_insert(&vgpu->gtt.spt_tree, spt->shadow_page.mfn, spt);
881 dma_unmap_page(kdev, daddr, PAGE_SIZE, DMA_BIDIRECTIONAL);
887 /* Allocate a shadow page table associated with a specific gfn. */
888 static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt_gfn(
889 struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type,
890 unsigned long gfn, bool guest_pde_ips)
892 struct intel_vgpu_ppgtt_spt *spt;
895 spt = ppgtt_alloc_spt(vgpu, type);
902 ret = intel_vgpu_register_page_track(vgpu, gfn,
903 ppgtt_write_protection_handler, spt);
909 spt->guest_page.type = type;
910 spt->guest_page.gfn = gfn;
911 spt->guest_page.pde_ips = guest_pde_ips;
913 trace_spt_alloc(vgpu->id, spt, type, spt->shadow_page.mfn, gfn);
918 #define pt_entry_size_shift(spt) \
919 ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)
921 #define pt_entries(spt) \
922 (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
924 #define for_each_present_guest_entry(spt, e, i) \
925 for (i = 0; i < pt_entries(spt); \
926 i += spt->guest_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
927 if (!ppgtt_get_guest_entry(spt, e, i) && \
928 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
930 #define for_each_present_shadow_entry(spt, e, i) \
931 for (i = 0; i < pt_entries(spt); \
932 i += spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1) \
933 if (!ppgtt_get_shadow_entry(spt, e, i) && \
934 spt->vgpu->gvt->gtt.pte_ops->test_present(e))
936 #define for_each_shadow_entry(spt, e, i) \
937 for (i = 0; i < pt_entries(spt); \
938 i += (spt->shadow_page.pde_ips ? GTT_64K_PTE_STRIDE : 1)) \
939 if (!ppgtt_get_shadow_entry(spt, e, i))
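/*
 * Hedged usage sketch (illustration only): the iterators above step by
 * GTT_64K_PTE_STRIDE (16) when the page table is in 64K mode, since only
 * every 16th PTE slot is meaningful there. A typical walk over the present
 * shadow entries looks like:
 *
 *	struct intel_gvt_gtt_entry e;
 *	unsigned long i;
 *
 *	for_each_present_shadow_entry(spt, &e, i)
 *		ppgtt_invalidate_pte(spt, &e);
 */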
941 static inline void ppgtt_get_spt(struct intel_vgpu_ppgtt_spt *spt)
943 int v = atomic_read(&spt->refcount);
945 trace_spt_refcount(spt->vgpu->id, "inc", spt, v, (v + 1));
946 atomic_inc(&spt->refcount);
949 static inline int ppgtt_put_spt(struct intel_vgpu_ppgtt_spt *spt)
951 int v = atomic_read(&spt->refcount);
953 trace_spt_refcount(spt->vgpu->id, "dec", spt, v, (v - 1));
954 return atomic_dec_return(&spt->refcount);
957 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
959 static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
960 struct intel_gvt_gtt_entry *e)
962 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
963 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
964 struct intel_vgpu_ppgtt_spt *s;
965 enum intel_gvt_gtt_type cur_pt_type;
967 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(e->type)));
969 if (e->type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY
970 && e->type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY) {
971 cur_pt_type = get_next_pt_type(e->type);
973 if (!gtt_type_is_pt(cur_pt_type) ||
974 !gtt_type_is_pt(cur_pt_type + 1)) {
975 drm_WARN(&i915->drm, 1,
976 "Invalid page table type, cur_pt_type is: %d\n",
983 if (ops->get_pfn(e) ==
984 vgpu->gtt.scratch_pt[cur_pt_type].page_mfn)
987 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
989 gvt_vgpu_err("fail to find shadow page: mfn: 0x%lx\n",
993 return ppgtt_invalidate_spt(s);
996 static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt,
997 struct intel_gvt_gtt_entry *entry)
999 struct intel_vgpu *vgpu = spt->vgpu;
1000 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1004 pfn = ops->get_pfn(entry);
1005 type = spt->shadow_page.type;
1007 /* Uninitialized spte or unshadowed spte. */
1008 if (!pfn || pfn == vgpu->gtt.scratch_pt[type].page_mfn)
1011 intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
1014 static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt)
1016 struct intel_vgpu *vgpu = spt->vgpu;
1017 struct intel_gvt_gtt_entry e;
1018 unsigned long index;
1021 trace_spt_change(spt->vgpu->id, "die", spt,
1022 spt->guest_page.gfn, spt->shadow_page.type);
1024 if (ppgtt_put_spt(spt) > 0)
1027 for_each_present_shadow_entry(spt, &e, index) {
1029 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1030 gvt_vdbg_mm("invalidate 4K entry\n");
1031 ppgtt_invalidate_pte(spt, &e);
1033 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1034 /* We don't set up 64K shadow entries so far. */
1035 WARN(1, "suspicious 64K gtt entry\n");
1037 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1038 gvt_vdbg_mm("invalidate 2M entry\n");
1040 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1041 WARN(1, "GVT doesn't support 1GB page\n");
1043 case GTT_TYPE_PPGTT_PML4_ENTRY:
1044 case GTT_TYPE_PPGTT_PDP_ENTRY:
1045 case GTT_TYPE_PPGTT_PDE_ENTRY:
1046 gvt_vdbg_mm("invalidate PML4/PDP/PDE entry\n");
1047 ret = ppgtt_invalidate_spt_by_shadow_entry(
1057 trace_spt_change(spt->vgpu->id, "release", spt,
1058 spt->guest_page.gfn, spt->shadow_page.type);
1059 ppgtt_free_spt(spt);
1062 gvt_vgpu_err("fail: shadow page %p shadow entry 0x%llx type %d\n",
1063 spt, e.val64, e.type);
1067 static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
1069 struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
1071 if (GRAPHICS_VER(dev_priv) == 9) {
1072 u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
1073 GAMW_ECO_ENABLE_64K_IPS_FIELD;
1075 return ips == GAMW_ECO_ENABLE_64K_IPS_FIELD;
1076 } else if (GRAPHICS_VER(dev_priv) >= 11) {
1077 /* 64K paging is now controlled only by the IPS bit in the PTE. */
1083 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt);
1085 static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
1086 struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we)
1088 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1089 struct intel_vgpu_ppgtt_spt *spt = NULL;
1093 GEM_BUG_ON(!gtt_type_is_pt(get_next_pt_type(we->type)));
1095 if (we->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1096 ips = vgpu_ips_enabled(vgpu) && ops->test_ips(we);
1098 spt = intel_vgpu_find_spt_by_gfn(vgpu, ops->get_pfn(we));
1102 if (ips != spt->guest_page.pde_ips) {
1103 spt->guest_page.pde_ips = ips;
1105 gvt_dbg_mm("reshadow PDE since ips changed\n");
1106 clear_page(spt->shadow_page.vaddr);
1107 ret = ppgtt_populate_spt(spt);
1114 int type = get_next_pt_type(we->type);
1116 if (!gtt_type_is_pt(type)) {
1121 spt = ppgtt_alloc_spt_gfn(vgpu, type, ops->get_pfn(we), ips);
1127 ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
1131 ret = ppgtt_populate_spt(spt);
1135 trace_spt_change(vgpu->id, "new", spt, spt->guest_page.gfn,
1136 spt->shadow_page.type);
1141 ppgtt_free_spt(spt);
1144 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1145 spt, we->val64, we->type);
1146 return ERR_PTR(ret);
1149 static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se,
1150 struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge)
1152 const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops;
1154 se->type = ge->type;
1155 se->val64 = ge->val64;
1157 /* Because we always split 64KB pages, clear IPS in the shadow PDE. */
1158 if (se->type == GTT_TYPE_PPGTT_PDE_ENTRY)
1161 ops->set_pfn(se, s->shadow_page.mfn);
1165 * Check whether a 2MB huge page can be used
1166 * @vgpu: target vgpu
1167 * @entry: target pfn's gtt entry
1169 * Return 1 if 2MB huge gtt shadowing is possible, 0 if the conditions
1170 * are not met, negative if an error is found.
1172 static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
1173 struct intel_gvt_gtt_entry *entry)
1175 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1178 if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
1181 if (!test_bit(INTEL_VGPU_STATUS_ATTACHED, vgpu->status))
1183 pfn = gfn_to_pfn(vgpu->vfio_device.kvm, ops->get_pfn(entry));
1184 if (is_error_noslot_pfn(pfn))
1186 return PageTransHuge(pfn_to_page(pfn));
1189 static int split_2MB_gtt_entry(struct intel_vgpu *vgpu,
1190 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1191 struct intel_gvt_gtt_entry *se)
1193 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1194 struct intel_vgpu_ppgtt_spt *sub_spt;
1195 struct intel_gvt_gtt_entry sub_se;
1196 unsigned long start_gfn;
1197 dma_addr_t dma_addr;
1198 unsigned long sub_index;
1201 gvt_dbg_mm("Split 2M gtt entry, index %lu\n", index);
1203 start_gfn = ops->get_pfn(se);
1205 sub_spt = ppgtt_alloc_spt(vgpu, GTT_TYPE_PPGTT_PTE_PT);
1206 if (IS_ERR(sub_spt))
1207 return PTR_ERR(sub_spt);
1209 for_each_shadow_entry(sub_spt, &sub_se, sub_index) {
1210 ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + sub_index,
1211 PAGE_SIZE, &dma_addr);
1214 sub_se.val64 = se->val64;
1216 /* Copy the PAT field from PDE. */
1217 sub_se.val64 &= ~_PAGE_PAT;
1218 sub_se.val64 |= (se->val64 & _PAGE_PAT_LARGE) >> 5;
1220 ops->set_pfn(&sub_se, dma_addr >> PAGE_SHIFT);
1221 ppgtt_set_shadow_entry(sub_spt, &sub_se, sub_index);
1224 /* Clear dirty field. */
1225 se->val64 &= ~_PAGE_DIRTY;
1229 ops->set_pfn(se, sub_spt->shadow_page.mfn);
1230 ppgtt_set_shadow_entry(spt, se, index);
1233 /* Cancel the existing address mappings of the DMA addr. */
1234 for_each_present_shadow_entry(sub_spt, &sub_se, sub_index) {
1235 gvt_vdbg_mm("invalidate 4K entry\n");
1236 ppgtt_invalidate_pte(sub_spt, &sub_se);
1238 /* Release the newly allocated spt. */
1239 trace_spt_change(sub_spt->vgpu->id, "release", sub_spt,
1240 sub_spt->guest_page.gfn, sub_spt->shadow_page.type);
1241 ppgtt_free_spt(sub_spt);
1245 static int split_64KB_gtt_entry(struct intel_vgpu *vgpu,
1246 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1247 struct intel_gvt_gtt_entry *se)
1249 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1250 struct intel_gvt_gtt_entry entry = *se;
1251 unsigned long start_gfn;
1252 dma_addr_t dma_addr;
1255 gvt_vdbg_mm("Split 64K gtt entry, index %lu\n", index);
1257 GEM_BUG_ON(index % GTT_64K_PTE_STRIDE);
1259 start_gfn = ops->get_pfn(se);
1261 entry.type = GTT_TYPE_PPGTT_PTE_4K_ENTRY;
1262 ops->set_64k_splited(&entry);
1264 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1265 ret = intel_gvt_dma_map_guest_page(vgpu, start_gfn + i,
1266 PAGE_SIZE, &dma_addr);
1270 ops->set_pfn(&entry, dma_addr >> PAGE_SHIFT);
1271 ppgtt_set_shadow_entry(spt, &entry, index + i);
1276 static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu,
1277 struct intel_vgpu_ppgtt_spt *spt, unsigned long index,
1278 struct intel_gvt_gtt_entry *ge)
1280 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
1281 struct intel_gvt_gtt_entry se = *ge;
1282 unsigned long gfn, page_size = PAGE_SIZE;
1283 dma_addr_t dma_addr;
1286 if (!pte_ops->test_present(ge))
1289 gfn = pte_ops->get_pfn(ge);
1292 case GTT_TYPE_PPGTT_PTE_4K_ENTRY:
1293 gvt_vdbg_mm("shadow 4K gtt entry\n");
1295 case GTT_TYPE_PPGTT_PTE_64K_ENTRY:
1296 gvt_vdbg_mm("shadow 64K gtt entry\n");
1298 * The layout of a 64K page is special: the page size is
1299 * controlled by the upper PDE. To keep it simple, we always
1300 * split a 64K page into smaller 4K pages in the shadow PT.
1302 return split_64KB_gtt_entry(vgpu, spt, index, &se);
1303 case GTT_TYPE_PPGTT_PTE_2M_ENTRY:
1304 gvt_vdbg_mm("shadow 2M gtt entry\n");
1305 ret = is_2MB_gtt_possible(vgpu, ge);
1307 return split_2MB_gtt_entry(vgpu, spt, index, &se);
1310 page_size = I915_GTT_PAGE_SIZE_2M;
1312 case GTT_TYPE_PPGTT_PTE_1G_ENTRY:
1313 gvt_vgpu_err("GVT doesn't support 1GB entry\n");
1320 ret = intel_gvt_dma_map_guest_page(vgpu, gfn, page_size, &dma_addr);
1324 pte_ops->set_pfn(&se, dma_addr >> PAGE_SHIFT);
1325 ppgtt_set_shadow_entry(spt, &se, index);
1329 static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
1331 struct intel_vgpu *vgpu = spt->vgpu;
1332 struct intel_gvt *gvt = vgpu->gvt;
1333 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1334 struct intel_vgpu_ppgtt_spt *s;
1335 struct intel_gvt_gtt_entry se, ge;
1336 unsigned long gfn, i;
1339 trace_spt_change(spt->vgpu->id, "born", spt,
1340 spt->guest_page.gfn, spt->shadow_page.type);
1342 for_each_present_guest_entry(spt, &ge, i) {
1343 if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
1344 s = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1349 ppgtt_get_shadow_entry(spt, &se, i);
1350 ppgtt_generate_shadow_entry(&se, s, &ge);
1351 ppgtt_set_shadow_entry(spt, &se, i);
1353 gfn = ops->get_pfn(&ge);
1354 if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
1355 ops->set_pfn(&se, gvt->gtt.scratch_mfn);
1356 ppgtt_set_shadow_entry(spt, &se, i);
1360 ret = ppgtt_populate_shadow_entry(vgpu, spt, i, &ge);
1367 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1368 spt, ge.val64, ge.type);
1372 static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt,
1373 struct intel_gvt_gtt_entry *se, unsigned long index)
1375 struct intel_vgpu *vgpu = spt->vgpu;
1376 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1379 trace_spt_guest_change(spt->vgpu->id, "remove", spt,
1380 spt->shadow_page.type, se->val64, index);
1382 gvt_vdbg_mm("destroy old shadow entry, type %d, index %lu, value %llx\n",
1383 se->type, index, se->val64);
1385 if (!ops->test_present(se))
1388 if (ops->get_pfn(se) ==
1389 vgpu->gtt.scratch_pt[spt->shadow_page.type].page_mfn)
1392 if (gtt_type_is_pt(get_next_pt_type(se->type))) {
1393 struct intel_vgpu_ppgtt_spt *s =
1394 intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(se));
1396 gvt_vgpu_err("fail to find guest page\n");
1400 ret = ppgtt_invalidate_spt(s);
1404 /* We don't set up 64K shadow entries so far. */
1405 WARN(se->type == GTT_TYPE_PPGTT_PTE_64K_ENTRY,
1406 "suspicious 64K entry\n");
1407 ppgtt_invalidate_pte(spt, se);
1412 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d\n",
1413 spt, se->val64, se->type);
1417 static int ppgtt_handle_guest_entry_add(struct intel_vgpu_ppgtt_spt *spt,
1418 struct intel_gvt_gtt_entry *we, unsigned long index)
1420 struct intel_vgpu *vgpu = spt->vgpu;
1421 struct intel_gvt_gtt_entry m;
1422 struct intel_vgpu_ppgtt_spt *s;
1425 trace_spt_guest_change(spt->vgpu->id, "add", spt, spt->shadow_page.type,
1428 gvt_vdbg_mm("add shadow entry: type %d, index %lu, value %llx\n",
1429 we->type, index, we->val64);
1431 if (gtt_type_is_pt(get_next_pt_type(we->type))) {
1432 s = ppgtt_populate_spt_by_guest_entry(vgpu, we);
1437 ppgtt_get_shadow_entry(spt, &m, index);
1438 ppgtt_generate_shadow_entry(&m, s, we);
1439 ppgtt_set_shadow_entry(spt, &m, index);
1441 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, we);
1447 gvt_vgpu_err("fail: spt %p guest entry 0x%llx type %d\n",
1448 spt, we->val64, we->type);
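/*
 * Out-of-sync (OOS) handling, summarized from the helpers below
 * (illustrative overview, not upstream documentation): an oos_page caches a
 * snapshot of a guest PTE page so write protection on that page can be
 * dropped while it is "out of sync". When a tracked PTE page is written
 * often enough (see can_do_out_of_sync()), it is put out of sync via
 * ppgtt_set_guest_page_oos(). Before a guest workload is submitted,
 * intel_vgpu_sync_oos_pages() re-enables write protection and
 * sync_oos_page() diffs the cached snapshot against the current guest page,
 * re-shadowing only the entries that changed.
 */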
1452 static int sync_oos_page(struct intel_vgpu *vgpu,
1453 struct intel_vgpu_oos_page *oos_page)
1455 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1456 struct intel_gvt *gvt = vgpu->gvt;
1457 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
1458 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1459 struct intel_gvt_gtt_entry old, new;
1463 trace_oos_change(vgpu->id, "sync", oos_page->id,
1464 spt, spt->guest_page.type);
1466 old.type = new.type = get_entry_type(spt->guest_page.type);
1467 old.val64 = new.val64 = 0;
1469 for (index = 0; index < (I915_GTT_PAGE_SIZE >>
1470 info->gtt_entry_size_shift); index++) {
1471 ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
1472 ops->get_entry(NULL, &new, index, true,
1473 spt->guest_page.gfn << PAGE_SHIFT, vgpu);
1475 if (old.val64 == new.val64
1476 && !test_and_clear_bit(index, spt->post_shadow_bitmap))
1479 trace_oos_sync(vgpu->id, oos_page->id,
1480 spt, spt->guest_page.type,
1483 ret = ppgtt_populate_shadow_entry(vgpu, spt, index, &new);
1487 ops->set_entry(oos_page->mem, &new, index, false, 0, vgpu);
1490 spt->guest_page.write_cnt = 0;
1491 list_del_init(&spt->post_shadow_list);
1495 static int detach_oos_page(struct intel_vgpu *vgpu,
1496 struct intel_vgpu_oos_page *oos_page)
1498 struct intel_gvt *gvt = vgpu->gvt;
1499 struct intel_vgpu_ppgtt_spt *spt = oos_page->spt;
1501 trace_oos_change(vgpu->id, "detach", oos_page->id,
1502 spt, spt->guest_page.type);
1504 spt->guest_page.write_cnt = 0;
1505 spt->guest_page.oos_page = NULL;
1506 oos_page->spt = NULL;
1508 list_del_init(&oos_page->vm_list);
1509 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_free_list_head);
1514 static int attach_oos_page(struct intel_vgpu_oos_page *oos_page,
1515 struct intel_vgpu_ppgtt_spt *spt)
1517 struct intel_gvt *gvt = spt->vgpu->gvt;
1520 ret = intel_gvt_read_gpa(spt->vgpu,
1521 spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
1522 oos_page->mem, I915_GTT_PAGE_SIZE);
1526 oos_page->spt = spt;
1527 spt->guest_page.oos_page = oos_page;
1529 list_move_tail(&oos_page->list, &gvt->gtt.oos_page_use_list_head);
1531 trace_oos_change(spt->vgpu->id, "attach", oos_page->id,
1532 spt, spt->guest_page.type);
1536 static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
1538 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1541 ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
1545 trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
1546 spt, spt->guest_page.type);
1548 list_del_init(&oos_page->vm_list);
1549 return sync_oos_page(spt->vgpu, oos_page);
1552 static int ppgtt_allocate_oos_page(struct intel_vgpu_ppgtt_spt *spt)
1554 struct intel_gvt *gvt = spt->vgpu->gvt;
1555 struct intel_gvt_gtt *gtt = &gvt->gtt;
1556 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1559 WARN(oos_page, "shadow PPGTT page already has an oos page\n");
1561 if (list_empty(>t->oos_page_free_list_head)) {
1562 oos_page = container_of(gtt->oos_page_use_list_head.next,
1563 struct intel_vgpu_oos_page, list);
1564 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1567 ret = detach_oos_page(spt->vgpu, oos_page);
1571 oos_page = container_of(gtt->oos_page_free_list_head.next,
1572 struct intel_vgpu_oos_page, list);
1573 return attach_oos_page(oos_page, spt);
1576 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
1578 struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
1580 if (WARN(!oos_page, "shadow PPGTT page should have an oos page\n"))
1583 trace_oos_change(spt->vgpu->id, "set page out of sync", oos_page->id,
1584 spt, spt->guest_page.type);
1586 list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
1587 return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
1591 * intel_vgpu_sync_oos_pages - sync all out-of-sync shadow pages for a vGPU
1594 * This function is called before submitting a guest workload to the host,
1595 * to sync all the out-of-sync shadow page tables of a vGPU.
1598 * Zero on success, negative error code if failed.
1600 int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu)
1602 struct list_head *pos, *n;
1603 struct intel_vgpu_oos_page *oos_page;
1606 if (!enable_out_of_sync)
1609 list_for_each_safe(pos, n, &vgpu->gtt.oos_page_list_head) {
1610 oos_page = container_of(pos,
1611 struct intel_vgpu_oos_page, vm_list);
1612 ret = ppgtt_set_guest_page_sync(oos_page->spt);
1620 * The heart of PPGTT shadow page table.
1622 static int ppgtt_handle_guest_write_page_table(
1623 struct intel_vgpu_ppgtt_spt *spt,
1624 struct intel_gvt_gtt_entry *we, unsigned long index)
1626 struct intel_vgpu *vgpu = spt->vgpu;
1627 int type = spt->shadow_page.type;
1628 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1629 struct intel_gvt_gtt_entry old_se;
1633 new_present = ops->test_present(we);
1636 * Add the new entry first and then remove the old one; this guarantees
1637 * the ppgtt table remains valid during the window between the addition
1638 * and the removal.
1640 ppgtt_get_shadow_entry(spt, &old_se, index);
1643 ret = ppgtt_handle_guest_entry_add(spt, we, index);
1648 ret = ppgtt_handle_guest_entry_removal(spt, &old_se, index);
1653 /* For 64KB split entries, we need to clear them all. */
1654 if (ops->test_64k_splited(&old_se) &&
1655 !(index % GTT_64K_PTE_STRIDE)) {
1656 gvt_vdbg_mm("remove split 64K shadow entries\n");
1657 for (i = 0; i < GTT_64K_PTE_STRIDE; i++) {
1658 ops->clear_64k_splited(&old_se);
1659 ops->set_pfn(&old_se,
1660 vgpu->gtt.scratch_pt[type].page_mfn);
1661 ppgtt_set_shadow_entry(spt, &old_se, index + i);
1663 } else if (old_se.type == GTT_TYPE_PPGTT_PTE_2M_ENTRY ||
1664 old_se.type == GTT_TYPE_PPGTT_PTE_1G_ENTRY) {
1665 ops->clear_pse(&old_se);
1666 ops->set_pfn(&old_se,
1667 vgpu->gtt.scratch_pt[type].page_mfn);
1668 ppgtt_set_shadow_entry(spt, &old_se, index);
1670 ops->set_pfn(&old_se,
1671 vgpu->gtt.scratch_pt[type].page_mfn);
1672 ppgtt_set_shadow_entry(spt, &old_se, index);
1678 gvt_vgpu_err("fail: shadow page %p guest entry 0x%llx type %d.\n",
1679 spt, we->val64, we->type);
1685 static inline bool can_do_out_of_sync(struct intel_vgpu_ppgtt_spt *spt)
1687 return enable_out_of_sync
1688 && gtt_type_is_pte_pt(spt->guest_page.type)
1689 && spt->guest_page.write_cnt >= 2;
1692 static void ppgtt_set_post_shadow(struct intel_vgpu_ppgtt_spt *spt,
1693 unsigned long index)
1695 set_bit(index, spt->post_shadow_bitmap);
1696 if (!list_empty(&spt->post_shadow_list))
1699 list_add_tail(&spt->post_shadow_list,
1700 &spt->vgpu->gtt.post_shadow_list_head);
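/*
 * Illustrative note (not from the original file): "post shadow" defers the
 * shadow update for a partially written guest PTE. ppgtt_set_post_shadow()
 * marks the index in post_shadow_bitmap and queues the spt on the vGPU's
 * post_shadow_list_head; the deferred entries are replayed by
 * intel_vgpu_flush_post_shadow() below before a workload is submitted.
 */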
1704 * intel_vgpu_flush_post_shadow - flush the post shadow transactions
1707 * This function is called before submitting a guest workload to the host,
1708 * to flush all the post shadows for a vGPU.
1711 * Zero on success, negative error code if failed.
1713 int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
1715 struct list_head *pos, *n;
1716 struct intel_vgpu_ppgtt_spt *spt;
1717 struct intel_gvt_gtt_entry ge;
1718 unsigned long index;
1721 list_for_each_safe(pos, n, &vgpu->gtt.post_shadow_list_head) {
1722 spt = container_of(pos, struct intel_vgpu_ppgtt_spt,
1725 for_each_set_bit(index, spt->post_shadow_bitmap,
1726 GTT_ENTRY_NUM_IN_ONE_PAGE) {
1727 ppgtt_get_guest_entry(spt, &ge, index);
1729 ret = ppgtt_handle_guest_write_page_table(spt,
1733 clear_bit(index, spt->post_shadow_bitmap);
1735 list_del_init(&spt->post_shadow_list);
1740 static int ppgtt_handle_guest_write_page_table_bytes(
1741 struct intel_vgpu_ppgtt_spt *spt,
1742 u64 pa, void *p_data, int bytes)
1744 struct intel_vgpu *vgpu = spt->vgpu;
1745 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
1746 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
1747 struct intel_gvt_gtt_entry we, se;
1748 unsigned long index;
1751 index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;
1753 ppgtt_get_guest_entry(spt, &we, index);
1756 * For a page table that holds 64K gtt entries, only PTE#0, PTE#16,
1757 * PTE#32, ... PTE#496 are used. Updates to the unused PTEs should be
1760 if (we.type == GTT_TYPE_PPGTT_PTE_64K_ENTRY &&
1761 (index % GTT_64K_PTE_STRIDE)) {
1762 gvt_vdbg_mm("Ignore write to unused PTE entry, index %lu\n",
1767 if (bytes == info->gtt_entry_size) {
1768 ret = ppgtt_handle_guest_write_page_table(spt, &we, index);
1772 if (!test_bit(index, spt->post_shadow_bitmap)) {
1773 int type = spt->shadow_page.type;
1775 ppgtt_get_shadow_entry(spt, &se, index);
1776 ret = ppgtt_handle_guest_entry_removal(spt, &se, index);
1779 ops->set_pfn(&se, vgpu->gtt.scratch_pt[type].page_mfn);
1780 ppgtt_set_shadow_entry(spt, &se, index);
1782 ppgtt_set_post_shadow(spt, index);
1785 if (!enable_out_of_sync)
1788 spt->guest_page.write_cnt++;
1790 if (spt->guest_page.oos_page)
1791 ops->set_entry(spt->guest_page.oos_page->mem, &we, index,
1794 if (can_do_out_of_sync(spt)) {
1795 if (!spt->guest_page.oos_page)
1796 ppgtt_allocate_oos_page(spt);
1798 ret = ppgtt_set_guest_page_oos(spt);
1805 static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm)
1807 struct intel_vgpu *vgpu = mm->vgpu;
1808 struct intel_gvt *gvt = vgpu->gvt;
1809 struct intel_gvt_gtt *gtt = &gvt->gtt;
1810 const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1811 struct intel_gvt_gtt_entry se;
1814 if (!mm->ppgtt_mm.shadowed)
1817 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.shadow_pdps); index++) {
1818 ppgtt_get_shadow_root_entry(mm, &se, index);
1820 if (!ops->test_present(&se))
1823 ppgtt_invalidate_spt_by_shadow_entry(vgpu, &se);
1825 ppgtt_set_shadow_root_entry(mm, &se, index);
1827 trace_spt_guest_change(vgpu->id, "destroy root pointer",
1828 NULL, se.type, se.val64, index);
1831 mm->ppgtt_mm.shadowed = false;
1835 static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm)
1837 struct intel_vgpu *vgpu = mm->vgpu;
1838 struct intel_gvt *gvt = vgpu->gvt;
1839 struct intel_gvt_gtt *gtt = &gvt->gtt;
1840 const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops;
1841 struct intel_vgpu_ppgtt_spt *spt;
1842 struct intel_gvt_gtt_entry ge, se;
1845 if (mm->ppgtt_mm.shadowed)
1848 mm->ppgtt_mm.shadowed = true;
1850 for (index = 0; index < ARRAY_SIZE(mm->ppgtt_mm.guest_pdps); index++) {
1851 ppgtt_get_guest_root_entry(mm, &ge, index);
1853 if (!ops->test_present(&ge))
1856 trace_spt_guest_change(vgpu->id, __func__, NULL,
1857 ge.type, ge.val64, index);
1859 spt = ppgtt_populate_spt_by_guest_entry(vgpu, &ge);
1861 gvt_vgpu_err("fail to populate guest root pointer\n");
1865 ppgtt_generate_shadow_entry(&se, spt, &ge);
1866 ppgtt_set_shadow_root_entry(mm, &se, index);
1868 trace_spt_guest_change(vgpu->id, "populate root pointer",
1869 NULL, se.type, se.val64, index);
1874 invalidate_ppgtt_mm(mm);
1878 static struct intel_vgpu_mm *vgpu_alloc_mm(struct intel_vgpu *vgpu)
1880 struct intel_vgpu_mm *mm;
1882 mm = kzalloc(sizeof(*mm), GFP_KERNEL);
1887 kref_init(&mm->ref);
1888 atomic_set(&mm->pincount, 0);
1893 static void vgpu_free_mm(struct intel_vgpu_mm *mm)
1899 * intel_vgpu_create_ppgtt_mm - create a ppgtt mm object for a vGPU
1901 * @root_entry_type: ppgtt root entry type
1902 * @pdps: guest pdps.
1904 * This function is used to create a ppgtt mm object for a vGPU.
1907 * The ppgtt mm object on success, a negative error code wrapped in ERR_PTR() if failed.
1909 struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
1910 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
1912 struct intel_gvt *gvt = vgpu->gvt;
1913 struct intel_vgpu_mm *mm;
1916 mm = vgpu_alloc_mm(vgpu);
1918 return ERR_PTR(-ENOMEM);
1920 mm->type = INTEL_GVT_MM_PPGTT;
1922 GEM_BUG_ON(root_entry_type != GTT_TYPE_PPGTT_ROOT_L3_ENTRY &&
1923 root_entry_type != GTT_TYPE_PPGTT_ROOT_L4_ENTRY);
1924 mm->ppgtt_mm.root_entry_type = root_entry_type;
1926 INIT_LIST_HEAD(&mm->ppgtt_mm.list);
1927 INIT_LIST_HEAD(&mm->ppgtt_mm.lru_list);
1928 INIT_LIST_HEAD(&mm->ppgtt_mm.link);
1930 if (root_entry_type == GTT_TYPE_PPGTT_ROOT_L4_ENTRY)
1931 mm->ppgtt_mm.guest_pdps[0] = pdps[0];
1933 memcpy(mm->ppgtt_mm.guest_pdps, pdps,
1934 sizeof(mm->ppgtt_mm.guest_pdps));
1936 ret = shadow_ppgtt_mm(mm);
1938 gvt_vgpu_err("failed to shadow ppgtt mm\n");
1940 return ERR_PTR(ret);
1943 list_add_tail(&mm->ppgtt_mm.list, &vgpu->gtt.ppgtt_mm_list_head);
1945 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
1946 list_add_tail(&mm->ppgtt_mm.lru_list, &gvt->gtt.ppgtt_mm_lru_list_head);
1947 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
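/*
 * Hedged usage sketch (illustration only; the caller and the source of
 * "pdps" are assumptions): creating a 4-level PPGTT mm object from
 * guest-provided pdps.
 *
 *	struct intel_vgpu_mm *mm;
 *
 *	mm = intel_vgpu_create_ppgtt_mm(vgpu, GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
 *					pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 */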
1952 static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
1954 struct intel_vgpu_mm *mm;
1955 unsigned long nr_entries;
1957 mm = vgpu_alloc_mm(vgpu);
1959 return ERR_PTR(-ENOMEM);
1961 mm->type = INTEL_GVT_MM_GGTT;
1963 nr_entries = gvt_ggtt_gm_sz(vgpu->gvt) >> I915_GTT_PAGE_SHIFT;
1964 mm->ggtt_mm.virtual_ggtt =
1965 vzalloc(array_size(nr_entries,
1966 vgpu->gvt->device_info.gtt_entry_size));
1967 if (!mm->ggtt_mm.virtual_ggtt) {
1969 return ERR_PTR(-ENOMEM);
1972 mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1973 if (!mm->ggtt_mm.host_ggtt_aperture) {
1974 vfree(mm->ggtt_mm.virtual_ggtt);
1976 return ERR_PTR(-ENOMEM);
1979 mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
1980 if (!mm->ggtt_mm.host_ggtt_hidden) {
1981 vfree(mm->ggtt_mm.host_ggtt_aperture);
1982 vfree(mm->ggtt_mm.virtual_ggtt);
1984 return ERR_PTR(-ENOMEM);
1991 * _intel_vgpu_mm_release - destroy a mm object
1992 * @mm_ref: a kref object
1994 * This function is used to destroy a mm object for vGPU
1997 void _intel_vgpu_mm_release(struct kref *mm_ref)
1999 struct intel_vgpu_mm *mm = container_of(mm_ref, typeof(*mm), ref);
2001 if (GEM_WARN_ON(atomic_read(&mm->pincount)))
2002 gvt_err("vgpu mm pin count bug detected\n");
2004 if (mm->type == INTEL_GVT_MM_PPGTT) {
2005 list_del(&mm->ppgtt_mm.list);
2007 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2008 list_del(&mm->ppgtt_mm.lru_list);
2009 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2011 invalidate_ppgtt_mm(mm);
2013 vfree(mm->ggtt_mm.virtual_ggtt);
2014 vfree(mm->ggtt_mm.host_ggtt_aperture);
2015 vfree(mm->ggtt_mm.host_ggtt_hidden);
2022 * intel_vgpu_unpin_mm - decrease the pin count of a vGPU mm object
2023 * @mm: a vGPU mm object
2025 * This function is called when the user no longer wants to use a vGPU mm object
2027 void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm)
2029 atomic_dec_if_positive(&mm->pincount);
2033 * intel_vgpu_pin_mm - increase the pin count of a vGPU mm object
2034 * @mm: target vgpu mm
2036 * This function is called when the user wants to use a vGPU mm object. If this
2037 * mm object hasn't been shadowed yet, the shadow will be populated at this
2041 * Zero on success, negative error code if failed.
2043 int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm)
2047 atomic_inc(&mm->pincount);
2049 if (mm->type == INTEL_GVT_MM_PPGTT) {
2050 ret = shadow_ppgtt_mm(mm);
2054 mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2055 list_move_tail(&mm->ppgtt_mm.lru_list,
2056 &mm->vgpu->gvt->gtt.ppgtt_mm_lru_list_head);
2057 mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
2063 static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt)
2065 struct intel_vgpu_mm *mm;
2066 struct list_head *pos, *n;
2068 mutex_lock(&gvt->gtt.ppgtt_mm_lock);
2070 list_for_each_safe(pos, n, &gvt->gtt.ppgtt_mm_lru_list_head) {
2071 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.lru_list);
2073 if (atomic_read(&mm->pincount))
2076 list_del_init(&mm->ppgtt_mm.lru_list);
2077 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2078 invalidate_ppgtt_mm(mm);
2081 mutex_unlock(&gvt->gtt.ppgtt_mm_lock);
2086 * GMA translation APIs.
2088 static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm,
2089 struct intel_gvt_gtt_entry *e, unsigned long index, bool guest)
2091 struct intel_vgpu *vgpu = mm->vgpu;
2092 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2093 struct intel_vgpu_ppgtt_spt *s;
2095 s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e));
2100 ppgtt_get_shadow_entry(s, e, index);
2102 ppgtt_get_guest_entry(s, e, index);
2107 * intel_vgpu_gma_to_gpa - translate a gma to GPA
2108 * @mm: mm object. could be a PPGTT or GGTT mm object
2109 * @gma: graphics memory address in this mm object
2111 * This function is used to translate a graphics memory address in a specific
2112 * graphics memory space to a guest physical address.
2115 * Guest physical address on success, INTEL_GVT_INVALID_ADDR if failed.
2117 unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
2119 struct intel_vgpu *vgpu = mm->vgpu;
2120 struct intel_gvt *gvt = vgpu->gvt;
2121 const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops;
2122 const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops;
2123 unsigned long gpa = INTEL_GVT_INVALID_ADDR;
2124 unsigned long gma_index[4];
2125 struct intel_gvt_gtt_entry e;
2129 GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT &&
2130 mm->type != INTEL_GVT_MM_PPGTT);
2132 if (mm->type == INTEL_GVT_MM_GGTT) {
2133 if (!vgpu_gmadr_is_valid(vgpu, gma))
2136 ggtt_get_guest_entry(mm, &e,
2137 gma_ops->gma_to_ggtt_pte_index(gma));
2139 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
2140 + (gma & ~I915_GTT_PAGE_MASK);
2142 trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
2144 switch (mm->ppgtt_mm.root_entry_type) {
2145 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2146 ppgtt_get_shadow_root_entry(mm, &e, 0);
2148 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
2149 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
2150 gma_index[2] = gma_ops->gma_to_pde_index(gma);
2151 gma_index[3] = gma_ops->gma_to_pte_index(gma);
2154 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2155 ppgtt_get_shadow_root_entry(mm, &e,
2156 gma_ops->gma_to_l3_pdp_index(gma));
2158 gma_index[0] = gma_ops->gma_to_pde_index(gma);
2159 gma_index[1] = gma_ops->gma_to_pte_index(gma);
2166 /* walk the shadow page table and get gpa from guest entry */
2167 for (i = 0; i < levels; i++) {
2168 ret = ppgtt_get_next_level_entry(mm, &e, gma_index[i],
2173 if (!pte_ops->test_present(&e)) {
2174 gvt_dbg_core("GMA 0x%lx is not present\n", gma);
2179 gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT) +
2180 (gma & ~I915_GTT_PAGE_MASK);
2181 trace_gma_translate(vgpu->id, "ppgtt", 0,
2182 mm->ppgtt_mm.root_entry_type, gma, gpa);
2187 gvt_vgpu_err("invalid mm type: %d gma %lx\n", mm->type, gma);
2188 return INTEL_GVT_INVALID_ADDR;
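/*
 * Hedged usage sketch (illustration only): translating a graphics memory
 * address seen in a guest command buffer into a guest physical address
 * through the vGPU's GGTT mm. Error handling is abbreviated.
 *
 *	unsigned long gpa;
 *
 *	gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, gma);
 *	if (gpa == INTEL_GVT_INVALID_ADDR)
 *		return -EINVAL;
 */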
2191 static int emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
2192 unsigned int off, void *p_data, unsigned int bytes)
2194 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2195 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2196 unsigned long index = off >> info->gtt_entry_size_shift;
2198 struct intel_gvt_gtt_entry e;
2200 if (bytes != 4 && bytes != 8)
2203 gma = index << I915_GTT_PAGE_SHIFT;
2204 if (!intel_gvt_ggtt_validate_range(vgpu,
2205 gma, 1 << I915_GTT_PAGE_SHIFT)) {
2206 gvt_dbg_mm("read invalid ggtt at 0x%lx\n", gma);
2207 memset(p_data, 0, bytes);
2211 ggtt_get_guest_entry(ggtt_mm, &e, index);
2212 memcpy(p_data, (void *)&e.val64 + (off & (info->gtt_entry_size - 1)),
2218 * intel_vgpu_emulate_ggtt_mmio_read - emulate GTT MMIO register read
2220 * @off: register offset
2221 * @p_data: data will be returned to guest
2222 * @bytes: data length
2224 * This function is used to emulate the GTT MMIO register read
2227 * Zero on success, error code if failed.
2229 int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
2230 void *p_data, unsigned int bytes)
2232 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2235 if (bytes != 4 && bytes != 8)
2238 off -= info->gtt_start_offset;
2239 ret = emulate_ggtt_mmio_read(vgpu, off, p_data, bytes);
2243 static void ggtt_invalidate_pte(struct intel_vgpu *vgpu,
2244 struct intel_gvt_gtt_entry *entry)
2246 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2249 pfn = pte_ops->get_pfn(entry);
2250 if (pfn != vgpu->gvt->gtt.scratch_mfn)
2251 intel_gvt_dma_unmap_guest_page(vgpu, pfn << PAGE_SHIFT);
2254 static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
2255 void *p_data, unsigned int bytes)
2257 struct intel_gvt *gvt = vgpu->gvt;
2258 const struct intel_gvt_device_info *info = &gvt->device_info;
2259 struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm;
2260 const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
2261 unsigned long g_gtt_index = off >> info->gtt_entry_size_shift;
2262 unsigned long gma, gfn;
2263 struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2264 struct intel_gvt_gtt_entry m = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE};
2265 dma_addr_t dma_addr;
2267 struct intel_gvt_partial_pte *partial_pte, *pos, *n;
2268 bool partial_update = false;
2270 if (bytes != 4 && bytes != 8)
2273 gma = g_gtt_index << I915_GTT_PAGE_SHIFT;
2275 /* the VM may configure the whole GM space when ballooning is used */
2276 if (!vgpu_gmadr_is_valid(vgpu, gma))
2279 e.type = GTT_TYPE_GGTT_PTE;
2280 memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
2283 /* If the ggtt entry size is 8 bytes and the access is split into two
2284 * 4-byte writes, save the first 4 bytes in a list and update only the
2285 * virtual PTE. Update the shadow PTE once the second 4-byte write arrives.
2287 if (bytes < info->gtt_entry_size) {
2290 list_for_each_entry_safe(pos, n,
2291 &ggtt_mm->ggtt_mm.partial_pte_list, list) {
2292 if (g_gtt_index == pos->offset >>
2293 info->gtt_entry_size_shift) {
2294 if (off != pos->offset) {
2295 /* the second partial part */
2296 int last_off = pos->offset &
2297 (info->gtt_entry_size - 1);
2299 memcpy((void *)&e.val64 + last_off,
2300 (void *)&pos->data + last_off,
2303 list_del(&pos->list);
2309 /* update of the first partial part */
2310 pos->data = e.val64;
2311 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2317 /* the first partial part */
2318 partial_pte = kzalloc(sizeof(*partial_pte), GFP_KERNEL);
2321 partial_pte->offset = off;
2322 partial_pte->data = e.val64;
2323 list_add_tail(&partial_pte->list,
2324 &ggtt_mm->ggtt_mm.partial_pte_list);
2325 partial_update = true;
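/*
 * Illustrative sequence (editor's addition): a guest programming one
 * 64-bit GGTT PTE with two 4-byte writes at offsets 0x100 and 0x104:
 *
 *   1st write (off = 0x100): nothing matches on partial_pte_list, so
 *      the low dword is stashed in a new intel_gvt_partial_pte and
 *      partial_update = true keeps the shadow PTE untouched; only the
 *      guest-visible entry is refreshed further below.
 *   2nd write (off = 0x104): it lands on the same entry index
 *      (0x104 >> 3 == 0x100 >> 3), the stashed low dword is merged into
 *      e.val64, the list node is removed, and the now-complete PTE goes
 *      through the normal shadow/DMA-mapping path.
 */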
2329 if (!partial_update && (ops->test_present(&e))) {
2330 gfn = ops->get_pfn(&e);
2334 /* One PTE update may be issued in multiple writes, and the
2335 * first write may not yet carry a valid gfn.
2337 if (!intel_gvt_is_valid_gfn(vgpu, gfn)) {
2338 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2342 ret = intel_gvt_dma_map_guest_page(vgpu, gfn, PAGE_SIZE,
2345 gvt_vgpu_err("fail to populate guest ggtt entry\n");
2346 /* The guest driver may read/write the entry while it is only
2347 * partially updated; in this situation the p2m translation can fail,
2348 * so point the shadow entry at a scratch page instead.
2350 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2352 ops->set_pfn(&m, dma_addr >> PAGE_SHIFT);
2354 ops->set_pfn(&m, gvt->gtt.scratch_mfn);
2355 ops->clear_present(&m);
2359 ggtt_set_guest_entry(ggtt_mm, &e, g_gtt_index);
2361 ggtt_get_host_entry(ggtt_mm, &e, g_gtt_index);
2362 ggtt_invalidate_pte(vgpu, &e);
2364 ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
2365 ggtt_invalidate(gvt->gt);
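/*
 * Editor's summary of the tail of the write path above: the
 * guest-visible entry is refreshed, the previous host entry is looked
 * up and its DMA mapping released via ggtt_invalidate_pte(), the new
 * shadow entry (real DMA address, or scratch for invalid/partial
 * cases) is installed, and the hardware GGTT TLB is flushed with
 * ggtt_invalidate().
 */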
2370 * intel_vgpu_emulate_ggtt_mmio_write - emulate GTT MMIO register write
2372 * @off: register offset
2373 * @p_data: data from guest write
2374 * @bytes: data length
2376 * This function is used to emulate the GTT MMIO register write
2379 * Zero on success, error code if failed.
2381 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
2382 unsigned int off, void *p_data, unsigned int bytes)
2384 const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
2386 struct intel_vgpu_submission *s = &vgpu->submission;
2387 struct intel_engine_cs *engine;
2390 if (bytes != 4 && bytes != 8)
2393 off -= info->gtt_start_offset;
2394 ret = emulate_ggtt_mmio_write(vgpu, off, p_data, bytes);
2396 /* If the ggtt of the last submitted context is written,
2397 * that context probably got unpinned.
2398 * Set the last shadowed ctx to invalid.
2400 for_each_engine(engine, vgpu->gvt->gt, i) {
2401 if (!s->last_ctx[i].valid)
2404 if (s->last_ctx[i].lrca == (off >> info->gtt_entry_size_shift))
2405 s->last_ctx[i].valid = false;
2410 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
2411 enum intel_gvt_gtt_type type)
2413 struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
2414 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2415 const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
2416 int page_entry_num = I915_GTT_PAGE_SIZE >>
2417 vgpu->gvt->device_info.gtt_entry_size_shift;
2420 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2423 if (drm_WARN_ON(&i915->drm,
2424 type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
2427 scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
2429 gvt_vgpu_err("fail to allocate scratch page\n");
2433 daddr = dma_map_page(dev, virt_to_page(scratch_pt), 0, 4096, DMA_BIDIRECTIONAL);
2434 if (dma_mapping_error(dev, daddr)) {
2435 gvt_vgpu_err("fail to dmamap scratch_pt\n");
2436 __free_page(virt_to_page(scratch_pt));
2439 gtt->scratch_pt[type].page_mfn =
2440 (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2441 gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
2442 gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
2443 vgpu->id, type, gtt->scratch_pt[type].page_mfn);
2445 /* Build the tree by fully filling the scratch pt with entries which
2446 * point to the next level scratch pt or scratch page. The
2447 * scratch_pt[type] indicates the scratch pt/scratch page used by the
2449 * e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by a
2450 * GTT_TYPE_PPGTT_PDE_PT level pt, which means this scratch_pt itself
2451 * is GTT_TYPE_PPGTT_PTE_PT, and is fully filled with the scratch page mfn.
2453 if (type > GTT_TYPE_PPGTT_PTE_PT) {
2454 struct intel_gvt_gtt_entry se;
2456 memset(&se, 0, sizeof(struct intel_gvt_gtt_entry));
2457 se.type = get_entry_type(type - 1);
2458 ops->set_pfn(&se, gtt->scratch_pt[type - 1].page_mfn);
2460 /* The entry parameters like present/writeable/cache type are
2461 * set to the same values as in i915's scratch page tree.
2463 se.val64 |= GEN8_PAGE_PRESENT | GEN8_PAGE_RW;
2464 if (type == GTT_TYPE_PPGTT_PDE_PT)
2465 se.val64 |= PPAT_CACHED;
2467 for (i = 0; i < page_entry_num; i++)
2468 ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
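/*
 * Editor's note: for every type above GTT_TYPE_PPGTT_PTE_PT the loop
 * above points each slot of scratch_pt[type] at scratch_pt[type - 1],
 * so the per-vGPU scratch tables form a chain from the PML4 level down
 * to the PTE level that can stand in for absent guest page tables.
 */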
2474 static int release_scratch_page_tree(struct intel_vgpu *vgpu)
2477 struct device *dev = vgpu->gvt->gt->i915->drm.dev;
2480 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2481 if (vgpu->gtt.scratch_pt[i].page != NULL) {
2482 daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
2483 I915_GTT_PAGE_SHIFT);
2484 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2485 __free_page(vgpu->gtt.scratch_pt[i].page);
2486 vgpu->gtt.scratch_pt[i].page = NULL;
2487 vgpu->gtt.scratch_pt[i].page_mfn = 0;
2494 static int create_scratch_page_tree(struct intel_vgpu *vgpu)
2498 for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
2499 ret = alloc_scratch_pages(vgpu, i);
2507 release_scratch_page_tree(vgpu);
2512 * intel_vgpu_init_gtt - initialize per-vGPU graphics memory virtualization
2515 * This function is used to initialize per-vGPU graphics memory virtualization
2519 * Zero on success, error code if failed.
2521 int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
2523 struct intel_vgpu_gtt *gtt = &vgpu->gtt;
2525 INIT_RADIX_TREE(>t->spt_tree, GFP_KERNEL);
2527 INIT_LIST_HEAD(>t->ppgtt_mm_list_head);
2528 INIT_LIST_HEAD(>t->oos_page_list_head);
2529 INIT_LIST_HEAD(>t->post_shadow_list_head);
2531 gtt->ggtt_mm = intel_vgpu_create_ggtt_mm(vgpu);
2532 if (IS_ERR(gtt->ggtt_mm)) {
2533 gvt_vgpu_err("fail to create mm for ggtt.\n");
2534 return PTR_ERR(gtt->ggtt_mm);
2537 intel_vgpu_reset_ggtt(vgpu, false);
2539 INIT_LIST_HEAD(>t->ggtt_mm->ggtt_mm.partial_pte_list);
2541 return create_scratch_page_tree(vgpu);
2544 void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu)
2546 struct list_head *pos, *n;
2547 struct intel_vgpu_mm *mm;
2549 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2550 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2551 intel_vgpu_destroy_mm(mm);
2554 if (GEM_WARN_ON(!list_empty(&vgpu->gtt.ppgtt_mm_list_head)))
2555 gvt_err("vgpu ppgtt mm is not fully destroyed\n");
2557 if (GEM_WARN_ON(!radix_tree_empty(&vgpu->gtt.spt_tree))) {
2558 gvt_err("Why do we still have spt not freed?\n");
2559 ppgtt_free_all_spt(vgpu);
2563 static void intel_vgpu_destroy_ggtt_mm(struct intel_vgpu *vgpu)
2565 struct intel_gvt_partial_pte *pos, *next;
2567 list_for_each_entry_safe(pos, next,
2568 &vgpu->gtt.ggtt_mm->ggtt_mm.partial_pte_list,
2570 gvt_dbg_mm("partial PTE update on hold 0x%lx : 0x%llx\n",
2571 pos->offset, pos->data);
2574 intel_vgpu_destroy_mm(vgpu->gtt.ggtt_mm);
2575 vgpu->gtt.ggtt_mm = NULL;
2579 * intel_vgpu_clean_gtt - clean up per-vGPU graphics memory virtualization
2582 * This function is used to clean up per-vGPU graphics memory virtualization
2586 * Zero on success, error code if failed.
2588 void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu)
2590 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2591 intel_vgpu_destroy_ggtt_mm(vgpu);
2592 release_scratch_page_tree(vgpu);
2595 static void clean_spt_oos(struct intel_gvt *gvt)
2597 struct intel_gvt_gtt *gtt = &gvt->gtt;
2598 struct list_head *pos, *n;
2599 struct intel_vgpu_oos_page *oos_page;
2601 WARN(!list_empty(>t->oos_page_use_list_head),
2602 "someone is still using oos page\n");
2604 list_for_each_safe(pos, n, >t->oos_page_free_list_head) {
2605 oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
2606 list_del(&oos_page->list);
2607 free_page((unsigned long)oos_page->mem);
2612 static int setup_spt_oos(struct intel_gvt *gvt)
2614 struct intel_gvt_gtt *gtt = &gvt->gtt;
2615 struct intel_vgpu_oos_page *oos_page;
2619 INIT_LIST_HEAD(>t->oos_page_free_list_head);
2620 INIT_LIST_HEAD(>t->oos_page_use_list_head);
2622 for (i = 0; i < preallocated_oos_pages; i++) {
2623 oos_page = kzalloc(sizeof(*oos_page), GFP_KERNEL);
2628 oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
2629 if (!oos_page->mem) {
2635 INIT_LIST_HEAD(&oos_page->list);
2636 INIT_LIST_HEAD(&oos_page->vm_list);
2638 list_add_tail(&oos_page->list, >t->oos_page_free_list_head);
2641 gvt_dbg_mm("%d oos pages preallocated\n", i);
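/*
 * Editor's note: each oos_page->mem is a single zeroed page, so with
 * the default preallocated_oos_pages value of 8192 this free pool
 * costs about 8192 * 4 KiB = 32 MiB, and it is only built when
 * enable_out_of_sync is set (see intel_gvt_init_gtt()).
 */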
2650 * intel_vgpu_find_ppgtt_mm - find a PPGTT mm object
2652 * @pdps: pdp root array
2654 * This function is used to find a PPGTT mm object from mm object pool
2657 * pointer to mm object on success, NULL if failed.
2659 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
2662 struct intel_vgpu_mm *mm;
2663 struct list_head *pos;
2665 list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
2666 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2668 switch (mm->ppgtt_mm.root_entry_type) {
2669 case GTT_TYPE_PPGTT_ROOT_L4_ENTRY:
2670 if (pdps[0] == mm->ppgtt_mm.guest_pdps[0])
2673 case GTT_TYPE_PPGTT_ROOT_L3_ENTRY:
2674 if (!memcmp(pdps, mm->ppgtt_mm.guest_pdps,
2675 sizeof(mm->ppgtt_mm.guest_pdps)))
2686 * intel_vgpu_get_ppgtt_mm - get or create a PPGTT mm object.
2688 * @root_entry_type: ppgtt root entry type
2691 * This function is used to find or create a PPGTT mm object from a guest.
2694 * pointer to the mm object on success, ERR_PTR() on failure.
2696 struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
2697 enum intel_gvt_gtt_type root_entry_type, u64 pdps[])
2699 struct intel_vgpu_mm *mm;
2701 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2703 intel_vgpu_mm_get(mm);
2705 mm = intel_vgpu_create_ppgtt_mm(vgpu, root_entry_type, pdps);
2707 gvt_vgpu_err("fail to create mm\n");
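/*
 * Minimal usage sketch (editor's illustration, not from this file): a
 * caller reacting to a guest PPGTT creation notification would
 * typically do
 *
 *	mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
 *	if (IS_ERR(mm))
 *		return PTR_ERR(mm);
 *
 * and later balance it with intel_vgpu_put_ppgtt_mm(vgpu, pdps) when
 * the guest destroys that PPGTT.
 */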
2713 * intel_vgpu_put_ppgtt_mm - find and put a PPGTT mm object.
2717 * This function is used to find a PPGTT mm object from a guest and drop a reference on it.
2720 * Zero on success, negative error code if failed.
2722 int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[])
2724 struct intel_vgpu_mm *mm;
2726 mm = intel_vgpu_find_ppgtt_mm(vgpu, pdps);
2728 gvt_vgpu_err("fail to find ppgtt instance.\n");
2731 intel_vgpu_mm_put(mm);
2736 * intel_gvt_init_gtt - initialize mm components of a GVT device
2739 * This function is called at the initialization stage, to initialize
2740 * the mm components of a GVT device.
2743 * zero on success, negative error code if failed.
2745 int intel_gvt_init_gtt(struct intel_gvt *gvt)
2749 struct device *dev = gvt->gt->i915->drm.dev;
2752 gvt_dbg_core("init gtt\n");
2754 gvt->gtt.pte_ops = &gen8_gtt_pte_ops;
2755 gvt->gtt.gma_ops = &gen8_gtt_gma_ops;
2757 page = (void *)get_zeroed_page(GFP_KERNEL);
2759 gvt_err("fail to allocate scratch ggtt page\n");
2763 daddr = dma_map_page(dev, virt_to_page(page), 0,
2764 4096, DMA_BIDIRECTIONAL);
2765 if (dma_mapping_error(dev, daddr)) {
2766 gvt_err("fail to dmamap scratch ggtt page\n");
2767 __free_page(virt_to_page(page));
2771 gvt->gtt.scratch_page = virt_to_page(page);
2772 gvt->gtt.scratch_mfn = (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
2774 if (enable_out_of_sync) {
2775 ret = setup_spt_oos(gvt);
2777 gvt_err("fail to initialize SPT oos\n");
2778 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2779 __free_page(gvt->gtt.scratch_page);
2783 INIT_LIST_HEAD(&gvt->gtt.ppgtt_mm_lru_list_head);
2784 mutex_init(&gvt->gtt.ppgtt_mm_lock);
2789 * intel_gvt_clean_gtt - clean up mm components of a GVT device
2792 * This function is called at the driver unloading stage, to clean up
2793 * the mm components of a GVT device.
2796 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
2798 struct device *dev = gvt->gt->i915->drm.dev;
2799 dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
2800 I915_GTT_PAGE_SHIFT);
2802 dma_unmap_page(dev, daddr, 4096, DMA_BIDIRECTIONAL);
2804 __free_page(gvt->gtt.scratch_page);
2806 if (enable_out_of_sync)
2811 * intel_vgpu_invalidate_ppgtt - invalidate PPGTT instances
2814 * This function is called to invalidate all PPGTT instances of a vGPU.
2817 void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
2819 struct list_head *pos, *n;
2820 struct intel_vgpu_mm *mm;
2822 list_for_each_safe(pos, n, &vgpu->gtt.ppgtt_mm_list_head) {
2823 mm = container_of(pos, struct intel_vgpu_mm, ppgtt_mm.list);
2824 if (mm->type == INTEL_GVT_MM_PPGTT) {
2825 mutex_lock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2826 list_del_init(&mm->ppgtt_mm.lru_list);
2827 mutex_unlock(&vgpu->gvt->gtt.ppgtt_mm_lock);
2828 if (mm->ppgtt_mm.shadowed)
2829 invalidate_ppgtt_mm(mm);
2835 * intel_vgpu_reset_ggtt - reset the GGTT entries
2837 * @invalidate_old: invalidate old entries
2839 * This function is called at the vGPU create stage
2840 * to reset all the GGTT entries.
2843 void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
2845 struct intel_gvt *gvt = vgpu->gvt;
2846 const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
2847 struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
2848 struct intel_gvt_gtt_entry old_entry;
2852 pte_ops->set_pfn(&entry, gvt->gtt.scratch_mfn);
2853 pte_ops->set_present(&entry);
2855 index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2856 num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2857 while (num_entries--) {
2858 if (invalidate_old) {
2859 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2860 ggtt_invalidate_pte(vgpu, &old_entry);
2862 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2865 index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2866 num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2867 while (num_entries--) {
2868 if (invalidate_old) {
2869 ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index);
2870 ggtt_invalidate_pte(vgpu, &old_entry);
2872 ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
2875 ggtt_invalidate(gvt->gt);
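/*
 * Editor's summary: both the mappable (aperture) and non-mappable
 * (hidden) halves of this vGPU's GGTT range are rewritten with the
 * present scratch entry, optionally releasing the old DMA mappings
 * first, and the hardware TLB is flushed once at the end.
 */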
2879 * intel_vgpu_reset_gtt - reset all GTT related status
2882 * This function is called from vfio core to reset all
2883 * GTT related status, including GGTT, PPGTT, scratch page.
2886 void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
2888 /* Shadow pages are only created when there is no page
2889 * table tracking data, so remove page tracking data after
2890 * removing the shadow pages.
2892 intel_vgpu_destroy_all_ppgtt_mm(vgpu);
2893 intel_vgpu_reset_ggtt(vgpu, true);
2897 * intel_gvt_restore_ggtt - restore all vGPU's ggtt entries
2898 * @gvt: intel gvt device
2900 * This function is called at driver resume stage to restore
2901 * GGTT entries of every vGPU.
2904 void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
2906 struct intel_vgpu *vgpu;
2907 struct intel_vgpu_mm *mm;
2910 u32 idx, num_low, num_hi, offset;
2912 /* Restore dirty host ggtt for all vGPUs */
2913 idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
2914 mm = vgpu->gtt.ggtt_mm;
2916 num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
2917 offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
2918 for (idx = 0; idx < num_low; idx++) {
2919 pte = mm->ggtt_mm.host_ggtt_aperture[idx];
2920 if (pte & GEN8_PAGE_PRESENT)
2921 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
2924 num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
2925 offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
2926 for (idx = 0; idx < num_hi; idx++) {
2927 pte = mm->ggtt_mm.host_ggtt_hidden[idx];
2928 if (pte & GEN8_PAGE_PRESENT)
2929 write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);