2 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2, as
7 * published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
22 #include <linux/hugetlb.h>
23 #include <trace/events/kvm.h>
24 #include <asm/pgalloc.h>
25 #include <asm/cacheflush.h>
26 #include <asm/kvm_arm.h>
27 #include <asm/kvm_mmu.h>
28 #include <asm/kvm_mmio.h>
29 #include <asm/kvm_asm.h>
30 #include <asm/kvm_emulate.h>
34 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
36 static pgd_t *boot_hyp_pgd;
37 static pgd_t *hyp_pgd;
38 static pgd_t *merged_hyp_pgd;
39 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
41 static unsigned long hyp_idmap_start;
42 static unsigned long hyp_idmap_end;
43 static phys_addr_t hyp_idmap_vector;
45 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
47 #define kvm_pmd_huge(_x) (pmd_huge(_x) || pmd_trans_huge(_x))
48 #define kvm_pud_huge(_x) pud_huge(_x)
50 #define KVM_S2PTE_FLAG_IS_IOMAP (1UL << 0)
51 #define KVM_S2_FLAG_LOGGING_ACTIVE (1UL << 1)
53 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
55 return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
59 * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
60 * @kvm: pointer to kvm structure.
62 * Interface to HYP function to flush all VM TLB entries
64 void kvm_flush_remote_tlbs(struct kvm *kvm)
66 kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
69 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
72 * This function also gets called when dealing with HYP page
73 * tables. As HYP doesn't have an associated struct kvm (and
74 * the HYP page tables are fairly static), we don't do anything there.
78 kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
82 * D-Cache management functions. They take the page table entries by
83 * value, as they are flushing the cache using the kernel mapping (or kmap on 32bit).
86 static void kvm_flush_dcache_pte(pte_t pte)
88 __kvm_flush_dcache_pte(pte);
91 static void kvm_flush_dcache_pmd(pmd_t pmd)
93 __kvm_flush_dcache_pmd(pmd);
96 static void kvm_flush_dcache_pud(pud_t pud)
98 __kvm_flush_dcache_pud(pud);
101 static bool kvm_is_device_pfn(unsigned long pfn)
103 return !pfn_valid(pfn);
107 * stage2_dissolve_pmd() - clear and flush huge PMD entry
108 * @kvm: pointer to kvm structure.
110 * @pmd: pmd pointer for IPA
112 * Clears a PMD entry and flushes the address from the 1st and 2nd stage TLBs. Marks all
113 * pages in the range dirty.
115 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
117 if (!kvm_pmd_huge(*pmd))
121 kvm_tlb_flush_vmid_ipa(kvm, addr);
122 put_page(virt_to_page(pmd));
125 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
130 BUG_ON(max > KVM_NR_MEM_OBJS);
131 if (cache->nobjs >= min)
133 while (cache->nobjs < max) {
134 page = (void *)__get_free_page(PGALLOC_GFP);
137 cache->objects[cache->nobjs++] = page;
142 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
145 free_page((unsigned long)mc->objects[--mc->nobjs]);
148 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
152 BUG_ON(!mc || !mc->nobjs);
153 p = mc->objects[--mc->nobjs];
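/*
 * Illustrative sketch (not part of the original file): the cache above is
 * meant to be topped up with sleeping GFP_KERNEL allocations *before*
 * mmu_lock is taken, and then drained with mmu_memory_cache_alloc() while
 * holding the lock, where blocking allocations are not allowed.
 * kvm_phys_addr_ioremap() below follows exactly this pattern; the function
 * name example_map_one_page is hypothetical.
 */
#if 0
static int example_map_one_page(struct kvm *kvm, phys_addr_t ipa, pte_t pte)
{
	struct kvm_mmu_memory_cache cache = { 0, };
	int ret;

	/* May sleep: reserve enough pages for a worst-case table walk. */
	ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	spin_lock(&kvm->mmu_lock);
	/* Atomic context: missing table levels are drawn from the cache. */
	ret = stage2_set_pte(kvm, &cache, ipa, &pte, 0);
	spin_unlock(&kvm->mmu_lock);

	mmu_free_memory_cache(&cache);
	return ret;
}
#endif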
157 static void clear_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
159 pud_t *pud_table __maybe_unused = pud_offset(pgd, 0);
161 kvm_tlb_flush_vmid_ipa(kvm, addr);
162 pud_free(NULL, pud_table);
163 put_page(virt_to_page(pgd));
166 static void clear_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
168 pmd_t *pmd_table = pmd_offset(pud, 0);
169 VM_BUG_ON(pud_huge(*pud));
171 kvm_tlb_flush_vmid_ipa(kvm, addr);
172 pmd_free(NULL, pmd_table);
173 put_page(virt_to_page(pud));
176 static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
178 pte_t *pte_table = pte_offset_kernel(pmd, 0);
179 VM_BUG_ON(kvm_pmd_huge(*pmd));
181 kvm_tlb_flush_vmid_ipa(kvm, addr);
182 pte_free_kernel(NULL, pte_table);
183 put_page(virt_to_page(pmd));
187 * Unmapping vs dcache management:
189 * If a guest maps certain memory pages as uncached, all writes will
190 * bypass the data cache and go directly to RAM. However, the CPUs
191 * can still speculate reads (not writes) and fill cache lines with data.
194 * Those cache lines will be *clean* cache lines though, so a
195 * clean+invalidate operation is equivalent to an invalidate
196 * operation, because no cache lines are marked dirty.
198 * Those clean cache lines could be filled prior to an uncached write
199 * by the guest, and the cache coherent IO subsystem would therefore
200 * end up writing old data to disk.
202 * This is why right after unmapping a page/section and invalidating
203 * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
204 * the IO subsystem will never hit in the cache.
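/*
 * Distilled ordering sketch (added for illustration): for each mapped entry
 * the unmap helpers below perform, in this order,
 *
 *	old = *pte;
 *	kvm_set_pte(pte, __pte(0));		clear the descriptor
 *	kvm_tlb_flush_vmid_ipa(kvm, addr);	stop new walks from hitting it
 *	kvm_flush_dcache_pte(old);		clean+invalidate (normal memory only)
 *
 * so that the IO subsystem can never hit a stale, speculatively filled line
 * after the unmap.
 */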
206 static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
207 phys_addr_t addr, phys_addr_t end)
209 phys_addr_t start_addr = addr;
210 pte_t *pte, *start_pte;
212 start_pte = pte = pte_offset_kernel(pmd, addr);
214 if (!pte_none(*pte)) {
215 pte_t old_pte = *pte;
217 kvm_set_pte(pte, __pte(0));
218 kvm_tlb_flush_vmid_ipa(kvm, addr);
220 /* No need to invalidate the cache for device mappings */
221 if (!kvm_is_device_pfn(pte_pfn(old_pte)))
222 kvm_flush_dcache_pte(old_pte);
224 put_page(virt_to_page(pte));
226 } while (pte++, addr += PAGE_SIZE, addr != end);
228 if (kvm_pte_table_empty(kvm, start_pte))
229 clear_pmd_entry(kvm, pmd, start_addr);
232 static void unmap_pmds(struct kvm *kvm, pud_t *pud,
233 phys_addr_t addr, phys_addr_t end)
235 phys_addr_t next, start_addr = addr;
236 pmd_t *pmd, *start_pmd;
238 start_pmd = pmd = pmd_offset(pud, addr);
240 next = kvm_pmd_addr_end(addr, end);
241 if (!pmd_none(*pmd)) {
242 if (kvm_pmd_huge(*pmd)) {
243 pmd_t old_pmd = *pmd;
246 kvm_tlb_flush_vmid_ipa(kvm, addr);
248 kvm_flush_dcache_pmd(old_pmd);
250 put_page(virt_to_page(pmd));
252 unmap_ptes(kvm, pmd, addr, next);
255 } while (pmd++, addr = next, addr != end);
257 if (kvm_pmd_table_empty(kvm, start_pmd))
258 clear_pud_entry(kvm, pud, start_addr);
261 static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
262 phys_addr_t addr, phys_addr_t end)
264 phys_addr_t next, start_addr = addr;
265 pud_t *pud, *start_pud;
267 start_pud = pud = pud_offset(pgd, addr);
269 next = kvm_pud_addr_end(addr, end);
270 if (!pud_none(*pud)) {
271 if (pud_huge(*pud)) {
272 pud_t old_pud = *pud;
275 kvm_tlb_flush_vmid_ipa(kvm, addr);
277 kvm_flush_dcache_pud(old_pud);
279 put_page(virt_to_page(pud));
281 unmap_pmds(kvm, pud, addr, next);
284 } while (pud++, addr = next, addr != end);
286 if (kvm_pud_table_empty(kvm, start_pud))
287 clear_pgd_entry(kvm, pgd, start_addr);
291 static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
292 phys_addr_t start, u64 size)
295 phys_addr_t addr = start, end = start + size;
298 pgd = pgdp + kvm_pgd_index(addr);
300 next = kvm_pgd_addr_end(addr, end);
302 unmap_puds(kvm, pgd, addr, next);
303 } while (pgd++, addr = next, addr != end);
306 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
307 phys_addr_t addr, phys_addr_t end)
311 pte = pte_offset_kernel(pmd, addr);
313 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
314 kvm_flush_dcache_pte(*pte);
315 } while (pte++, addr += PAGE_SIZE, addr != end);
318 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
319 phys_addr_t addr, phys_addr_t end)
324 pmd = pmd_offset(pud, addr);
326 next = kvm_pmd_addr_end(addr, end);
327 if (!pmd_none(*pmd)) {
328 if (kvm_pmd_huge(*pmd))
329 kvm_flush_dcache_pmd(*pmd);
331 stage2_flush_ptes(kvm, pmd, addr, next);
333 } while (pmd++, addr = next, addr != end);
336 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
337 phys_addr_t addr, phys_addr_t end)
342 pud = pud_offset(pgd, addr);
344 next = kvm_pud_addr_end(addr, end);
345 if (!pud_none(*pud)) {
347 kvm_flush_dcache_pud(*pud);
349 stage2_flush_pmds(kvm, pud, addr, next);
351 } while (pud++, addr = next, addr != end);
354 static void stage2_flush_memslot(struct kvm *kvm,
355 struct kvm_memory_slot *memslot)
357 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
358 phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
362 pgd = kvm->arch.pgd + kvm_pgd_index(addr);
364 next = kvm_pgd_addr_end(addr, end);
365 stage2_flush_puds(kvm, pgd, addr, next);
366 } while (pgd++, addr = next, addr != end);
370 * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
371 * @kvm: The struct kvm pointer
373 * Go through the stage 2 page tables and invalidate any cache lines
374 * backing memory already mapped to the VM.
376 static void stage2_flush_vm(struct kvm *kvm)
378 struct kvm_memslots *slots;
379 struct kvm_memory_slot *memslot;
382 idx = srcu_read_lock(&kvm->srcu);
383 spin_lock(&kvm->mmu_lock);
385 slots = kvm_memslots(kvm);
386 kvm_for_each_memslot(memslot, slots)
387 stage2_flush_memslot(kvm, memslot);
389 spin_unlock(&kvm->mmu_lock);
390 srcu_read_unlock(&kvm->srcu, idx);
394 * free_boot_hyp_pgd - free HYP boot page tables
396 * Free the HYP boot page tables and remove the trampoline mapping from the runtime page tables.
398 void free_boot_hyp_pgd(void)
400 mutex_lock(&kvm_hyp_pgd_mutex);
403 unmap_range(NULL, boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
404 unmap_range(NULL, boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
405 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
410 unmap_range(NULL, hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
412 mutex_unlock(&kvm_hyp_pgd_mutex);
416 * free_hyp_pgds - free Hyp-mode page tables
418 * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
419 * therefore contains either mappings in the kernel memory area (above
420 * PAGE_OFFSET), or device mappings in the vmalloc range (from
421 * VMALLOC_START to VMALLOC_END).
423 * boot_hyp_pgd should only map two pages for the init code.
425 void free_hyp_pgds(void)
431 mutex_lock(&kvm_hyp_pgd_mutex);
434 for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
435 unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
436 for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
437 unmap_range(NULL, hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
439 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
442 if (merged_hyp_pgd) {
443 clear_page(merged_hyp_pgd);
444 free_page((unsigned long)merged_hyp_pgd);
445 merged_hyp_pgd = NULL;
448 mutex_unlock(&kvm_hyp_pgd_mutex);
451 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
452 unsigned long end, unsigned long pfn,
460 pte = pte_offset_kernel(pmd, addr);
461 kvm_set_pte(pte, pfn_pte(pfn, prot));
462 get_page(virt_to_page(pte));
463 kvm_flush_dcache_to_poc(pte, sizeof(*pte));
465 } while (addr += PAGE_SIZE, addr != end);
468 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
469 unsigned long end, unsigned long pfn,
474 unsigned long addr, next;
478 pmd = pmd_offset(pud, addr);
480 BUG_ON(pmd_sect(*pmd));
482 if (pmd_none(*pmd)) {
483 pte = pte_alloc_one_kernel(NULL, addr);
485 kvm_err("Cannot allocate Hyp pte\n");
488 pmd_populate_kernel(NULL, pmd, pte);
489 get_page(virt_to_page(pmd));
490 kvm_flush_dcache_to_poc(pmd, sizeof(*pmd));
493 next = pmd_addr_end(addr, end);
495 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
496 pfn += (next - addr) >> PAGE_SHIFT;
497 } while (addr = next, addr != end);
502 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
503 unsigned long end, unsigned long pfn,
508 unsigned long addr, next;
513 pud = pud_offset(pgd, addr);
515 if (pud_none_or_clear_bad(pud)) {
516 pmd = pmd_alloc_one(NULL, addr);
518 kvm_err("Cannot allocate Hyp pmd\n");
521 pud_populate(NULL, pud, pmd);
522 get_page(virt_to_page(pud));
523 kvm_flush_dcache_to_poc(pud, sizeof(*pud));
526 next = pud_addr_end(addr, end);
527 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
530 pfn += (next - addr) >> PAGE_SHIFT;
531 } while (addr = next, addr != end);
536 static int __create_hyp_mappings(pgd_t *pgdp,
537 unsigned long start, unsigned long end,
538 unsigned long pfn, pgprot_t prot)
542 unsigned long addr, next;
545 mutex_lock(&kvm_hyp_pgd_mutex);
546 addr = start & PAGE_MASK;
547 end = PAGE_ALIGN(end);
549 pgd = pgdp + pgd_index(addr);
551 if (pgd_none(*pgd)) {
552 pud = pud_alloc_one(NULL, addr);
554 kvm_err("Cannot allocate Hyp pud\n");
558 pgd_populate(NULL, pgd, pud);
559 get_page(virt_to_page(pgd));
560 kvm_flush_dcache_to_poc(pgd, sizeof(*pgd));
563 next = pgd_addr_end(addr, end);
564 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
567 pfn += (next - addr) >> PAGE_SHIFT;
568 } while (addr = next, addr != end);
570 mutex_unlock(&kvm_hyp_pgd_mutex);
574 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
576 if (!is_vmalloc_addr(kaddr)) {
577 BUG_ON(!virt_addr_valid(kaddr));
580 return page_to_phys(vmalloc_to_page(kaddr)) +
581 offset_in_page(kaddr);
586 * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
587 * @from: The virtual kernel start address of the range
588 * @to: The virtual kernel end address of the range (exclusive)
590 * The same virtual address as the kernel virtual address is also used
591 * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying physical pages.
594 int create_hyp_mappings(void *from, void *to)
596 phys_addr_t phys_addr;
597 unsigned long virt_addr;
598 unsigned long start = KERN_TO_HYP((unsigned long)from);
599 unsigned long end = KERN_TO_HYP((unsigned long)to);
601 start = start & PAGE_MASK;
602 end = PAGE_ALIGN(end);
604 for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
607 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
608 err = __create_hyp_mappings(hyp_pgd, virt_addr,
609 virt_addr + PAGE_SIZE,
610 __phys_to_pfn(phys_addr),
620 * create_hyp_io_mappings - duplicate a kernel IO mapping into Hyp mode
621 * @from: The kernel start VA of the range
622 * @to: The kernel end VA of the range (exclusive)
623 * @phys_addr: The physical start address which gets mapped
625 * The resulting HYP VA is the same as the kernel VA, modulo HYP_PAGE_OFFSET.
628 int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
630 unsigned long start = KERN_TO_HYP((unsigned long)from);
631 unsigned long end = KERN_TO_HYP((unsigned long)to);
633 /* Check for a valid kernel IO mapping */
634 if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
637 return __create_hyp_mappings(hyp_pgd, start, end,
638 __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
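/*
 * Usage sketch (added for illustration, loosely based on the Hyp setup code
 * in arch/arm/kvm/arm.c): objects in the kernel linear map are mirrored into
 * Hyp with create_hyp_mappings(), while ioremap()ed MMIO regions get a
 * device mapping via create_hyp_io_mappings().  The parameter names below
 * are placeholders, not taken from this file.
 */
#if 0
static int example_map_into_hyp(void *stack_page, void *vgic_vctrl_base,
				phys_addr_t vgic_vctrl_pa)
{
	int err;

	/* Mirror one kernel page (e.g. a per-CPU Hyp stack) into Hyp. */
	err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
	if (err)
		return err;

	/* Mirror an MMIO page, keeping device memory attributes in Hyp. */
	return create_hyp_io_mappings(vgic_vctrl_base,
				      vgic_vctrl_base + PAGE_SIZE,
				      vgic_vctrl_pa);
}
#endif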
641 /* Free the HW pgd, one page at a time */
642 static void kvm_free_hwpgd(void *hwpgd)
644 free_pages_exact(hwpgd, kvm_get_hwpgd_size());
647 /* Allocate the HW PGD, making sure that each page gets its own refcount */
648 static void *kvm_alloc_hwpgd(void)
650 unsigned int size = kvm_get_hwpgd_size();
652 return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
656 * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
657 * @kvm: The KVM struct pointer for the VM.
659 * Allocates only the stage-2 HW PGD level table(s) of the size given by
660 * kvm_get_hwpgd_size() (can support either full 40-bit input addresses or
661 * limited to 32-bit input addresses). Clears the allocated pages.
663 * Note we don't need locking here as this is only called when the VM is
664 * created, which can only be done once.
666 int kvm_alloc_stage2_pgd(struct kvm *kvm)
671 if (kvm->arch.pgd != NULL) {
672 kvm_err("kvm_arch already initialized?\n");
676 hwpgd = kvm_alloc_hwpgd();
680 /* When the kernel uses more levels of page tables than the
681 * guest, we allocate a fake PGD and pre-populate it to point
682 * to the next-level page table, which will be the real
683 * initial page table pointed to by the VTTBR.
685 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
686 * the PMD and the kernel will use folded pud.
687 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD pages.
690 if (KVM_PREALLOC_LEVEL > 0) {
694 * Allocate fake pgd for the page table manipulation macros to
695 * work. This is not used by the hardware and we have no
696 * alignment requirement for this allocation.
698 pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
699 GFP_KERNEL | __GFP_ZERO);
702 kvm_free_hwpgd(hwpgd);
706 /* Plug the HW PGD into the fake one. */
707 for (i = 0; i < PTRS_PER_S2_PGD; i++) {
708 if (KVM_PREALLOC_LEVEL == 1)
709 pgd_populate(NULL, pgd + i,
710 (pud_t *)hwpgd + i * PTRS_PER_PUD);
711 else if (KVM_PREALLOC_LEVEL == 2)
712 pud_populate(NULL, pud_offset(pgd, 0) + i,
713 (pmd_t *)hwpgd + i * PTRS_PER_PMD);
717 * Allocate actual first-level Stage-2 page table used by the
718 * hardware for Stage-2 page table walks.
720 pgd = (pgd_t *)hwpgd;
729 * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
730 * @kvm: The VM pointer
731 * @start: The intermediate physical base address of the range to unmap
732 * @size: The size of the area to unmap
734 * Clear a range of stage-2 mappings, lowering the various ref-counts. Must
735 * be called while holding mmu_lock (unless for freeing the stage2 pgd before
736 * destroying the VM), otherwise another faulting VCPU may come in and mess
737 * with things behind our backs.
739 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
741 unmap_range(kvm, kvm->arch.pgd, start, size);
744 static void stage2_unmap_memslot(struct kvm *kvm,
745 struct kvm_memory_slot *memslot)
747 hva_t hva = memslot->userspace_addr;
748 phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
749 phys_addr_t size = PAGE_SIZE * memslot->npages;
750 hva_t reg_end = hva + size;
753 * A memory region could potentially cover multiple VMAs, and any holes
754 * between them, so iterate over all of them to find out if we should unmap any of them.
757 * +--------------------------------------------+
758 * +---------------+----------------+ +----------------+
759 * | : VMA 1 | VMA 2 | | VMA 3 : |
760 * +---------------+----------------+ +----------------+
762 * +--------------------------------------------+
765 struct vm_area_struct *vma = find_vma(current->mm, hva);
766 hva_t vm_start, vm_end;
768 if (!vma || vma->vm_start >= reg_end)
772 * Take the intersection of this VMA with the memory region
774 vm_start = max(hva, vma->vm_start);
775 vm_end = min(reg_end, vma->vm_end);
777 if (!(vma->vm_flags & VM_PFNMAP)) {
778 gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
779 unmap_stage2_range(kvm, gpa, vm_end - vm_start);
782 } while (hva < reg_end);
786 * stage2_unmap_vm - Unmap Stage-2 RAM mappings
787 * @kvm: The struct kvm pointer
789 * Go through the memregions and unmap any regular RAM
790 * backing memory already mapped to the VM.
792 void stage2_unmap_vm(struct kvm *kvm)
794 struct kvm_memslots *slots;
795 struct kvm_memory_slot *memslot;
798 idx = srcu_read_lock(&kvm->srcu);
799 down_read(&current->mm->mmap_sem);
800 spin_lock(&kvm->mmu_lock);
802 slots = kvm_memslots(kvm);
803 kvm_for_each_memslot(memslot, slots)
804 stage2_unmap_memslot(kvm, memslot);
806 spin_unlock(&kvm->mmu_lock);
807 up_read(&current->mm->mmap_sem);
808 srcu_read_unlock(&kvm->srcu, idx);
812 * kvm_free_stage2_pgd - free all stage-2 tables
813 * @kvm: The KVM struct pointer for the VM.
815 * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
816 * underlying level-2 and level-3 tables before freeing the actual level-1 table
817 * and setting the struct pointer to NULL.
819 * Note we don't need locking here as this is only called when the VM is
820 * destroyed, which can only be done once.
822 void kvm_free_stage2_pgd(struct kvm *kvm)
824 if (kvm->arch.pgd == NULL)
827 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
828 kvm_free_hwpgd(kvm_get_hwpgd(kvm));
829 if (KVM_PREALLOC_LEVEL > 0)
830 kfree(kvm->arch.pgd);
832 kvm->arch.pgd = NULL;
835 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
841 pgd = kvm->arch.pgd + kvm_pgd_index(addr);
842 if (WARN_ON(pgd_none(*pgd))) {
845 pud = mmu_memory_cache_alloc(cache);
846 pgd_populate(NULL, pgd, pud);
847 get_page(virt_to_page(pgd));
850 return pud_offset(pgd, addr);
853 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
859 pud = stage2_get_pud(kvm, cache, addr);
860 if (pud_none(*pud)) {
863 pmd = mmu_memory_cache_alloc(cache);
864 pud_populate(NULL, pud, pmd);
865 get_page(virt_to_page(pud));
868 return pmd_offset(pud, addr);
871 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
872 *cache, phys_addr_t addr, const pmd_t *new_pmd)
876 pmd = stage2_get_pmd(kvm, cache, addr);
880 * Mapping in huge pages should only happen through a fault. If a
881 * page is merged into a transparent huge page, the individual
882 * subpages of that huge page should be unmapped through MMU
883 * notifiers before we get here.
885 * Merging of CompoundPages is not supported; they should instead be
886 * split first, unmapped, merged, and mapped back in on-demand.
888 VM_BUG_ON(pmd_present(*pmd) && pmd_pfn(*pmd) != pmd_pfn(*new_pmd));
891 if (pmd_present(old_pmd)) {
893 kvm_tlb_flush_vmid_ipa(kvm, addr);
895 get_page(virt_to_page(pmd));
898 kvm_set_pmd(pmd, *new_pmd);
902 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
903 phys_addr_t addr, const pte_t *new_pte,
908 bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
909 bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
911 VM_BUG_ON(logging_active && !cache);
913 /* Create stage-2 page table mapping - Levels 0 and 1 */
914 pmd = stage2_get_pmd(kvm, cache, addr);
917 * Ignore calls from kvm_set_spte_hva for unallocated address ranges.
924 * While dirty page logging is active, dissolve a huge PMD, then continue on to install a regular PTE mapping.
928 stage2_dissolve_pmd(kvm, addr, pmd);
930 /* Create stage-2 page mappings - Level 2 */
931 if (pmd_none(*pmd)) {
933 return 0; /* ignore calls from kvm_set_spte_hva */
934 pte = mmu_memory_cache_alloc(cache);
936 pmd_populate_kernel(NULL, pmd, pte);
937 get_page(virt_to_page(pmd));
940 pte = pte_offset_kernel(pmd, addr);
942 if (iomap && pte_present(*pte))
945 /* Create 2nd stage page table mapping - Level 3 */
947 if (pte_present(old_pte)) {
948 kvm_set_pte(pte, __pte(0));
949 kvm_tlb_flush_vmid_ipa(kvm, addr);
951 get_page(virt_to_page(pte));
954 kvm_set_pte(pte, *new_pte);
959 * kvm_phys_addr_ioremap - map a device range to guest IPA
961 * @kvm: The KVM pointer
962 * @guest_ipa: The IPA at which to insert the mapping
963 * @pa: The physical address of the device
964 * @size: The size of the mapping
966 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
967 phys_addr_t pa, unsigned long size, bool writable)
969 phys_addr_t addr, end;
972 struct kvm_mmu_memory_cache cache = { 0, };
974 end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
975 pfn = __phys_to_pfn(pa);
977 for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
978 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
981 kvm_set_s2pte_writable(&pte);
983 ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
987 spin_lock(&kvm->mmu_lock);
988 ret = stage2_set_pte(kvm, &cache, addr, &pte,
989 KVM_S2PTE_FLAG_IS_IOMAP);
990 spin_unlock(&kvm->mmu_lock);
998 mmu_free_memory_cache(&cache);
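/*
 * Caller sketch (added for illustration): a virtual device, such as the VGIC
 * CPU interface, is typically placed into the guest IPA space with a call
 * like
 *
 *	kvm_phys_addr_ioremap(kvm, guest_ipa, host_pa, size, true);
 *
 * where guest_ipa, host_pa and size are placeholders for the device's
 * addresses, and true requests a writable mapping.
 */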
1002 static bool transparent_hugepage_adjust(pfn_t *pfnp, phys_addr_t *ipap)
1005 gfn_t gfn = *ipap >> PAGE_SHIFT;
1007 if (PageTransCompound(pfn_to_page(pfn))) {
1010 * The address we faulted on is backed by a transparent huge
1011 * page. However, because we map the compound huge page and
1012 * not the individual tail page, we need to transfer the
1013 * refcount to the head page. We have to be careful that the
1014 * THP doesn't start to split while we are adjusting the
1017 * We are sure this doesn't happen, because mmu_notifier_retry
1018 * was successful and we are holding the mmu_lock, so if this
1019 * THP is trying to split, it will be blocked in the mmu
1020 * notifier before touching any of the pages, specifically
1021 * before being able to call __split_huge_page_refcount().
1023 * We can therefore safely transfer the refcount from PG_tail
1024 * to PG_head and switch the pfn from a tail page to the head
1027 mask = PTRS_PER_PMD - 1;
1028 VM_BUG_ON((gfn & mask) != (pfn & mask));
1031 kvm_release_pfn_clean(pfn);
1043 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
1045 if (kvm_vcpu_trap_is_iabt(vcpu))
1048 return kvm_vcpu_dabt_iswrite(vcpu);
1052 * stage2_wp_ptes - write protect PMD range
1053 * @pmd: pointer to pmd entry
1054 * @addr: range start address
1055 * @end: range end address
1057 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1061 pte = pte_offset_kernel(pmd, addr);
1063 if (!pte_none(*pte)) {
1064 if (!kvm_s2pte_readonly(pte))
1065 kvm_set_s2pte_readonly(pte);
1067 } while (pte++, addr += PAGE_SIZE, addr != end);
1071 * stage2_wp_pmds - write protect PUD range
1072 * @pud: pointer to pud entry
1073 * @addr: range start address
1074 * @end: range end address
1076 static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
1081 pmd = pmd_offset(pud, addr);
1084 next = kvm_pmd_addr_end(addr, end);
1085 if (!pmd_none(*pmd)) {
1086 if (kvm_pmd_huge(*pmd)) {
1087 if (!kvm_s2pmd_readonly(pmd))
1088 kvm_set_s2pmd_readonly(pmd);
1090 stage2_wp_ptes(pmd, addr, next);
1093 } while (pmd++, addr = next, addr != end);
1097 * stage2_wp_puds - write protect PGD range
1098 * @pgd: pointer to pgd entry
1099 * @addr: range start address
1100 * @end: range end address
1102 * Process PUD entries; huge PUDs are not supported, so we BUG() on them.
1104 static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
1109 pud = pud_offset(pgd, addr);
1111 next = kvm_pud_addr_end(addr, end);
1112 if (!pud_none(*pud)) {
1113 /* TODO:PUD not supported, revisit later if supported */
1114 BUG_ON(kvm_pud_huge(*pud));
1115 stage2_wp_pmds(pud, addr, next);
1117 } while (pud++, addr = next, addr != end);
1121 * stage2_wp_range() - write protect stage2 memory region range
1122 * @kvm: The KVM pointer
1123 * @addr: Start address of range
1124 * @end: End address of range
1126 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1131 pgd = kvm->arch.pgd + kvm_pgd_index(addr);
1134 * Release kvm_mmu_lock periodically if the memory region is
1135 * large. Otherwise, we may see kernel panics with
1136 * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1137 * CONFIG_LOCKDEP. Additionally, holding the lock too long
1138 * will also starve other vCPUs.
1140 if (need_resched() || spin_needbreak(&kvm->mmu_lock))
1141 cond_resched_lock(&kvm->mmu_lock);
1143 next = kvm_pgd_addr_end(addr, end);
1144 if (pgd_present(*pgd))
1145 stage2_wp_puds(pgd, addr, next);
1146 } while (pgd++, addr = next, addr != end);
1150 * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1151 * @kvm: The KVM pointer
1152 * @slot: The memory slot to write protect
1154 * Called to start logging dirty pages after the memory region has been set up
1155 * with the KVM_MEM_LOG_DIRTY_PAGES flag. After this function returns, all
1156 * present PMDs and PTEs in the memory region are write protected.
1157 * Afterwards, the dirty page log can be read.
1159 * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1160 * serializing operations for VM memory regions.
1162 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1164 struct kvm_memslots *slots = kvm_memslots(kvm);
1165 struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1166 phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1167 phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1169 spin_lock(&kvm->mmu_lock);
1170 stage2_wp_range(kvm, start, end);
1171 spin_unlock(&kvm->mmu_lock);
1172 kvm_flush_remote_tlbs(kvm);
1176 * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1177 * @kvm: The KVM pointer
1178 * @slot: The memory slot associated with mask
1179 * @gfn_offset: The gfn offset in memory slot
1180 * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory
1181 * slot to be write protected
1183 * Walks the bits set in @mask and write protects the associated PTEs. The
1184 * caller must hold kvm_mmu_lock.
1186 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1187 struct kvm_memory_slot *slot,
1188 gfn_t gfn_offset, unsigned long mask)
1190 phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1191 phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT;
1192 phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1194 stage2_wp_range(kvm, start, end);
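/*
 * Worked example (added for illustration): with gfn_offset == 0 and
 * mask == 0x38 (bits 3..5 set), __ffs(mask) == 3 and __fls(mask) == 5, so
 * the range [base_gfn + 3, base_gfn + 6) is write protected - exactly the
 * three pages whose bits were set in the dirty mask.
 */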
1198 * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected dirty pages
1201 * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1202 * enable dirty logging for them.
1204 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1205 struct kvm_memory_slot *slot,
1206 gfn_t gfn_offset, unsigned long mask)
1208 kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1211 static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
1212 unsigned long size, bool uncached)
1214 __coherent_cache_guest_page(vcpu, pfn, size, uncached);
1217 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1218 struct kvm_memory_slot *memslot, unsigned long hva,
1219 unsigned long fault_status)
1222 bool write_fault, writable, hugetlb = false, force_pte = false;
1223 unsigned long mmu_seq;
1224 gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1225 struct kvm *kvm = vcpu->kvm;
1226 struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1227 struct vm_area_struct *vma;
1229 pgprot_t mem_type = PAGE_S2;
1230 bool fault_ipa_uncached;
1231 bool logging_active = memslot_is_logging(memslot);
1232 unsigned long flags = 0;
1234 write_fault = kvm_is_write_fault(vcpu);
1235 if (fault_status == FSC_PERM && !write_fault) {
1236 kvm_err("Unexpected L2 read permission error\n");
1240 /* Let's check if we will get back a huge page backed by hugetlbfs */
1241 down_read(&current->mm->mmap_sem);
1242 vma = find_vma_intersection(current->mm, hva, hva + 1);
1243 if (unlikely(!vma)) {
1244 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1245 up_read(&current->mm->mmap_sem);
1249 if (is_vm_hugetlb_page(vma) && !logging_active) {
1251 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
1254 * Pages belonging to memslots that don't have the same
1255 * alignment for userspace and IPA cannot be mapped using
1256 * block descriptors even if the pages belong to a THP for
1257 * the process, because the stage-2 block descriptor will
1258 * cover more than a single THP and we lose atomicity for
1259 * unmapping, updates, and splits of the THP or other pages
1260 * in the stage-2 block range.
1262 if ((memslot->userspace_addr & ~PMD_MASK) !=
1263 ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
1266 up_read(&current->mm->mmap_sem);
1268 /* We need minimum second+third level pages */
1269 ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
1274 mmu_seq = vcpu->kvm->mmu_notifier_seq;
1276 * Ensure the read of mmu_notifier_seq happens before we call
1277 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1278 * the page we just got a reference to getting unmapped before we have a
1279 * chance to grab the mmu_lock, which ensures that if the page gets
1280 * unmapped afterwards, the call to kvm_unmap_hva will take it away
1281 * from us again properly. This smp_rmb() interacts with the smp_wmb()
1282 * in kvm_mmu_notifier_invalidate_<page|range_end>.
1286 pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1287 if (is_error_pfn(pfn))
1290 if (kvm_is_device_pfn(pfn)) {
1291 mem_type = PAGE_S2_DEVICE;
1292 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1293 } else if (logging_active) {
1295 * Faults on pages in a memslot with logging enabled
1296 * should not be mapped with huge pages (it introduces churn
1297 * and performance degradation), so force a pte mapping.
1300 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1303 * Only actually map the page as writable if this was a write fault.
1310 spin_lock(&kvm->mmu_lock);
1311 if (mmu_notifier_retry(kvm, mmu_seq))
1314 if (!hugetlb && !force_pte)
1315 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
1317 fault_ipa_uncached = memslot->flags & KVM_MEMSLOT_INCOHERENT;
1320 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
1321 new_pmd = pmd_mkhuge(new_pmd);
1323 kvm_set_s2pmd_writable(&new_pmd);
1324 kvm_set_pfn_dirty(pfn);
1326 coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
1327 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1329 pte_t new_pte = pfn_pte(pfn, mem_type);
1332 kvm_set_s2pte_writable(&new_pte);
1333 kvm_set_pfn_dirty(pfn);
1334 mark_page_dirty(kvm, gfn);
1336 coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
1337 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1341 spin_unlock(&kvm->mmu_lock);
1342 kvm_set_pfn_accessed(pfn);
1343 kvm_release_pfn_clean(pfn);
1348 * Resolve the access fault by making the page young again.
1349 * Note that because the faulting entry is guaranteed not to be
1350 * cached in the TLB, we don't need to invalidate anything.
1352 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1357 bool pfn_valid = false;
1359 trace_kvm_access_fault(fault_ipa);
1361 spin_lock(&vcpu->kvm->mmu_lock);
1363 pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
1364 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1367 if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
1368 *pmd = pmd_mkyoung(*pmd);
1369 pfn = pmd_pfn(*pmd);
1374 pte = pte_offset_kernel(pmd, fault_ipa);
1375 if (pte_none(*pte)) /* Nothing there either */
1378 *pte = pte_mkyoung(*pte); /* Just a page... */
1379 pfn = pte_pfn(*pte);
1382 spin_unlock(&vcpu->kvm->mmu_lock);
1384 kvm_set_pfn_accessed(pfn);
1388 * kvm_handle_guest_abort - handles all 2nd stage aborts
1389 * @vcpu: the VCPU pointer
1390 * @run: the kvm_run structure
1392 * Any abort that gets to the host is almost guaranteed to be caused by a
1393 * missing second stage translation table entry, which can mean that either the
1394 * guest simply needs more memory and we must allocate an appropriate page or it
1395 * can mean that the guest tried to access I/O memory, which is emulated by user
1396 * space. The distinction is based on the IPA causing the fault and whether this
1397 * memory region has been registered as standard RAM by user space.
1399 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1401 unsigned long fault_status;
1402 phys_addr_t fault_ipa;
1403 struct kvm_memory_slot *memslot;
1405 bool is_iabt, write_fault, writable;
1409 is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1410 fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1412 trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1413 kvm_vcpu_get_hfar(vcpu), fault_ipa);
1415 /* Check that the stage-2 fault is a translation, permission or access fault */
1416 fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1417 if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1418 fault_status != FSC_ACCESS) {
1419 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1420 kvm_vcpu_trap_get_class(vcpu),
1421 (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1422 (unsigned long)kvm_vcpu_get_hsr(vcpu));
1426 idx = srcu_read_lock(&vcpu->kvm->srcu);
1428 gfn = fault_ipa >> PAGE_SHIFT;
1429 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1430 hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1431 write_fault = kvm_is_write_fault(vcpu);
1432 if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1434 /* Prefetch Abort on I/O address */
1435 kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1441 * The IPA is reported as [MAX:12], so we need to
1442 * complement it with the bottom 12 bits from the
1443 * faulting VA. This is always 12 bits, irrespective of the page size.
1446 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1447 ret = io_mem_abort(vcpu, run, fault_ipa);
1451 /* Userspace should not be able to register out-of-bounds IPAs */
1452 VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1454 if (fault_status == FSC_ACCESS) {
1455 handle_access_fault(vcpu, fault_ipa);
1460 ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1464 srcu_read_unlock(&vcpu->kvm->srcu, idx);
1468 static int handle_hva_to_gpa(struct kvm *kvm,
1469 unsigned long start,
1471 int (*handler)(struct kvm *kvm,
1472 gpa_t gpa, void *data),
1475 struct kvm_memslots *slots;
1476 struct kvm_memory_slot *memslot;
1479 slots = kvm_memslots(kvm);
1481 /* we only care about the pages that the guest sees */
1482 kvm_for_each_memslot(memslot, slots) {
1483 unsigned long hva_start, hva_end;
1486 hva_start = max(start, memslot->userspace_addr);
1487 hva_end = min(end, memslot->userspace_addr +
1488 (memslot->npages << PAGE_SHIFT));
1489 if (hva_start >= hva_end)
1493 * {gfn(page) | page intersects with [hva_start, hva_end)} =
1494 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
1496 gfn = hva_to_gfn_memslot(hva_start, memslot);
1497 gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
1499 for (; gfn < gfn_end; ++gfn) {
1500 gpa_t gpa = gfn << PAGE_SHIFT;
1501 ret |= handler(kvm, gpa, data);
1508 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1510 unmap_stage2_range(kvm, gpa, PAGE_SIZE);
1514 int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
1516 unsigned long end = hva + PAGE_SIZE;
1521 trace_kvm_unmap_hva(hva);
1522 handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL);
1526 int kvm_unmap_hva_range(struct kvm *kvm,
1527 unsigned long start, unsigned long end)
1532 trace_kvm_unmap_hva_range(start, end);
1533 handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
1537 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data)
1539 pte_t *pte = (pte_t *)data;
1542 * We can always call stage2_set_pte with the KVM_S2_FLAG_LOGGING_ACTIVE
1543 * flag clear because MMU notifiers will have unmapped a huge PMD before
1544 * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
1545 * therefore stage2_set_pte() never needs to clear out a huge PMD
1546 * through this calling path.
1548 stage2_set_pte(kvm, NULL, gpa, pte, 0);
1553 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1555 unsigned long end = hva + PAGE_SIZE;
1561 trace_kvm_set_spte_hva(hva);
1562 stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2);
1563 handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1566 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1571 pmd = stage2_get_pmd(kvm, NULL, gpa);
1572 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1575 if (kvm_pmd_huge(*pmd)) { /* THP, HugeTLB */
1576 if (pmd_young(*pmd)) {
1577 *pmd = pmd_mkold(*pmd);
1584 pte = pte_offset_kernel(pmd, gpa);
1588 if (pte_young(*pte)) {
1589 *pte = pte_mkold(*pte); /* Just a page... */
1596 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, void *data)
1601 pmd = stage2_get_pmd(kvm, NULL, gpa);
1602 if (!pmd || pmd_none(*pmd)) /* Nothing there */
1605 if (kvm_pmd_huge(*pmd)) /* THP, HugeTLB */
1606 return pmd_young(*pmd);
1608 pte = pte_offset_kernel(pmd, gpa);
1609 if (!pte_none(*pte)) /* Just a page... */
1610 return pte_young(*pte);
1615 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1617 trace_kvm_age_hva(start, end);
1618 return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
1621 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1623 trace_kvm_test_age_hva(hva);
1624 return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
1627 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1629 mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1632 phys_addr_t kvm_mmu_get_httbr(void)
1634 if (__kvm_cpu_uses_extended_idmap())
1635 return virt_to_phys(merged_hyp_pgd);
1637 return virt_to_phys(hyp_pgd);
1640 phys_addr_t kvm_mmu_get_boot_httbr(void)
1642 if (__kvm_cpu_uses_extended_idmap())
1643 return virt_to_phys(merged_hyp_pgd);
1645 return virt_to_phys(boot_hyp_pgd);
1648 phys_addr_t kvm_get_idmap_vector(void)
1650 return hyp_idmap_vector;
1653 int kvm_mmu_init(void)
1657 hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
1658 hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
1659 hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
1662 * We rely on the linker script to ensure at build time that the HYP
1663 * init code does not cross a page boundary.
1665 BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1667 hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1668 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1670 if (!hyp_pgd || !boot_hyp_pgd) {
1671 kvm_err("Hyp mode PGD not allocated\n");
1676 /* Create the idmap in the boot page tables */
1677 err = __create_hyp_mappings(boot_hyp_pgd,
1678 hyp_idmap_start, hyp_idmap_end,
1679 __phys_to_pfn(hyp_idmap_start),
1683 kvm_err("Failed to idmap %lx-%lx\n",
1684 hyp_idmap_start, hyp_idmap_end);
1688 if (__kvm_cpu_uses_extended_idmap()) {
1689 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
1690 if (!merged_hyp_pgd) {
1691 kvm_err("Failed to allocate extra HYP pgd\n");
1694 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
1699 /* Map the very same page at the trampoline VA */
1700 err = __create_hyp_mappings(boot_hyp_pgd,
1701 TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
1702 __phys_to_pfn(hyp_idmap_start),
1705 kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
1710 /* Map the same page again into the runtime page tables */
1711 err = __create_hyp_mappings(hyp_pgd,
1712 TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
1713 __phys_to_pfn(hyp_idmap_start),
1716 kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
1727 void kvm_arch_commit_memory_region(struct kvm *kvm,
1728 const struct kvm_userspace_memory_region *mem,
1729 const struct kvm_memory_slot *old,
1730 const struct kvm_memory_slot *new,
1731 enum kvm_mr_change change)
1734 * At this point memslot has been committed and there is an
1735 * allocated dirty_bitmap[]; dirty pages will be tracked while the
1736 * memory slot is write protected.
1738 if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
1739 kvm_mmu_wp_memory_region(kvm, mem->slot);
1742 int kvm_arch_prepare_memory_region(struct kvm *kvm,
1743 struct kvm_memory_slot *memslot,
1744 const struct kvm_userspace_memory_region *mem,
1745 enum kvm_mr_change change)
1747 hva_t hva = mem->userspace_addr;
1748 hva_t reg_end = hva + mem->memory_size;
1749 bool writable = !(mem->flags & KVM_MEM_READONLY);
1752 if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
1753 change != KVM_MR_FLAGS_ONLY)
1757 * Prevent userspace from creating a memory region outside of the IPA
1758 * space addressable by the KVM guest.
1760 if (memslot->base_gfn + memslot->npages >=
1761 (KVM_PHYS_SIZE >> PAGE_SHIFT))
1764 down_read(&current->mm->mmap_sem);
1766 * A memory region could potentially cover multiple VMAs, and any holes
1767 * between them, so iterate over all of them to find out if we can map
1768 * any of them right now.
1770 * +--------------------------------------------+
1771 * +---------------+----------------+ +----------------+
1772 * | : VMA 1 | VMA 2 | | VMA 3 : |
1773 * +---------------+----------------+ +----------------+
1775 * +--------------------------------------------+
1778 struct vm_area_struct *vma = find_vma(current->mm, hva);
1779 hva_t vm_start, vm_end;
1781 if (!vma || vma->vm_start >= reg_end)
1785 * Mapping a read-only VMA is only allowed if the
1786 * memory region is configured as read-only.
1788 if (writable && !(vma->vm_flags & VM_WRITE)) {
1794 * Take the intersection of this VMA with the memory region
1796 vm_start = max(hva, vma->vm_start);
1797 vm_end = min(reg_end, vma->vm_end);
1799 if (vma->vm_flags & VM_PFNMAP) {
1800 gpa_t gpa = mem->guest_phys_addr +
1801 (vm_start - mem->userspace_addr);
1804 pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
1805 pa += vm_start - vma->vm_start;
1807 /* IO region dirty page logging not allowed */
1808 if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
1813 ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
1820 } while (hva < reg_end);
1822 if (change == KVM_MR_FLAGS_ONLY)
1825 spin_lock(&kvm->mmu_lock);
1827 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
1829 stage2_flush_memslot(kvm, memslot);
1830 spin_unlock(&kvm->mmu_lock);
1832 up_read(&current->mm->mmap_sem);
1836 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
1837 struct kvm_memory_slot *dont)
1841 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
1842 unsigned long npages)
1845 * Readonly memslots are not incoherent with the caches by definition,
1846 * but in practice, they are used mostly to emulate ROMs or NOR flashes
1847 * that the guest may consider devices and hence map as uncached.
1848 * To prevent incoherency issues in these cases, tag all readonly
1849 * regions as incoherent.
1851 if (slot->flags & KVM_MEM_READONLY)
1852 slot->flags |= KVM_MEMSLOT_INCOHERENT;
1856 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
1860 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1862 kvm_free_stage2_pgd(kvm);
1865 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
1866 struct kvm_memory_slot *slot)
1868 gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
1869 phys_addr_t size = slot->npages << PAGE_SHIFT;
1871 spin_lock(&kvm->mmu_lock);
1872 unmap_stage2_range(kvm, gpa, size);
1873 spin_unlock(&kvm->mmu_lock);
1877 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
1880 * - S/W ops are local to a CPU (not broadcast)
1881 * - We have line migration behind our back (speculation)
1882 * - System caches don't support S/W at all (damn!)
1884 * In the face of the above, the best we can do is to try and convert
1885 * S/W ops to VA ops. Because the guest is not allowed to infer the
1886 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
1887 * which is a rather good thing for us.
1889 * Also, it is only used when turning caches on/off ("The expected
1890 * usage of the cache maintenance instructions that operate by set/way
1891 * is associated with the cache maintenance instructions associated
1892 * with the powerdown and powerup of caches, if this is required by
1893 * the implementation.").
1895 * We use the following policy:
1897 * - If we trap a S/W operation, we enable VM trapping to detect
1898 * caches being turned on/off, and do a full clean.
1900 * - We flush the caches on both caches being turned on and off.
1902 * - Once the caches are enabled, we stop trapping VM ops.
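/*
 * Resulting sequence, sketched for illustration:
 *
 *	guest issues a S/W cache op  -> kvm_set_way_flush(): flush the VM and
 *					set HCR_TVM to trap VM register writes
 *	guest toggles SCTLR.C        -> kvm_toggle_cache(): flush the VM again
 *					on the on/off transition
 *	caches end up enabled        -> HCR_TVM is cleared and VM register
 *					trapping stops until the next S/W op
 */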
1904 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
1906 unsigned long hcr = vcpu_get_hcr(vcpu);
1909 * If this is the first time we do a S/W operation
910 * (i.e. HCR_TVM not set) flush the whole memory, and enable VM register trapping.
1913 * Otherwise, rely on the VM trapping to wait for the MMU +
1914 * Caches to be turned off. At that point, we'll be able to
1915 * clean the caches again.
1917 if (!(hcr & HCR_TVM)) {
1918 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
1919 vcpu_has_cache_enabled(vcpu));
1920 stage2_flush_vm(vcpu->kvm);
1921 vcpu_set_hcr(vcpu, hcr | HCR_TVM);
1925 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
1927 bool now_enabled = vcpu_has_cache_enabled(vcpu);
1930 * If switching the MMU+caches on, need to invalidate the caches.
1931 * If switching it off, need to clean the caches.
1932 * Clean + invalidate does the trick always.
1934 if (now_enabled != was_enabled)
1935 stage2_flush_vm(vcpu->kvm);
1937 /* Caches are now on, stop trapping VM ops (until a S/W op) */
1939 vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
1941 trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);