1 /*
2  * Copyright (C) 2012 - Virtual Open Systems and Columbia University
3  * Author: Christoffer Dall <c.dall@virtualopensystems.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License, version 2, as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, write to the Free Software
16  * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
17  */
18
19 #include <linux/mman.h>
20 #include <linux/kvm_host.h>
21 #include <linux/io.h>
22 #include <linux/hugetlb.h>
23 #include <linux/sched/signal.h>
24 #include <trace/events/kvm.h>
25 #include <asm/pgalloc.h>
26 #include <asm/cacheflush.h>
27 #include <asm/kvm_arm.h>
28 #include <asm/kvm_mmu.h>
29 #include <asm/kvm_mmio.h>
30 #include <asm/kvm_asm.h>
31 #include <asm/kvm_emulate.h>
32 #include <asm/virt.h>
33 #include <asm/system_misc.h>
34
35 #include "trace.h"
36
37 static pgd_t *boot_hyp_pgd;
38 static pgd_t *hyp_pgd;
39 static pgd_t *merged_hyp_pgd;
40 static DEFINE_MUTEX(kvm_hyp_pgd_mutex);
41
42 static unsigned long hyp_idmap_start;
43 static unsigned long hyp_idmap_end;
44 static phys_addr_t hyp_idmap_vector;
45
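/*
 * io_map_base tracks the bottom of the HYP VA area used for private
 * IO/exec mappings; allocations grow downwards from the idmap page
 * (see __create_hyp_private_mapping()).
 */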
46 static unsigned long io_map_base;
47
48 #define S2_PGD_SIZE     (PTRS_PER_S2_PGD * sizeof(pgd_t))
49 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
50
51 #define KVM_S2PTE_FLAG_IS_IOMAP         (1UL << 0)
52 #define KVM_S2_FLAG_LOGGING_ACTIVE      (1UL << 1)
53
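/*
 * Dirty page logging is considered active for a memslot when it has a
 * dirty bitmap and is not marked read-only.
 */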
54 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
55 {
56         return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
57 }
58
59 /**
60  * kvm_flush_remote_tlbs() - flush all VM TLB entries for v7/8
61  * @kvm:        pointer to kvm structure.
62  *
63  * Interface to HYP function to flush all VM TLB entries
64  */
65 void kvm_flush_remote_tlbs(struct kvm *kvm)
66 {
67         kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
68 }
69
70 static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
71 {
72         kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
73 }
74
75 /*
76  * D-Cache management functions. They take the page table entries by
77  * value, as they are flushing the cache using the kernel mapping (or
78  * kmap on 32bit).
79  */
80 static void kvm_flush_dcache_pte(pte_t pte)
81 {
82         __kvm_flush_dcache_pte(pte);
83 }
84
85 static void kvm_flush_dcache_pmd(pmd_t pmd)
86 {
87         __kvm_flush_dcache_pmd(pmd);
88 }
89
90 static void kvm_flush_dcache_pud(pud_t pud)
91 {
92         __kvm_flush_dcache_pud(pud);
93 }
94
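/*
 * A pfn without a struct page (!pfn_valid) is treated as device memory,
 * for which no stage-2 dcache maintenance is performed.
 */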
95 static bool kvm_is_device_pfn(unsigned long pfn)
96 {
97         return !pfn_valid(pfn);
98 }
99
100 /**
101  * stage2_dissolve_pmd() - clear and flush huge PMD entry
102  * @kvm:        pointer to kvm structure.
103  * @addr:       IPA
104  * @pmd:        pmd pointer for IPA
105  *
106  * Function clears a PMD entry and flushes the 1st and 2nd stage TLBs for
107  * @addr. Marks all pages in the range dirty.
108  */
109 static void stage2_dissolve_pmd(struct kvm *kvm, phys_addr_t addr, pmd_t *pmd)
110 {
111         if (!pmd_thp_or_huge(*pmd))
112                 return;
113
114         pmd_clear(pmd);
115         kvm_tlb_flush_vmid_ipa(kvm, addr);
116         put_page(virt_to_page(pmd));
117 }
118
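/*
 * Pre-fill the memory object cache so that subsequent stage-2 table
 * allocations, done while holding the mmu_lock, can be served without
 * sleeping.
 */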
119 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
120                                   int min, int max)
121 {
122         void *page;
123
124         BUG_ON(max > KVM_NR_MEM_OBJS);
125         if (cache->nobjs >= min)
126                 return 0;
127         while (cache->nobjs < max) {
128                 page = (void *)__get_free_page(PGALLOC_GFP);
129                 if (!page)
130                         return -ENOMEM;
131                 cache->objects[cache->nobjs++] = page;
132         }
133         return 0;
134 }
135
136 static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
137 {
138         while (mc->nobjs)
139                 free_page((unsigned long)mc->objects[--mc->nobjs]);
140 }
141
142 static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
143 {
144         void *p;
145
146         BUG_ON(!mc || !mc->nobjs);
147         p = mc->objects[--mc->nobjs];
148         return p;
149 }
150
151 static void clear_stage2_pgd_entry(struct kvm *kvm, pgd_t *pgd, phys_addr_t addr)
152 {
153         pud_t *pud_table __maybe_unused = stage2_pud_offset(pgd, 0UL);
154         stage2_pgd_clear(pgd);
155         kvm_tlb_flush_vmid_ipa(kvm, addr);
156         stage2_pud_free(pud_table);
157         put_page(virt_to_page(pgd));
158 }
159
160 static void clear_stage2_pud_entry(struct kvm *kvm, pud_t *pud, phys_addr_t addr)
161 {
162         pmd_t *pmd_table __maybe_unused = stage2_pmd_offset(pud, 0);
163         VM_BUG_ON(stage2_pud_huge(*pud));
164         stage2_pud_clear(pud);
165         kvm_tlb_flush_vmid_ipa(kvm, addr);
166         stage2_pmd_free(pmd_table);
167         put_page(virt_to_page(pud));
168 }
169
170 static void clear_stage2_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
171 {
172         pte_t *pte_table = pte_offset_kernel(pmd, 0);
173         VM_BUG_ON(pmd_thp_or_huge(*pmd));
174         pmd_clear(pmd);
175         kvm_tlb_flush_vmid_ipa(kvm, addr);
176         pte_free_kernel(NULL, pte_table);
177         put_page(virt_to_page(pmd));
178 }
179
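/*
 * Page table update helpers: the dsb(ishst) following each WRITE_ONCE
 * makes the new entry visible to the page table walker before any
 * subsequent table update or TLB maintenance.
 */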
180 static inline void kvm_set_pte(pte_t *ptep, pte_t new_pte)
181 {
182         WRITE_ONCE(*ptep, new_pte);
183         dsb(ishst);
184 }
185
186 static inline void kvm_set_pmd(pmd_t *pmdp, pmd_t new_pmd)
187 {
188         WRITE_ONCE(*pmdp, new_pmd);
189         dsb(ishst);
190 }
191
192 static inline void kvm_pmd_populate(pmd_t *pmdp, pte_t *ptep)
193 {
194         kvm_set_pmd(pmdp, kvm_mk_pmd(ptep));
195 }
196
197 static inline void kvm_pud_populate(pud_t *pudp, pmd_t *pmdp)
198 {
199         WRITE_ONCE(*pudp, kvm_mk_pud(pmdp));
200         dsb(ishst);
201 }
202
203 static inline void kvm_pgd_populate(pgd_t *pgdp, pud_t *pudp)
204 {
205         WRITE_ONCE(*pgdp, kvm_mk_pgd(pudp));
206         dsb(ishst);
207 }
208
209 /*
210  * Unmapping vs dcache management:
211  *
212  * If a guest maps certain memory pages as uncached, all writes will
213  * bypass the data cache and go directly to RAM.  However, the CPUs
214  * can still speculate reads (not writes) and fill cache lines with
215  * data.
216  *
217  * Those cache lines will be *clean* cache lines though, so a
218  * clean+invalidate operation is equivalent to an invalidate
219  * operation, because no cache lines are marked dirty.
220  *
221  * Those clean cache lines could be filled prior to an uncached write
222  * by the guest, and the cache coherent IO subsystem would therefore
223  * end up writing old data to disk.
224  *
225  * This is why right after unmapping a page/section and invalidating
226  * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
227  * the IO subsystem will never hit in the cache.
228  *
229  * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as
230  * we then fully enforce cacheability of RAM, no matter what the guest
231  * does.
232  */
233 static void unmap_stage2_ptes(struct kvm *kvm, pmd_t *pmd,
234                        phys_addr_t addr, phys_addr_t end)
235 {
236         phys_addr_t start_addr = addr;
237         pte_t *pte, *start_pte;
238
239         start_pte = pte = pte_offset_kernel(pmd, addr);
240         do {
241                 if (!pte_none(*pte)) {
242                         pte_t old_pte = *pte;
243
244                         kvm_set_pte(pte, __pte(0));
245                         kvm_tlb_flush_vmid_ipa(kvm, addr);
246
247                         /* No need to invalidate the cache for device mappings */
248                         if (!kvm_is_device_pfn(pte_pfn(old_pte)))
249                                 kvm_flush_dcache_pte(old_pte);
250
251                         put_page(virt_to_page(pte));
252                 }
253         } while (pte++, addr += PAGE_SIZE, addr != end);
254
255         if (stage2_pte_table_empty(start_pte))
256                 clear_stage2_pmd_entry(kvm, pmd, start_addr);
257 }
258
259 static void unmap_stage2_pmds(struct kvm *kvm, pud_t *pud,
260                        phys_addr_t addr, phys_addr_t end)
261 {
262         phys_addr_t next, start_addr = addr;
263         pmd_t *pmd, *start_pmd;
264
265         start_pmd = pmd = stage2_pmd_offset(pud, addr);
266         do {
267                 next = stage2_pmd_addr_end(addr, end);
268                 if (!pmd_none(*pmd)) {
269                         if (pmd_thp_or_huge(*pmd)) {
270                                 pmd_t old_pmd = *pmd;
271
272                                 pmd_clear(pmd);
273                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
274
275                                 kvm_flush_dcache_pmd(old_pmd);
276
277                                 put_page(virt_to_page(pmd));
278                         } else {
279                                 unmap_stage2_ptes(kvm, pmd, addr, next);
280                         }
281                 }
282         } while (pmd++, addr = next, addr != end);
283
284         if (stage2_pmd_table_empty(start_pmd))
285                 clear_stage2_pud_entry(kvm, pud, start_addr);
286 }
287
288 static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
289                        phys_addr_t addr, phys_addr_t end)
290 {
291         phys_addr_t next, start_addr = addr;
292         pud_t *pud, *start_pud;
293
294         start_pud = pud = stage2_pud_offset(pgd, addr);
295         do {
296                 next = stage2_pud_addr_end(addr, end);
297                 if (!stage2_pud_none(*pud)) {
298                         if (stage2_pud_huge(*pud)) {
299                                 pud_t old_pud = *pud;
300
301                                 stage2_pud_clear(pud);
302                                 kvm_tlb_flush_vmid_ipa(kvm, addr);
303                                 kvm_flush_dcache_pud(old_pud);
304                                 put_page(virt_to_page(pud));
305                         } else {
306                                 unmap_stage2_pmds(kvm, pud, addr, next);
307                         }
308                 }
309         } while (pud++, addr = next, addr != end);
310
311         if (stage2_pud_table_empty(start_pud))
312                 clear_stage2_pgd_entry(kvm, pgd, start_addr);
313 }
314
315 /**
316  * unmap_stage2_range -- Clear stage2 page table entries to unmap a range
317  * @kvm:   The VM pointer
318  * @start: The intermediate physical base address of the range to unmap
319  * @size:  The size of the area to unmap
320  *
321  * Clear a range of stage-2 mappings, lowering the various ref-counts.  Must
322  * be called while holding mmu_lock (unless for freeing the stage2 pgd before
323  * destroying the VM), otherwise another faulting VCPU may come in and mess
324  * with things behind our backs.
325  */
326 static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
327 {
328         pgd_t *pgd;
329         phys_addr_t addr = start, end = start + size;
330         phys_addr_t next;
331
332         assert_spin_locked(&kvm->mmu_lock);
333         WARN_ON(size & ~PAGE_MASK);
334
335         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
336         do {
337                 /*
338                  * Make sure the page table is still active, as another thread
339                  * could have possibly freed the page table while we released
340                  * the lock.
341                  */
342                 if (!READ_ONCE(kvm->arch.pgd))
343                         break;
344                 next = stage2_pgd_addr_end(addr, end);
345                 if (!stage2_pgd_none(*pgd))
346                         unmap_stage2_puds(kvm, pgd, addr, next);
347                 /*
348                  * If the range is too large, release the kvm->mmu_lock
349                  * to prevent starvation and lockup detector warnings.
350                  */
351                 if (next != end)
352                         cond_resched_lock(&kvm->mmu_lock);
353         } while (pgd++, addr = next, addr != end);
354 }
355
356 static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
357                               phys_addr_t addr, phys_addr_t end)
358 {
359         pte_t *pte;
360
361         pte = pte_offset_kernel(pmd, addr);
362         do {
363                 if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
364                         kvm_flush_dcache_pte(*pte);
365         } while (pte++, addr += PAGE_SIZE, addr != end);
366 }
367
368 static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
369                               phys_addr_t addr, phys_addr_t end)
370 {
371         pmd_t *pmd;
372         phys_addr_t next;
373
374         pmd = stage2_pmd_offset(pud, addr);
375         do {
376                 next = stage2_pmd_addr_end(addr, end);
377                 if (!pmd_none(*pmd)) {
378                         if (pmd_thp_or_huge(*pmd))
379                                 kvm_flush_dcache_pmd(*pmd);
380                         else
381                                 stage2_flush_ptes(kvm, pmd, addr, next);
382                 }
383         } while (pmd++, addr = next, addr != end);
384 }
385
386 static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
387                               phys_addr_t addr, phys_addr_t end)
388 {
389         pud_t *pud;
390         phys_addr_t next;
391
392         pud = stage2_pud_offset(pgd, addr);
393         do {
394                 next = stage2_pud_addr_end(addr, end);
395                 if (!stage2_pud_none(*pud)) {
396                         if (stage2_pud_huge(*pud))
397                                 kvm_flush_dcache_pud(*pud);
398                         else
399                                 stage2_flush_pmds(kvm, pud, addr, next);
400                 }
401         } while (pud++, addr = next, addr != end);
402 }
403
404 static void stage2_flush_memslot(struct kvm *kvm,
405                                  struct kvm_memory_slot *memslot)
406 {
407         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
408         phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
409         phys_addr_t next;
410         pgd_t *pgd;
411
412         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
413         do {
414                 next = stage2_pgd_addr_end(addr, end);
415                 stage2_flush_puds(kvm, pgd, addr, next);
416         } while (pgd++, addr = next, addr != end);
417 }
418
419 /**
420  * stage2_flush_vm - Invalidate cache for pages mapped in stage 2
421  * @kvm: The struct kvm pointer
422  *
423  * Go through the stage 2 page tables and invalidate any cache lines
424  * backing memory already mapped to the VM.
425  */
426 static void stage2_flush_vm(struct kvm *kvm)
427 {
428         struct kvm_memslots *slots;
429         struct kvm_memory_slot *memslot;
430         int idx;
431
432         idx = srcu_read_lock(&kvm->srcu);
433         spin_lock(&kvm->mmu_lock);
434
435         slots = kvm_memslots(kvm);
436         kvm_for_each_memslot(memslot, slots)
437                 stage2_flush_memslot(kvm, memslot);
438
439         spin_unlock(&kvm->mmu_lock);
440         srcu_read_unlock(&kvm->srcu, idx);
441 }
442
443 static void clear_hyp_pgd_entry(pgd_t *pgd)
444 {
445         pud_t *pud_table __maybe_unused = pud_offset(pgd, 0UL);
446         pgd_clear(pgd);
447         pud_free(NULL, pud_table);
448         put_page(virt_to_page(pgd));
449 }
450
451 static void clear_hyp_pud_entry(pud_t *pud)
452 {
453         pmd_t *pmd_table __maybe_unused = pmd_offset(pud, 0);
454         VM_BUG_ON(pud_huge(*pud));
455         pud_clear(pud);
456         pmd_free(NULL, pmd_table);
457         put_page(virt_to_page(pud));
458 }
459
460 static void clear_hyp_pmd_entry(pmd_t *pmd)
461 {
462         pte_t *pte_table = pte_offset_kernel(pmd, 0);
463         VM_BUG_ON(pmd_thp_or_huge(*pmd));
464         pmd_clear(pmd);
465         pte_free_kernel(NULL, pte_table);
466         put_page(virt_to_page(pmd));
467 }
468
469 static void unmap_hyp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
470 {
471         pte_t *pte, *start_pte;
472
473         start_pte = pte = pte_offset_kernel(pmd, addr);
474         do {
475                 if (!pte_none(*pte)) {
476                         kvm_set_pte(pte, __pte(0));
477                         put_page(virt_to_page(pte));
478                 }
479         } while (pte++, addr += PAGE_SIZE, addr != end);
480
481         if (hyp_pte_table_empty(start_pte))
482                 clear_hyp_pmd_entry(pmd);
483 }
484
485 static void unmap_hyp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
486 {
487         phys_addr_t next;
488         pmd_t *pmd, *start_pmd;
489
490         start_pmd = pmd = pmd_offset(pud, addr);
491         do {
492                 next = pmd_addr_end(addr, end);
493                 /* Hyp doesn't use huge pmds */
494                 if (!pmd_none(*pmd))
495                         unmap_hyp_ptes(pmd, addr, next);
496         } while (pmd++, addr = next, addr != end);
497
498         if (hyp_pmd_table_empty(start_pmd))
499                 clear_hyp_pud_entry(pud);
500 }
501
502 static void unmap_hyp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
503 {
504         phys_addr_t next;
505         pud_t *pud, *start_pud;
506
507         start_pud = pud = pud_offset(pgd, addr);
508         do {
509                 next = pud_addr_end(addr, end);
510                 /* Hyp doesn't use huge puds */
511                 if (!pud_none(*pud))
512                         unmap_hyp_pmds(pud, addr, next);
513         } while (pud++, addr = next, addr != end);
514
515         if (hyp_pud_table_empty(start_pud))
516                 clear_hyp_pgd_entry(pgd);
517 }
518
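/*
 * Index into a hyp PGD with @ptrs_per_pgd entries; the idmap PGD may have
 * a different number of entries than the regular hyp PGD (see
 * __kvm_idmap_ptrs_per_pgd()).
 */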
519 static unsigned int kvm_pgd_index(unsigned long addr, unsigned int ptrs_per_pgd)
520 {
521         return (addr >> PGDIR_SHIFT) & (ptrs_per_pgd - 1);
522 }
523
524 static void __unmap_hyp_range(pgd_t *pgdp, unsigned long ptrs_per_pgd,
525                               phys_addr_t start, u64 size)
526 {
527         pgd_t *pgd;
528         phys_addr_t addr = start, end = start + size;
529         phys_addr_t next;
530
531         /*
532          * We don't unmap anything from HYP, except at the hyp tear down.
533          * Hence, we don't have to invalidate the TLBs here.
534          */
535         pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
536         do {
537                 next = pgd_addr_end(addr, end);
538                 if (!pgd_none(*pgd))
539                         unmap_hyp_puds(pgd, addr, next);
540         } while (pgd++, addr = next, addr != end);
541 }
542
543 static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
544 {
545         __unmap_hyp_range(pgdp, PTRS_PER_PGD, start, size);
546 }
547
548 static void unmap_hyp_idmap_range(pgd_t *pgdp, phys_addr_t start, u64 size)
549 {
550         __unmap_hyp_range(pgdp, __kvm_idmap_ptrs_per_pgd(), start, size);
551 }
552
553 /**
554  * free_hyp_pgds - free Hyp-mode page tables
555  *
556  * Assumes hyp_pgd is a page table used strictly in Hyp-mode and
557  * therefore contains either mappings in the kernel memory area (above
558  * PAGE_OFFSET), or device mappings in the idmap range.
559  *
560  * boot_hyp_pgd should only map the idmap range, and is only used in
561  * the extended idmap case.
562  */
563 void free_hyp_pgds(void)
564 {
565         pgd_t *id_pgd;
566
567         mutex_lock(&kvm_hyp_pgd_mutex);
568
569         id_pgd = boot_hyp_pgd ? boot_hyp_pgd : hyp_pgd;
570
571         if (id_pgd) {
572                 /* In case we never called hyp_mmu_init() */
573                 if (!io_map_base)
574                         io_map_base = hyp_idmap_start;
575                 unmap_hyp_idmap_range(id_pgd, io_map_base,
576                                       hyp_idmap_start + PAGE_SIZE - io_map_base);
577         }
578
579         if (boot_hyp_pgd) {
580                 free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
581                 boot_hyp_pgd = NULL;
582         }
583
584         if (hyp_pgd) {
585                 unmap_hyp_range(hyp_pgd, kern_hyp_va(PAGE_OFFSET),
586                                 (uintptr_t)high_memory - PAGE_OFFSET);
587
588                 free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
589                 hyp_pgd = NULL;
590         }
591         if (merged_hyp_pgd) {
592                 clear_page(merged_hyp_pgd);
593                 free_page((unsigned long)merged_hyp_pgd);
594                 merged_hyp_pgd = NULL;
595         }
596
597         mutex_unlock(&kvm_hyp_pgd_mutex);
598 }
599
600 static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start,
601                                     unsigned long end, unsigned long pfn,
602                                     pgprot_t prot)
603 {
604         pte_t *pte;
605         unsigned long addr;
606
607         addr = start;
608         do {
609                 pte = pte_offset_kernel(pmd, addr);
610                 kvm_set_pte(pte, pfn_pte(pfn, prot));
611                 get_page(virt_to_page(pte));
612                 pfn++;
613         } while (addr += PAGE_SIZE, addr != end);
614 }
615
616 static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start,
617                                    unsigned long end, unsigned long pfn,
618                                    pgprot_t prot)
619 {
620         pmd_t *pmd;
621         pte_t *pte;
622         unsigned long addr, next;
623
624         addr = start;
625         do {
626                 pmd = pmd_offset(pud, addr);
627
628                 BUG_ON(pmd_sect(*pmd));
629
630                 if (pmd_none(*pmd)) {
631                         pte = pte_alloc_one_kernel(NULL, addr);
632                         if (!pte) {
633                                 kvm_err("Cannot allocate Hyp pte\n");
634                                 return -ENOMEM;
635                         }
636                         kvm_pmd_populate(pmd, pte);
637                         get_page(virt_to_page(pmd));
638                 }
639
640                 next = pmd_addr_end(addr, end);
641
642                 create_hyp_pte_mappings(pmd, addr, next, pfn, prot);
643                 pfn += (next - addr) >> PAGE_SHIFT;
644         } while (addr = next, addr != end);
645
646         return 0;
647 }
648
649 static int create_hyp_pud_mappings(pgd_t *pgd, unsigned long start,
650                                    unsigned long end, unsigned long pfn,
651                                    pgprot_t prot)
652 {
653         pud_t *pud;
654         pmd_t *pmd;
655         unsigned long addr, next;
656         int ret;
657
658         addr = start;
659         do {
660                 pud = pud_offset(pgd, addr);
661
662                 if (pud_none_or_clear_bad(pud)) {
663                         pmd = pmd_alloc_one(NULL, addr);
664                         if (!pmd) {
665                                 kvm_err("Cannot allocate Hyp pmd\n");
666                                 return -ENOMEM;
667                         }
668                         kvm_pud_populate(pud, pmd);
669                         get_page(virt_to_page(pud));
670                 }
671
672                 next = pud_addr_end(addr, end);
673                 ret = create_hyp_pmd_mappings(pud, addr, next, pfn, prot);
674                 if (ret)
675                         return ret;
676                 pfn += (next - addr) >> PAGE_SHIFT;
677         } while (addr = next, addr != end);
678
679         return 0;
680 }
681
682 static int __create_hyp_mappings(pgd_t *pgdp, unsigned long ptrs_per_pgd,
683                                  unsigned long start, unsigned long end,
684                                  unsigned long pfn, pgprot_t prot)
685 {
686         pgd_t *pgd;
687         pud_t *pud;
688         unsigned long addr, next;
689         int err = 0;
690
691         mutex_lock(&kvm_hyp_pgd_mutex);
692         addr = start & PAGE_MASK;
693         end = PAGE_ALIGN(end);
694         do {
695                 pgd = pgdp + kvm_pgd_index(addr, ptrs_per_pgd);
696
697                 if (pgd_none(*pgd)) {
698                         pud = pud_alloc_one(NULL, addr);
699                         if (!pud) {
700                                 kvm_err("Cannot allocate Hyp pud\n");
701                                 err = -ENOMEM;
702                                 goto out;
703                         }
704                         kvm_pgd_populate(pgd, pud);
705                         get_page(virt_to_page(pgd));
706                 }
707
708                 next = pgd_addr_end(addr, end);
709                 err = create_hyp_pud_mappings(pgd, addr, next, pfn, prot);
710                 if (err)
711                         goto out;
712                 pfn += (next - addr) >> PAGE_SHIFT;
713         } while (addr = next, addr != end);
714 out:
715         mutex_unlock(&kvm_hyp_pgd_mutex);
716         return err;
717 }
718
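/*
 * Translate a kernel virtual address to a physical address, handling both
 * linear-map and vmalloc addresses.
 */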
719 static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
720 {
721         if (!is_vmalloc_addr(kaddr)) {
722                 BUG_ON(!virt_addr_valid(kaddr));
723                 return __pa(kaddr);
724         } else {
725                 return page_to_phys(vmalloc_to_page(kaddr)) +
726                        offset_in_page(kaddr);
727         }
728 }
729
730 /**
731  * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
732  * @from:       The virtual kernel start address of the range
733  * @to:         The virtual kernel end address of the range (exclusive)
734  * @prot:       The protection to be applied to this range
735  *
736  * The same virtual address as the kernel virtual address is also used
737  * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
738  * physical pages.
739  */
740 int create_hyp_mappings(void *from, void *to, pgprot_t prot)
741 {
742         phys_addr_t phys_addr;
743         unsigned long virt_addr;
744         unsigned long start = kern_hyp_va((unsigned long)from);
745         unsigned long end = kern_hyp_va((unsigned long)to);
746
747         if (is_kernel_in_hyp_mode())
748                 return 0;
749
750         start = start & PAGE_MASK;
751         end = PAGE_ALIGN(end);
752
753         for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) {
754                 int err;
755
756                 phys_addr = kvm_kaddr_to_phys(from + virt_addr - start);
757                 err = __create_hyp_mappings(hyp_pgd, PTRS_PER_PGD,
758                                             virt_addr, virt_addr + PAGE_SIZE,
759                                             __phys_to_pfn(phys_addr),
760                                             prot);
761                 if (err)
762                         return err;
763         }
764
765         return 0;
766 }
767
768 static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size,
769                                         unsigned long *haddr, pgprot_t prot)
770 {
771         pgd_t *pgd = hyp_pgd;
772         unsigned long base;
773         int ret = 0;
774
775         mutex_lock(&kvm_hyp_pgd_mutex);
776
777         /*
778          * This assumes that we have enough space below the idmap
779          * page to allocate our VAs. If not, the check below will
780          * kick in. A potential alternative would be to detect that
781          * overflow and switch to an allocation above the idmap.
782          *
783          * The allocated size is always a multiple of PAGE_SIZE.
784          */
785         size = PAGE_ALIGN(size + offset_in_page(phys_addr));
786         base = io_map_base - size;
787
788         /*
789          * Verify that BIT(VA_BITS - 1) hasn't been flipped by
790          * allocating the new area, as it would indicate we've
791          * overflowed the idmap/IO address range.
792          */
793         if ((base ^ io_map_base) & BIT(VA_BITS - 1))
794                 ret = -ENOMEM;
795         else
796                 io_map_base = base;
797
798         mutex_unlock(&kvm_hyp_pgd_mutex);
799
800         if (ret)
801                 goto out;
802
803         if (__kvm_cpu_uses_extended_idmap())
804                 pgd = boot_hyp_pgd;
805
806         ret = __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
807                                     base, base + size,
808                                     __phys_to_pfn(phys_addr), prot);
809         if (ret)
810                 goto out;
811
812         *haddr = base + offset_in_page(phys_addr);
813
814 out:
815         return ret;
816 }
817
818 /**
819  * create_hyp_io_mappings - Map IO into both kernel and HYP
820  * @phys_addr:  The physical start address which gets mapped
821  * @size:       Size of the region being mapped
822  * @kaddr:      Kernel VA for this mapping
823  * @haddr:      HYP VA for this mapping
824  */
825 int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
826                            void __iomem **kaddr,
827                            void __iomem **haddr)
828 {
829         unsigned long addr;
830         int ret;
831
832         *kaddr = ioremap(phys_addr, size);
833         if (!*kaddr)
834                 return -ENOMEM;
835
836         if (is_kernel_in_hyp_mode()) {
837                 *haddr = *kaddr;
838                 return 0;
839         }
840
841         ret = __create_hyp_private_mapping(phys_addr, size,
842                                            &addr, PAGE_HYP_DEVICE);
843         if (ret) {
844                 iounmap(*kaddr);
845                 *kaddr = NULL;
846                 *haddr = NULL;
847                 return ret;
848         }
849
850         *haddr = (void __iomem *)addr;
851         return 0;
852 }
853
854 /**
855  * create_hyp_exec_mappings - Map an executable range into HYP
856  * @phys_addr:  The physical start address which gets mapped
857  * @size:       Size of the region being mapped
858  * @haddr:      HYP VA for this mapping
859  */
860 int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
861                              void **haddr)
862 {
863         unsigned long addr;
864         int ret;
865
866         BUG_ON(is_kernel_in_hyp_mode());
867
868         ret = __create_hyp_private_mapping(phys_addr, size,
869                                            &addr, PAGE_HYP_EXEC);
870         if (ret) {
871                 *haddr = NULL;
872                 return ret;
873         }
874
875         *haddr = (void *)addr;
876         return 0;
877 }
878
879 /**
880  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
881  * @kvm:        The KVM struct pointer for the VM.
882  *
883  * Allocates only the stage-2 HW PGD level table(s) (can support either full
884  * 40-bit input addresses or limited to 32-bit input addresses). Clears the
885  * allocated pages.
886  *
887  * Note we don't need locking here as this is only called when the VM is
888  * created, which can only be done once.
889  */
890 int kvm_alloc_stage2_pgd(struct kvm *kvm)
891 {
892         pgd_t *pgd;
893
894         if (kvm->arch.pgd != NULL) {
895                 kvm_err("kvm_arch already initialized?\n");
896                 return -EINVAL;
897         }
898
899         /* Allocate the HW PGD, making sure that each page gets its own refcount */
900         pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
901         if (!pgd)
902                 return -ENOMEM;
903
904         kvm->arch.pgd = pgd;
905         return 0;
906 }
907
908 static void stage2_unmap_memslot(struct kvm *kvm,
909                                  struct kvm_memory_slot *memslot)
910 {
911         hva_t hva = memslot->userspace_addr;
912         phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
913         phys_addr_t size = PAGE_SIZE * memslot->npages;
914         hva_t reg_end = hva + size;
915
916         /*
917          * A memory region could potentially cover multiple VMAs, and any holes
918          * between them, so iterate over all of them to find out if we should
919          * unmap any of them.
920          *
921          *     +--------------------------------------------+
922          * +---------------+----------------+   +----------------+
923          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
924          * +---------------+----------------+   +----------------+
925          *     |               memory region                |
926          *     +--------------------------------------------+
927          */
928         do {
929                 struct vm_area_struct *vma = find_vma(current->mm, hva);
930                 hva_t vm_start, vm_end;
931
932                 if (!vma || vma->vm_start >= reg_end)
933                         break;
934
935                 /*
936                  * Take the intersection of this VMA with the memory region
937                  */
938                 vm_start = max(hva, vma->vm_start);
939                 vm_end = min(reg_end, vma->vm_end);
940
941                 if (!(vma->vm_flags & VM_PFNMAP)) {
942                         gpa_t gpa = addr + (vm_start - memslot->userspace_addr);
943                         unmap_stage2_range(kvm, gpa, vm_end - vm_start);
944                 }
945                 hva = vm_end;
946         } while (hva < reg_end);
947 }
948
949 /**
950  * stage2_unmap_vm - Unmap Stage-2 RAM mappings
951  * @kvm: The struct kvm pointer
952  *
953  * Go through the memory regions and unmap any regular RAM
954  * backing memory already mapped to the VM.
955  */
956 void stage2_unmap_vm(struct kvm *kvm)
957 {
958         struct kvm_memslots *slots;
959         struct kvm_memory_slot *memslot;
960         int idx;
961
962         idx = srcu_read_lock(&kvm->srcu);
963         down_read(&current->mm->mmap_sem);
964         spin_lock(&kvm->mmu_lock);
965
966         slots = kvm_memslots(kvm);
967         kvm_for_each_memslot(memslot, slots)
968                 stage2_unmap_memslot(kvm, memslot);
969
970         spin_unlock(&kvm->mmu_lock);
971         up_read(&current->mm->mmap_sem);
972         srcu_read_unlock(&kvm->srcu, idx);
973 }
974
975 /**
976  * kvm_free_stage2_pgd - free all stage-2 tables
977  * @kvm:        The KVM struct pointer for the VM.
978  *
979  * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all
980  * underlying level-2 and level-3 tables before freeing the actual level-1 table
981  * and setting the struct pointer to NULL.
982  */
983 void kvm_free_stage2_pgd(struct kvm *kvm)
984 {
985         void *pgd = NULL;
986
987         spin_lock(&kvm->mmu_lock);
988         if (kvm->arch.pgd) {
989                 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
990                 pgd = READ_ONCE(kvm->arch.pgd);
991                 kvm->arch.pgd = NULL;
992         }
993         spin_unlock(&kvm->mmu_lock);
994
995         /* Free the HW pgd, one page at a time */
996         if (pgd)
997                 free_pages_exact(pgd, S2_PGD_SIZE);
998 }
999
1000 static pud_t *stage2_get_pud(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1001                              phys_addr_t addr)
1002 {
1003         pgd_t *pgd;
1004         pud_t *pud;
1005
1006         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
1007         if (WARN_ON(stage2_pgd_none(*pgd))) {
1008                 if (!cache)
1009                         return NULL;
1010                 pud = mmu_memory_cache_alloc(cache);
1011                 stage2_pgd_populate(pgd, pud);
1012                 get_page(virt_to_page(pgd));
1013         }
1014
1015         return stage2_pud_offset(pgd, addr);
1016 }
1017
1018 static pmd_t *stage2_get_pmd(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1019                              phys_addr_t addr)
1020 {
1021         pud_t *pud;
1022         pmd_t *pmd;
1023
1024         pud = stage2_get_pud(kvm, cache, addr);
1025         if (!pud)
1026                 return NULL;
1027
1028         if (stage2_pud_none(*pud)) {
1029                 if (!cache)
1030                         return NULL;
1031                 pmd = mmu_memory_cache_alloc(cache);
1032                 stage2_pud_populate(pud, pmd);
1033                 get_page(virt_to_page(pud));
1034         }
1035
1036         return stage2_pmd_offset(pud, addr);
1037 }
1038
1039 static int stage2_set_pmd_huge(struct kvm *kvm, struct kvm_mmu_memory_cache
1040                                *cache, phys_addr_t addr, const pmd_t *new_pmd)
1041 {
1042         pmd_t *pmd, old_pmd;
1043
1044         pmd = stage2_get_pmd(kvm, cache, addr);
1045         VM_BUG_ON(!pmd);
1046
1047         old_pmd = *pmd;
1048         if (pmd_present(old_pmd)) {
1049                 /*
1050                  * Multiple vcpus faulting on the same PMD entry can
1051                  * lead to them sequentially updating the PMD with the
1052                  * same value. Following the break-before-make
1053                  * (pmd_clear() followed by tlb_flush()) process can
1054                  * hinder forward progress due to refaults generated
1055                  * on missing translations.
1056                  *
1057                  * Skip updating the page table if the entry is
1058                  * unchanged.
1059                  */
1060                 if (pmd_val(old_pmd) == pmd_val(*new_pmd))
1061                         return 0;
1062
1063                 /*
1064                  * Mapping in huge pages should only happen through a
1065                  * fault.  If a page is merged into a transparent huge
1066                  * page, the individual subpages of that huge page
1067                  * should be unmapped through MMU notifiers before we
1068                  * get here.
1069                  *
1070                  * Merging of CompoundPages is not supported; they
1071                  * should be split first, unmapped, merged,
1072                  * and mapped back in on demand.
1073                  */
1074                 VM_BUG_ON(pmd_pfn(old_pmd) != pmd_pfn(*new_pmd));
1075
1076                 pmd_clear(pmd);
1077                 kvm_tlb_flush_vmid_ipa(kvm, addr);
1078         } else {
1079                 get_page(virt_to_page(pmd));
1080         }
1081
1082         kvm_set_pmd(pmd, *new_pmd);
1083         return 0;
1084 }
1085
1086 static bool stage2_is_exec(struct kvm *kvm, phys_addr_t addr)
1087 {
1088         pmd_t *pmdp;
1089         pte_t *ptep;
1090
1091         pmdp = stage2_get_pmd(kvm, NULL, addr);
1092         if (!pmdp || pmd_none(*pmdp) || !pmd_present(*pmdp))
1093                 return false;
1094
1095         if (pmd_thp_or_huge(*pmdp))
1096                 return kvm_s2pmd_exec(pmdp);
1097
1098         ptep = pte_offset_kernel(pmdp, addr);
1099         if (!ptep || pte_none(*ptep) || !pte_present(*ptep))
1100                 return false;
1101
1102         return kvm_s2pte_exec(ptep);
1103 }
1104
1105 static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
1106                           phys_addr_t addr, const pte_t *new_pte,
1107                           unsigned long flags)
1108 {
1109         pmd_t *pmd;
1110         pte_t *pte, old_pte;
1111         bool iomap = flags & KVM_S2PTE_FLAG_IS_IOMAP;
1112         bool logging_active = flags & KVM_S2_FLAG_LOGGING_ACTIVE;
1113
1114         VM_BUG_ON(logging_active && !cache);
1115
1116         /* Create stage-2 page table mapping - Levels 0 and 1 */
1117         pmd = stage2_get_pmd(kvm, cache, addr);
1118         if (!pmd) {
1119                 /*
1120                  * Ignore calls from kvm_set_spte_hva for unallocated
1121                  * address ranges.
1122                  */
1123                 return 0;
1124         }
1125
1126         /*
1127          * While dirty page logging is active, dissolve any huge PMD, then
1128          * continue on to allocate a page.
1129          */
1130         if (logging_active)
1131                 stage2_dissolve_pmd(kvm, addr, pmd);
1132
1133         /* Create stage-2 page mappings - Level 2 */
1134         if (pmd_none(*pmd)) {
1135                 if (!cache)
1136                         return 0; /* ignore calls from kvm_set_spte_hva */
1137                 pte = mmu_memory_cache_alloc(cache);
1138                 kvm_pmd_populate(pmd, pte);
1139                 get_page(virt_to_page(pmd));
1140         }
1141
1142         pte = pte_offset_kernel(pmd, addr);
1143
1144         if (iomap && pte_present(*pte))
1145                 return -EFAULT;
1146
1147         /* Create 2nd stage page table mapping - Level 3 */
1148         old_pte = *pte;
1149         if (pte_present(old_pte)) {
1150                 /* Skip page table update if there is no change */
1151                 if (pte_val(old_pte) == pte_val(*new_pte))
1152                         return 0;
1153
1154                 kvm_set_pte(pte, __pte(0));
1155                 kvm_tlb_flush_vmid_ipa(kvm, addr);
1156         } else {
1157                 get_page(virt_to_page(pte));
1158         }
1159
1160         kvm_set_pte(pte, *new_pte);
1161         return 0;
1162 }
1163
1164 #ifndef __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
1165 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1166 {
1167         if (pte_young(*pte)) {
1168                 *pte = pte_mkold(*pte);
1169                 return 1;
1170         }
1171         return 0;
1172 }
1173 #else
1174 static int stage2_ptep_test_and_clear_young(pte_t *pte)
1175 {
1176         return __ptep_test_and_clear_young(pte);
1177 }
1178 #endif
1179
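/*
 * The access flag occupies the same bit position in block (pmd) and page
 * (pte) descriptors, so the pte helper can be reused for pmds.
 */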
1180 static int stage2_pmdp_test_and_clear_young(pmd_t *pmd)
1181 {
1182         return stage2_ptep_test_and_clear_young((pte_t *)pmd);
1183 }
1184
1185 /**
1186  * kvm_phys_addr_ioremap - map a device range to guest IPA
1187  *
1188  * @kvm:        The KVM pointer
1189  * @guest_ipa:  The IPA at which to insert the mapping
1190  * @pa:         The physical address of the device
1191  * @size:       The size of the mapping
 * @writable:   Whether or not the mapping should be writable
1192  */
1193 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
1194                           phys_addr_t pa, unsigned long size, bool writable)
1195 {
1196         phys_addr_t addr, end;
1197         int ret = 0;
1198         unsigned long pfn;
1199         struct kvm_mmu_memory_cache cache = { 0, };
1200
1201         end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK;
1202         pfn = __phys_to_pfn(pa);
1203
1204         for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) {
1205                 pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE);
1206
1207                 if (writable)
1208                         pte = kvm_s2pte_mkwrite(pte);
1209
1210                 ret = mmu_topup_memory_cache(&cache, KVM_MMU_CACHE_MIN_PAGES,
1211                                                 KVM_NR_MEM_OBJS);
1212                 if (ret)
1213                         goto out;
1214                 spin_lock(&kvm->mmu_lock);
1215                 ret = stage2_set_pte(kvm, &cache, addr, &pte,
1216                                                 KVM_S2PTE_FLAG_IS_IOMAP);
1217                 spin_unlock(&kvm->mmu_lock);
1218                 if (ret)
1219                         goto out;
1220
1221                 pfn++;
1222         }
1223
1224 out:
1225         mmu_free_memory_cache(&cache);
1226         return ret;
1227 }
1228
1229 static bool transparent_hugepage_adjust(kvm_pfn_t *pfnp, phys_addr_t *ipap)
1230 {
1231         kvm_pfn_t pfn = *pfnp;
1232         gfn_t gfn = *ipap >> PAGE_SHIFT;
1233
1234         if (PageTransCompoundMap(pfn_to_page(pfn))) {
1235                 unsigned long mask;
1236                 /*
1237                  * The address we faulted on is backed by a transparent huge
1238                  * page.  However, because we map the compound huge page and
1239                  * not the individual tail page, we need to transfer the
1240                  * refcount to the head page.  We have to be careful that the
1241                  * THP doesn't start to split while we are adjusting the
1242                  * refcounts.
1243                  *
1244                  * We are sure this doesn't happen, because mmu_notifier_retry
1245                  * was successful and we are holding the mmu_lock, so if this
1246                  * THP is trying to split, it will be blocked in the mmu
1247                  * notifier before touching any of the pages, specifically
1248                  * before being able to call __split_huge_page_refcount().
1249                  *
1250                  * We can therefore safely transfer the refcount from PG_tail
1251                  * to PG_head and switch the pfn from a tail page to the head
1252                  * page accordingly.
1253                  */
1254                 mask = PTRS_PER_PMD - 1;
1255                 VM_BUG_ON((gfn & mask) != (pfn & mask));
1256                 if (pfn & mask) {
1257                         *ipap &= PMD_MASK;
1258                         kvm_release_pfn_clean(pfn);
1259                         pfn &= ~mask;
1260                         kvm_get_pfn(pfn);
1261                         *pfnp = pfn;
1262                 }
1263
1264                 return true;
1265         }
1266
1267         return false;
1268 }
1269
1270 static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
1271 {
1272         if (kvm_vcpu_trap_is_iabt(vcpu))
1273                 return false;
1274
1275         return kvm_vcpu_dabt_iswrite(vcpu);
1276 }
1277
1278 /**
1279  * stage2_wp_ptes - write protect PMD range
1280  * @pmd:        pointer to pmd entry
1281  * @addr:       range start address
1282  * @end:        range end address
1283  */
1284 static void stage2_wp_ptes(pmd_t *pmd, phys_addr_t addr, phys_addr_t end)
1285 {
1286         pte_t *pte;
1287
1288         pte = pte_offset_kernel(pmd, addr);
1289         do {
1290                 if (!pte_none(*pte)) {
1291                         if (!kvm_s2pte_readonly(pte))
1292                                 kvm_set_s2pte_readonly(pte);
1293                 }
1294         } while (pte++, addr += PAGE_SIZE, addr != end);
1295 }
1296
1297 /**
1298  * stage2_wp_pmds - write protect PUD range
1299  * @pud:        pointer to pud entry
1300  * @addr:       range start address
1301  * @end:        range end address
1302  */
1303 static void stage2_wp_pmds(pud_t *pud, phys_addr_t addr, phys_addr_t end)
1304 {
1305         pmd_t *pmd;
1306         phys_addr_t next;
1307
1308         pmd = stage2_pmd_offset(pud, addr);
1309
1310         do {
1311                 next = stage2_pmd_addr_end(addr, end);
1312                 if (!pmd_none(*pmd)) {
1313                         if (pmd_thp_or_huge(*pmd)) {
1314                                 if (!kvm_s2pmd_readonly(pmd))
1315                                         kvm_set_s2pmd_readonly(pmd);
1316                         } else {
1317                                 stage2_wp_ptes(pmd, addr, next);
1318                         }
1319                 }
1320         } while (pmd++, addr = next, addr != end);
1321 }
1322
1323 /**
1324  * stage2_wp_puds - write protect PGD range
1325  * @pgd:       pointer to pgd entry
1326  * @addr:      range start address
1327  * @end:       range end address
1328  *
1329  * Process PUD entries; a huge PUD is not supported and triggers a BUG().
1330  */
1331 static void stage2_wp_puds(pgd_t *pgd, phys_addr_t addr, phys_addr_t end)
1332 {
1333         pud_t *pud;
1334         phys_addr_t next;
1335
1336         pud = stage2_pud_offset(pgd, addr);
1337         do {
1338                 next = stage2_pud_addr_end(addr, end);
1339                 if (!stage2_pud_none(*pud)) {
1340                         /* TODO: PUD not supported, revisit later if supported */
1341                         BUG_ON(stage2_pud_huge(*pud));
1342                         stage2_wp_pmds(pud, addr, next);
1343                 }
1344         } while (pud++, addr = next, addr != end);
1345 }
1346
1347 /**
1348  * stage2_wp_range() - write protect stage2 memory region range
1349  * @kvm:        The KVM pointer
1350  * @addr:       Start address of range
1351  * @end:        End address of range
1352  */
1353 static void stage2_wp_range(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
1354 {
1355         pgd_t *pgd;
1356         phys_addr_t next;
1357
1358         pgd = kvm->arch.pgd + stage2_pgd_index(addr);
1359         do {
1360                 /*
1361                  * Release kvm_mmu_lock periodically if the memory region is
1362                  * large. Otherwise, we may see kernel panics with
1363                  * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
1364                  * CONFIG_LOCKDEP. Additionally, holding the lock too long
1365                  * will also starve other vCPUs. We also have to make sure
1366                  * that the page tables are not freed while we release
1367                  * the lock.
1368                  */
1369                 cond_resched_lock(&kvm->mmu_lock);
1370                 if (!READ_ONCE(kvm->arch.pgd))
1371                         break;
1372                 next = stage2_pgd_addr_end(addr, end);
1373                 if (stage2_pgd_present(*pgd))
1374                         stage2_wp_puds(pgd, addr, next);
1375         } while (pgd++, addr = next, addr != end);
1376 }
1377
1378 /**
1379  * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot
1380  * @kvm:        The KVM pointer
1381  * @slot:       The memory slot to write protect
1382  *
1383  * Called to start logging dirty pages after memory region
1384  * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns
1385  * all present PMDs and PTEs in the memory region are write protected.
1386  * Afterwards, the dirty page log can be read.
1387  *
1388  * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired,
1389  * serializing operations for VM memory regions.
1390  */
1391 void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot)
1392 {
1393         struct kvm_memslots *slots = kvm_memslots(kvm);
1394         struct kvm_memory_slot *memslot = id_to_memslot(slots, slot);
1395         phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
1396         phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;
1397
1398         spin_lock(&kvm->mmu_lock);
1399         stage2_wp_range(kvm, start, end);
1400         spin_unlock(&kvm->mmu_lock);
1401         kvm_flush_remote_tlbs(kvm);
1402 }
1403
1404 /**
1405  * kvm_mmu_write_protect_pt_masked() - write protect dirty pages
1406  * @kvm:        The KVM pointer
1407  * @slot:       The memory slot associated with mask
1408  * @gfn_offset: The gfn offset in memory slot
1409  * @mask:       The mask of dirty pages at offset 'gfn_offset' in this memory
1410  *              slot to be write protected
1411  *
1412  * Walks the bits set in mask and write protects the associated PTEs. Caller must
1413  * acquire kvm_mmu_lock.
1414  */
1415 static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1416                 struct kvm_memory_slot *slot,
1417                 gfn_t gfn_offset, unsigned long mask)
1418 {
1419         phys_addr_t base_gfn = slot->base_gfn + gfn_offset;
1420         phys_addr_t start = (base_gfn +  __ffs(mask)) << PAGE_SHIFT;
1421         phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT;
1422
1423         stage2_wp_range(kvm, start, end);
1424 }
1425
1426 /*
1427  * kvm_arch_mmu_enable_log_dirty_pt_masked - enable dirty logging for selected
1428  * dirty pages.
1429  *
1430  * It calls kvm_mmu_write_protect_pt_masked to write protect selected pages to
1431  * enable dirty logging for them.
1432  */
1433 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
1434                 struct kvm_memory_slot *slot,
1435                 gfn_t gfn_offset, unsigned long mask)
1436 {
1437         kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
1438 }
1439
1440 static void clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
1441 {
1442         __clean_dcache_guest_page(pfn, size);
1443 }
1444
1445 static void invalidate_icache_guest_page(kvm_pfn_t pfn, unsigned long size)
1446 {
1447         __invalidate_icache_guest_page(pfn, size);
1448 }
1449
1450 static void kvm_send_hwpoison_signal(unsigned long address,
1451                                      struct vm_area_struct *vma)
1452 {
1453         short lsb;
1454
1455         if (is_vm_hugetlb_page(vma))
1456                 lsb = huge_page_shift(hstate_vma(vma));
1457         else
1458                 lsb = PAGE_SHIFT;
1459
1460         send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current);
1461 }
1462
1463 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
1464                           struct kvm_memory_slot *memslot, unsigned long hva,
1465                           unsigned long fault_status)
1466 {
1467         int ret;
1468         bool write_fault, exec_fault, writable, hugetlb = false, force_pte = false;
1469         unsigned long mmu_seq;
1470         gfn_t gfn = fault_ipa >> PAGE_SHIFT;
1471         struct kvm *kvm = vcpu->kvm;
1472         struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
1473         struct vm_area_struct *vma;
1474         kvm_pfn_t pfn;
1475         pgprot_t mem_type = PAGE_S2;
1476         bool logging_active = memslot_is_logging(memslot);
1477         unsigned long flags = 0;
1478
1479         write_fault = kvm_is_write_fault(vcpu);
1480         exec_fault = kvm_vcpu_trap_is_iabt(vcpu);
1481         VM_BUG_ON(write_fault && exec_fault);
1482
1483         if (fault_status == FSC_PERM && !write_fault && !exec_fault) {
1484                 kvm_err("Unexpected L2 read permission error\n");
1485                 return -EFAULT;
1486         }
1487
1488         /* Let's check if we will get back a huge page backed by hugetlbfs */
1489         down_read(&current->mm->mmap_sem);
1490         vma = find_vma_intersection(current->mm, hva, hva + 1);
1491         if (unlikely(!vma)) {
1492                 kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
1493                 up_read(&current->mm->mmap_sem);
1494                 return -EFAULT;
1495         }
1496
1497         if (vma_kernel_pagesize(vma) == PMD_SIZE && !logging_active) {
1498                 hugetlb = true;
1499                 gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
1500         } else {
1501                 /*
1502                  * Pages belonging to memslots that don't have the same
1503                  * alignment for userspace and IPA cannot be mapped using
1504                  * block descriptors even if the pages belong to a THP for
1505                  * the process, because the stage-2 block descriptor will
1506                  * cover more than a single THP and we lose atomicity for
1507                  * unmapping, updates, and splits of the THP or other pages
1508                  * in the stage-2 block range.
1509                  */
1510                 if ((memslot->userspace_addr & ~PMD_MASK) !=
1511                     ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
1512                         force_pte = true;
1513         }
1514         up_read(&current->mm->mmap_sem);
1515
1516         /* We need minimum second+third level pages */
1517         ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
1518                                      KVM_NR_MEM_OBJS);
1519         if (ret)
1520                 return ret;
1521
1522         mmu_seq = vcpu->kvm->mmu_notifier_seq;
1523         /*
1524          * Ensure the read of mmu_notifier_seq happens before we call
1525          * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
1526          * the page we just got a reference to gets unmapped before we have a
1527          * chance to grab the mmu_lock, which ensures that if the page gets
1528          * unmapped afterwards, the call to kvm_unmap_hva will take it away
1529          * from us again properly. This smp_rmb() interacts with the smp_wmb()
1530          * in kvm_mmu_notifier_invalidate_<page|range_end>.
1531          */
1532         smp_rmb();
1533
1534         pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
1535         if (pfn == KVM_PFN_ERR_HWPOISON) {
1536                 kvm_send_hwpoison_signal(hva, vma);
1537                 return 0;
1538         }
1539         if (is_error_noslot_pfn(pfn))
1540                 return -EFAULT;
1541
1542         if (kvm_is_device_pfn(pfn)) {
1543                 mem_type = PAGE_S2_DEVICE;
1544                 flags |= KVM_S2PTE_FLAG_IS_IOMAP;
1545         } else if (logging_active) {
1546                 /*
1547                  * Faults on pages in a memslot with logging enabled
1548                  * should not be mapped with huge pages (it introduces churn
1549                  * and performance degradation), so force a pte mapping.
1550                  */
1551                 force_pte = true;
1552                 flags |= KVM_S2_FLAG_LOGGING_ACTIVE;
1553
1554                 /*
1555                  * Only actually map the page as writable if this was a write
1556                  * fault.
1557                  */
1558                 if (!write_fault)
1559                         writable = false;
1560         }
1561
1562         spin_lock(&kvm->mmu_lock);
1563         if (mmu_notifier_retry(kvm, mmu_seq))
1564                 goto out_unlock;
1565
1566         if (!hugetlb && !force_pte)
1567                 hugetlb = transparent_hugepage_adjust(&pfn, &fault_ipa);
1568
1569         if (hugetlb) {
1570                 pmd_t new_pmd = pfn_pmd(pfn, mem_type);
1571                 new_pmd = pmd_mkhuge(new_pmd);
1572                 if (writable) {
1573                         new_pmd = kvm_s2pmd_mkwrite(new_pmd);
1574                         kvm_set_pfn_dirty(pfn);
1575                 }
1576
1577                 if (fault_status != FSC_PERM)
1578                         clean_dcache_guest_page(pfn, PMD_SIZE);
1579
1580                 if (exec_fault) {
1581                         new_pmd = kvm_s2pmd_mkexec(new_pmd);
1582                         invalidate_icache_guest_page(pfn, PMD_SIZE);
1583                 } else if (fault_status == FSC_PERM) {
1584                         /* Preserve execute if XN was already cleared */
1585                         if (stage2_is_exec(kvm, fault_ipa))
1586                                 new_pmd = kvm_s2pmd_mkexec(new_pmd);
1587                 }
1588
1589                 ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
1590         } else {
1591                 pte_t new_pte = pfn_pte(pfn, mem_type);
1592
1593                 if (writable) {
1594                         new_pte = kvm_s2pte_mkwrite(new_pte);
1595                         kvm_set_pfn_dirty(pfn);
1596                         mark_page_dirty(kvm, gfn);
1597                 }
1598
1599                 if (fault_status != FSC_PERM)
1600                         clean_dcache_guest_page(pfn, PAGE_SIZE);
1601
1602                 if (exec_fault) {
1603                         new_pte = kvm_s2pte_mkexec(new_pte);
1604                         invalidate_icache_guest_page(pfn, PAGE_SIZE);
1605                 } else if (fault_status == FSC_PERM) {
1606                         /* Preserve execute if XN was already cleared */
1607                         if (stage2_is_exec(kvm, fault_ipa))
1608                                 new_pte = kvm_s2pte_mkexec(new_pte);
1609                 }
1610
1611                 ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, flags);
1612         }
1613
1614 out_unlock:
1615         spin_unlock(&kvm->mmu_lock);
1616         kvm_set_pfn_accessed(pfn);
1617         kvm_release_pfn_clean(pfn);
1618         return ret;
1619 }
1620
1621 /*
1622  * Resolve the access fault by making the page young again.
1623  * Note that because the faulting entry is guaranteed not to be
1624  * cached in the TLB, we don't need to invalidate anything.
1625  * Only the HW Access Flag updates are supported for Stage 2 (no DBM),
1626  * so there is no need for atomic (pte|pmd)_mkyoung operations.
1627  */
1628 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
1629 {
1630         pmd_t *pmd;
1631         pte_t *pte;
1632         kvm_pfn_t pfn;
1633         bool pfn_valid = false;
1634
1635         trace_kvm_access_fault(fault_ipa);
1636
1637         spin_lock(&vcpu->kvm->mmu_lock);
1638
1639         pmd = stage2_get_pmd(vcpu->kvm, NULL, fault_ipa);
1640         if (!pmd || pmd_none(*pmd))     /* Nothing there */
1641                 goto out;
1642
1643         if (pmd_thp_or_huge(*pmd)) {    /* THP, HugeTLB */
1644                 *pmd = pmd_mkyoung(*pmd);
1645                 pfn = pmd_pfn(*pmd);
1646                 pfn_valid = true;
1647                 goto out;
1648         }
1649
1650         pte = pte_offset_kernel(pmd, fault_ipa);
1651         if (pte_none(*pte))             /* Nothing there either */
1652                 goto out;
1653
1654         *pte = pte_mkyoung(*pte);       /* Just a page... */
1655         pfn = pte_pfn(*pte);
1656         pfn_valid = true;
1657 out:
1658         spin_unlock(&vcpu->kvm->mmu_lock);
1659         if (pfn_valid)
1660                 kvm_set_pfn_accessed(pfn);
1661 }
1662
1663 /**
1664  * kvm_handle_guest_abort - handles all 2nd stage aborts
1665  * @vcpu:       the VCPU pointer
1666  * @run:        the kvm_run structure
1667  *
1668  * Any abort that reaches the host is almost guaranteed to be caused by a
1669  * missing second stage translation table entry. This means either that the
1670  * guest simply needs more memory and we must allocate an appropriate page,
1671  * or that the guest tried to access I/O memory, which is emulated by user
1672  * space. The distinction is based on the IPA causing the fault and on whether
1673  * this memory region has been registered as standard RAM by user space.
1674  */
1675 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
1676 {
1677         unsigned long fault_status;
1678         phys_addr_t fault_ipa;
1679         struct kvm_memory_slot *memslot;
1680         unsigned long hva;
1681         bool is_iabt, write_fault, writable;
1682         gfn_t gfn;
1683         int ret, idx;
1684
1685         fault_status = kvm_vcpu_trap_get_fault_type(vcpu);
1686
1687         fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
1688         is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
1689
1690         /* Synchronous External Abort? */
1691         if (kvm_vcpu_dabt_isextabt(vcpu)) {
1692                 /*
1693                  * For RAS the host kernel may handle this abort.
1694                  * There is no need to pass the error into the guest.
1695                  */
1696                 if (!handle_guest_sea(fault_ipa, kvm_vcpu_get_hsr(vcpu)))
1697                         return 1;
1698
1699                 if (unlikely(!is_iabt)) {
1700                         kvm_inject_vabt(vcpu);
1701                         return 1;
1702                 }
1703         }
1704
1705         trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
1706                               kvm_vcpu_get_hfar(vcpu), fault_ipa);
1707
1708         /* Check that the stage-2 fault is a translation, permission or access fault */
1709         if (fault_status != FSC_FAULT && fault_status != FSC_PERM &&
1710             fault_status != FSC_ACCESS) {
1711                 kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n",
1712                         kvm_vcpu_trap_get_class(vcpu),
1713                         (unsigned long)kvm_vcpu_trap_get_fault(vcpu),
1714                         (unsigned long)kvm_vcpu_get_hsr(vcpu));
1715                 return -EFAULT;
1716         }
1717
1718         idx = srcu_read_lock(&vcpu->kvm->srcu);
1719
1720         gfn = fault_ipa >> PAGE_SHIFT;
1721         memslot = gfn_to_memslot(vcpu->kvm, gfn);
1722         hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
1723         write_fault = kvm_is_write_fault(vcpu);
1724         if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
1725                 if (is_iabt) {
1726                         /* Prefetch Abort on I/O address */
1727                         kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
1728                         ret = 1;
1729                         goto out_unlock;
1730                 }
1731
1732                 /*
1733                  * Check for a cache maintenance operation. Since we
1734          * ended up here, we know it is outside of any memory
1735                  * slot. But we can't find out if that is for a device,
1736                  * or if the guest is just being stupid. The only thing
1737                  * we know for sure is that this range cannot be cached.
1738                  *
1739                  * So let's assume that the guest is just being
1740                  * cautious, and skip the instruction.
1741                  */
1742                 if (kvm_vcpu_dabt_is_cm(vcpu)) {
1743                         kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
1744                         ret = 1;
1745                         goto out_unlock;
1746                 }
1747
1748                 /*
1749                  * The IPA is reported as [MAX:12], so we need to
1750                  * complement it with the bottom 12 bits from the
1751                  * faulting VA. This is always 12 bits, irrespective
1752                  * of the page size.
1753                  */
1754                 fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
1755                 ret = io_mem_abort(vcpu, run, fault_ipa);
1756                 goto out_unlock;
1757         }
1758
1759         /* Userspace should not be able to register out-of-bounds IPAs */
1760         VM_BUG_ON(fault_ipa >= KVM_PHYS_SIZE);
1761
1762         if (fault_status == FSC_ACCESS) {
1763                 handle_access_fault(vcpu, fault_ipa);
1764                 ret = 1;
1765                 goto out_unlock;
1766         }
1767
1768         ret = user_mem_abort(vcpu, fault_ipa, memslot, hva, fault_status);
1769         if (ret == 0)
1770                 ret = 1;
1771 out_unlock:
1772         srcu_read_unlock(&vcpu->kvm->srcu, idx);
1773         return ret;
1774 }
1775
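/*
 * handle_hva_to_gpa() - apply a handler to the guest physical ranges
 * backing a host virtual address range.
 *
 * Walks every memslot that intersects the HVA range [start, end) and
 * invokes @handler on the corresponding IPA range. The individual handler
 * return values are OR'ed into the final result.
 */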
1776 static int handle_hva_to_gpa(struct kvm *kvm,
1777                              unsigned long start,
1778                              unsigned long end,
1779                              int (*handler)(struct kvm *kvm,
1780                                             gpa_t gpa, u64 size,
1781                                             void *data),
1782                              void *data)
1783 {
1784         struct kvm_memslots *slots;
1785         struct kvm_memory_slot *memslot;
1786         int ret = 0;
1787
1788         slots = kvm_memslots(kvm);
1789
1790         /* we only care about the pages that the guest sees */
1791         kvm_for_each_memslot(memslot, slots) {
1792                 unsigned long hva_start, hva_end;
1793                 gpa_t gpa;
1794
1795                 hva_start = max(start, memslot->userspace_addr);
1796                 hva_end = min(end, memslot->userspace_addr +
1797                                         (memslot->npages << PAGE_SHIFT));
1798                 if (hva_start >= hva_end)
1799                         continue;
1800
1801                 gpa = hva_to_gfn_memslot(hva_start, memslot) << PAGE_SHIFT;
1802                 ret |= handler(kvm, gpa, (u64)(hva_end - hva_start), data);
1803         }
1804
1805         return ret;
1806 }
1807
1808 static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1809 {
1810         unmap_stage2_range(kvm, gpa, size);
1811         return 0;
1812 }
1813
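/*
 * MMU notifier unmap callback: tear down the stage-2 mappings backing the
 * HVA range [start, end). A NULL stage-2 pgd means nothing was ever mapped.
 */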
1814 int kvm_unmap_hva_range(struct kvm *kvm,
1815                         unsigned long start, unsigned long end)
1816 {
1817         if (!kvm->arch.pgd)
1818                 return 0;
1819
1820         trace_kvm_unmap_hva_range(start, end);
1821         handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
1822         return 0;
1823 }
1824
1825 static int kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1826 {
1827         pte_t *pte = (pte_t *)data;
1828
1829         WARN_ON(size != PAGE_SIZE);
1830         /*
1831          * We can always call stage2_set_pte with KVM_S2PTE_FLAG_LOGGING_ACTIVE
1832          * flag clear because MMU notifiers will have unmapped a huge PMD before
1833          * calling ->change_pte() (which in turn calls kvm_set_spte_hva()) and
1834          * therefore stage2_set_pte() never needs to clear out a huge PMD
1835          * through this calling path.
1836          */
1837         stage2_set_pte(kvm, NULL, gpa, pte, 0);
1838         return 0;
1839 }
1840
1841
1842 void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1843 {
1844         unsigned long end = hva + PAGE_SIZE;
1845         kvm_pfn_t pfn = pte_pfn(pte);
1846         pte_t stage2_pte;
1847
1848         if (!kvm->arch.pgd)
1849                 return;
1850
1851         trace_kvm_set_spte_hva(hva);
1852
1853         /*
1854          * We've moved a page around, probably through CoW, so let's treat it
1855          * just like a translation fault and clean the cache to the PoC.
1856          */
1857         clean_dcache_guest_page(pfn, PAGE_SIZE);
1858         stage2_pte = pfn_pte(pfn, PAGE_S2);
1859         handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte);
1860 }
1861
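/*
 * Test and clear the "young" (access) state of the stage-2 entry mapping
 * @gpa. Handles both a huge PMD and a last-level PTE; returns non-zero if
 * the entry was young.
 */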
1862 static int kvm_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1863 {
1864         pmd_t *pmd;
1865         pte_t *pte;
1866
1867         WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
1868         pmd = stage2_get_pmd(kvm, NULL, gpa);
1869         if (!pmd || pmd_none(*pmd))     /* Nothing there */
1870                 return 0;
1871
1872         if (pmd_thp_or_huge(*pmd))      /* THP, HugeTLB */
1873                 return stage2_pmdp_test_and_clear_young(pmd);
1874
1875         pte = pte_offset_kernel(pmd, gpa);
1876         if (pte_none(*pte))
1877                 return 0;
1878
1879         return stage2_ptep_test_and_clear_young(pte);
1880 }
1881
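/*
 * Report whether the stage-2 entry mapping @gpa is young, without clearing
 * the access flag.
 */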
1882 static int kvm_test_age_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
1883 {
1884         pmd_t *pmd;
1885         pte_t *pte;
1886
1887         WARN_ON(size != PAGE_SIZE && size != PMD_SIZE);
1888         pmd = stage2_get_pmd(kvm, NULL, gpa);
1889         if (!pmd || pmd_none(*pmd))     /* Nothing there */
1890                 return 0;
1891
1892         if (pmd_thp_or_huge(*pmd))              /* THP, HugeTLB */
1893                 return pmd_young(*pmd);
1894
1895         pte = pte_offset_kernel(pmd, gpa);
1896         if (!pte_none(*pte))            /* Just a page... */
1897                 return pte_young(*pte);
1898
1899         return 0;
1900 }
1901
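/*
 * MMU notifier age callback: clear the access flag for the stage-2 entries
 * backing the HVA range, and report whether any of them were young.
 */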
1902 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
1903 {
1904         if (!kvm->arch.pgd)
1905                 return 0;
1906         trace_kvm_age_hva(start, end);
1907         return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
1908 }
1909
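/*
 * MMU notifier test-age callback: report whether the stage-2 entry backing
 * @hva is young, without clearing the access flag.
 */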
1910 int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
1911 {
1912         if (!kvm->arch.pgd)
1913                 return 0;
1914         trace_kvm_test_age_hva(hva);
1915         return handle_hva_to_gpa(kvm, hva, hva + PAGE_SIZE, kvm_test_age_hva_handler, NULL);
1916 }
1917
1918 void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
1919 {
1920         mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
1921 }
1922
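/*
 * Return the physical address of the PGD to install in HTTBR: the merged
 * PGD when the extended idmap is in use, the regular hyp PGD otherwise.
 */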
1923 phys_addr_t kvm_mmu_get_httbr(void)
1924 {
1925         if (__kvm_cpu_uses_extended_idmap())
1926                 return virt_to_phys(merged_hyp_pgd);
1927         else
1928                 return virt_to_phys(hyp_pgd);
1929 }
1930
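/* Physical address of the HYP initialisation vector (__kvm_hyp_init). */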
1931 phys_addr_t kvm_get_idmap_vector(void)
1932 {
1933         return hyp_idmap_vector;
1934 }
1935
1936 static int kvm_map_idmap_text(pgd_t *pgd)
1937 {
1938         int err;
1939
1940         /* Create the idmap in the boot page tables */
1941         err =   __create_hyp_mappings(pgd, __kvm_idmap_ptrs_per_pgd(),
1942                                       hyp_idmap_start, hyp_idmap_end,
1943                                       __phys_to_pfn(hyp_idmap_start),
1944                                       PAGE_HYP_EXEC);
1945         if (err)
1946                 kvm_err("Failed to idmap %lx-%lx\n",
1947                         hyp_idmap_start, hyp_idmap_end);
1948
1949         return err;
1950 }
1951
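/*
 * One-time initialisation of the HYP page tables: locate the idmap text
 * section, allocate the hyp PGD (plus the boot and merged PGDs when the
 * extended idmap is needed), and map the idmap text executable in HYP.
 */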
1952 int kvm_mmu_init(void)
1953 {
1954         int err;
1955
1956         hyp_idmap_start = kvm_virt_to_phys(__hyp_idmap_text_start);
1957         hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
1958         hyp_idmap_end = kvm_virt_to_phys(__hyp_idmap_text_end);
1959         hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
1960         hyp_idmap_vector = kvm_virt_to_phys(__kvm_hyp_init);
1961
1962         /*
1963          * We rely on the linker script to ensure at build time that the HYP
1964          * init code does not cross a page boundary.
1965          */
1966         BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
1967
1968         kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
1969         kvm_debug("HYP VA range: %lx:%lx\n",
1970                   kern_hyp_va(PAGE_OFFSET),
1971                   kern_hyp_va((unsigned long)high_memory - 1));
1972
1973         if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
1974             hyp_idmap_start <  kern_hyp_va((unsigned long)high_memory - 1) &&
1975             hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
1976                 /*
1977                  * The idmap page intersects the HYP VA space;
1978                  * it is not safe to continue further.
1979                  */
1980                 kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
1981                 err = -EINVAL;
1982                 goto out;
1983         }
1984
1985         hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
1986         if (!hyp_pgd) {
1987                 kvm_err("Hyp mode PGD not allocated\n");
1988                 err = -ENOMEM;
1989                 goto out;
1990         }
1991
1992         if (__kvm_cpu_uses_extended_idmap()) {
1993                 boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
1994                                                          hyp_pgd_order);
1995                 if (!boot_hyp_pgd) {
1996                         kvm_err("Hyp boot PGD not allocated\n");
1997                         err = -ENOMEM;
1998                         goto out;
1999                 }
2000
2001                 err = kvm_map_idmap_text(boot_hyp_pgd);
2002                 if (err)
2003                         goto out;
2004
2005                 merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
2006                 if (!merged_hyp_pgd) {
2007                         kvm_err("Failed to allocate extra HYP pgd\n");
                        err = -ENOMEM;
2008                         goto out;
2009                 }
2010                 __kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
2011                                     hyp_idmap_start);
2012         } else {
2013                 err = kvm_map_idmap_text(hyp_pgd);
2014                 if (err)
2015                         goto out;
2016         }
2017
2018         io_map_base = hyp_idmap_start;
2019         return 0;
2020 out:
2021         free_hyp_pgds();
2022         return err;
2023 }
2024
2025 void kvm_arch_commit_memory_region(struct kvm *kvm,
2026                                    const struct kvm_userspace_memory_region *mem,
2027                                    const struct kvm_memory_slot *old,
2028                                    const struct kvm_memory_slot *new,
2029                                    enum kvm_mr_change change)
2030 {
2031         /*
2032          * At this point memslot has been committed and there is an
2033          * allocated dirty_bitmap[], dirty pages will be tracked while the
2034          * memory slot is write protected.
2035          */
2036         if (change != KVM_MR_DELETE && mem->flags & KVM_MEM_LOG_DIRTY_PAGES)
2037                 kvm_mmu_wp_memory_region(kvm, mem->slot);
2038 }
2039
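/*
 * Validate a memslot create/move/flags change: reject regions that fall
 * outside the guest IPA space, and eagerly map any VM_PFNMAP (device)
 * VMAs covered by the region into stage 2.
 */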
2040 int kvm_arch_prepare_memory_region(struct kvm *kvm,
2041                                    struct kvm_memory_slot *memslot,
2042                                    const struct kvm_userspace_memory_region *mem,
2043                                    enum kvm_mr_change change)
2044 {
2045         hva_t hva = mem->userspace_addr;
2046         hva_t reg_end = hva + mem->memory_size;
2047         bool writable = !(mem->flags & KVM_MEM_READONLY);
2048         int ret = 0;
2049
2050         if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
2051                         change != KVM_MR_FLAGS_ONLY)
2052                 return 0;
2053
2054         /*
2055          * Prevent userspace from creating a memory region outside of the
2056          * IPA space addressable by the guest.
2057          */
2058         if (memslot->base_gfn + memslot->npages >=
2059             (KVM_PHYS_SIZE >> PAGE_SHIFT))
2060                 return -EFAULT;
2061
2062         down_read(&current->mm->mmap_sem);
2063         /*
2064          * A memory region could potentially cover multiple VMAs, and any holes
2065          * between them, so iterate over all of them to find out if we can map
2066          * any of them right now.
2067          *
2068          *     +--------------------------------------------+
2069          * +---------------+----------------+   +----------------+
2070          * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
2071          * +---------------+----------------+   +----------------+
2072          *     |               memory region                |
2073          *     +--------------------------------------------+
2074          */
2075         do {
2076                 struct vm_area_struct *vma = find_vma(current->mm, hva);
2077                 hva_t vm_start, vm_end;
2078
2079                 if (!vma || vma->vm_start >= reg_end)
2080                         break;
2081
2082                 /*
2083                  * Mapping a read-only VMA is only allowed if the
2084                  * memory region is configured as read-only.
2085                  */
2086                 if (writable && !(vma->vm_flags & VM_WRITE)) {
2087                         ret = -EPERM;
2088                         break;
2089                 }
2090
2091                 /*
2092                  * Take the intersection of this VMA with the memory region
2093                  */
2094                 vm_start = max(hva, vma->vm_start);
2095                 vm_end = min(reg_end, vma->vm_end);
2096
2097                 if (vma->vm_flags & VM_PFNMAP) {
2098                         gpa_t gpa = mem->guest_phys_addr +
2099                                     (vm_start - mem->userspace_addr);
2100                         phys_addr_t pa;
2101
2102                         pa = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;
2103                         pa += vm_start - vma->vm_start;
2104
2105                         /* IO region dirty page logging not allowed */
2106                         if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES) {
2107                                 ret = -EINVAL;
2108                                 goto out;
2109                         }
2110
2111                         ret = kvm_phys_addr_ioremap(kvm, gpa, pa,
2112                                                     vm_end - vm_start,
2113                                                     writable);
2114                         if (ret)
2115                                 break;
2116                 }
2117                 hva = vm_end;
2118         } while (hva < reg_end);
2119
2120         if (change == KVM_MR_FLAGS_ONLY)
2121                 goto out;
2122
2123         spin_lock(&kvm->mmu_lock);
2124         if (ret)
2125                 unmap_stage2_range(kvm, mem->guest_phys_addr, mem->memory_size);
2126         else
2127                 stage2_flush_memslot(kvm, memslot);
2128         spin_unlock(&kvm->mmu_lock);
2129 out:
2130         up_read(&current->mm->mmap_sem);
2131         return ret;
2132 }
2133
2134 void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
2135                            struct kvm_memory_slot *dont)
2136 {
2137 }
2138
2139 int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2140                             unsigned long npages)
2141 {
2142         return 0;
2143 }
2144
2145 void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots)
2146 {
2147 }
2148
2149 void kvm_arch_flush_shadow_all(struct kvm *kvm)
2150 {
2151         kvm_free_stage2_pgd(kvm);
2152 }
2153
2154 void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
2155                                    struct kvm_memory_slot *slot)
2156 {
2157         gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
2158         phys_addr_t size = slot->npages << PAGE_SHIFT;
2159
2160         spin_lock(&kvm->mmu_lock);
2161         unmap_stage2_range(kvm, gpa, size);
2162         spin_unlock(&kvm->mmu_lock);
2163 }
2164
2165 /*
2166  * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
2167  *
2168  * Main problems:
2169  * - S/W ops are local to a CPU (not broadcast)
2170  * - We have line migration behind our back (speculation)
2171  * - System caches don't support S/W at all (damn!)
2172  *
2173  * In the face of the above, the best we can do is to try and convert
2174  * S/W ops to VA ops. Because the guest is not allowed to infer the
2175  * S/W to PA mapping, it can only use S/W to nuke the whole cache,
2176  * which is a rather good thing for us.
2177  *
2178  * Also, it is only used when turning caches on/off ("The expected
2179  * usage of the cache maintenance instructions that operate by set/way
2180  * is associated with the cache maintenance instructions associated
2181  * with the powerdown and powerup of caches, if this is required by
2182  * the implementation.").
2183  *
2184  * We use the following policy:
2185  *
2186  * - If we trap a S/W operation, we enable VM trapping to detect
2187  *   caches being turned on/off, and do a full clean.
2188  *
2189  * - We flush the caches whenever the caches are turned on or off.
2190  *
2191  * - Once the caches are enabled, we stop trapping VM ops.
2192  */
2193 void kvm_set_way_flush(struct kvm_vcpu *vcpu)
2194 {
2195         unsigned long hcr = *vcpu_hcr(vcpu);
2196
2197         /*
2198          * If this is the first time we do a S/W operation
2199          * (i.e. HCR_TVM not set) flush the whole memory, and set the
2200          * VM trapping.
2201          *
2202          * Otherwise, rely on the VM trapping to wait for the MMU +
2203          * Caches to be turned off. At that point, we'll be able to
2204          * clean the caches again.
2205          */
2206         if (!(hcr & HCR_TVM)) {
2207                 trace_kvm_set_way_flush(*vcpu_pc(vcpu),
2208                                         vcpu_has_cache_enabled(vcpu));
2209                 stage2_flush_vm(vcpu->kvm);
2210                 *vcpu_hcr(vcpu) = hcr | HCR_TVM;
2211         }
2212 }
2213
2214 void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
2215 {
2216         bool now_enabled = vcpu_has_cache_enabled(vcpu);
2217
2218         /*
2219          * If switching the MMU+caches on, we need to invalidate the caches.
2220          * If switching them off, we need to clean the caches.
2221          * Clean + invalidate does the trick always.
2222          */
2223         if (now_enabled != was_enabled)
2224                 stage2_flush_vm(vcpu->kvm);
2225
2226         /* Caches are now on, stop trapping VM ops (until a S/W op) */
2227         if (now_enabled)
2228                 *vcpu_hcr(vcpu) &= ~HCR_TVM;
2229
2230         trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
2231 }