/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/stringify.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/pte-walk.h>
#ifdef CONFIG_BUG

#define WARN_ON_ONCE_RM(condition)	({			\
	static bool __section(.data.unlikely) __warned;		\
	int __ret_warn_once = !!(condition);			\
								\
	if (unlikely(__ret_warn_once && !__warned)) {		\
		__warned = true;				\
		pr_err("WARN_ON_ONCE_RM: (%s) at %s:%u\n",	\
				__stringify(condition),		\
				__func__, __LINE__);		\
		dump_stack();					\
	}							\
	unlikely(__ret_warn_once);				\
})

#else

#define WARN_ON_ONCE_RM(condition) ({				\
	int __ret_warn_on = !!(condition);			\
	unlikely(__ret_warn_on);				\
})

#endif
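/*
 * Note: WARN_ON_ONCE_RM() exists because this code can run in real
 * (MMU off) mode, where the generic WARN_ON_ONCE() machinery is presumably
 * not safe to call; the _RM variant limits itself to a one-shot report via
 * pr_err().
 */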
#define TCES_PER_PAGE	(PAGE_SIZE / sizeof(u64))
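/*
 * Each TCE is a 64-bit entry, so one backing page holds PAGE_SIZE / 8 of
 * them: 512 with 4K kernel pages, 8192 with 64K kernel pages.
 */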
/*
 * Finds a TCE table descriptor by LIOBN.
 *
 * WARNING: This will be called in real or virtual mode on HV KVM and virtual
 *          mode on PR KVM
 */
struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm *kvm,
		unsigned long liobn)
{
	struct kvmppc_spapr_tce_table *stt;

	list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
		if (stt->liobn == liobn)
			return stt;

	return NULL;
}
EXPORT_SYMBOL_GPL(kvmppc_find_table);
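/*
 * The lockless list walk above is used because this may run in real mode,
 * where taking sleeping locks is not an option; entries are assumed to stay
 * valid for as long as a vcpu can still issue TCE hcalls against them.
 */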
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Validates TCE address.
 * At the moment flags and page mask are validated.
 * As the host kernel does not access those addresses (just puts them
 * into the table and user space is supposed to process them), we can skip
 * checking other things (such as TCE being a guest RAM address or the page
 * being actually allocated).
 */
static long kvmppc_rm_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua, NULL))
		return H_TOO_HARD;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup_rm(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa_rm(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
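/*
 * A TCE value as passed by the guest packs the guest physical address into
 * the upper bits and the TCE_PCI_READ/TCE_PCI_WRITE permission flags into
 * the low bits; iommu_tce_direction() maps the flag pair to a
 * dma_data_direction (both set -> DMA_BIDIRECTIONAL, neither -> DMA_NONE).
 */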
/* Note on the use of page_address() in real mode,
 *
 * It is safe to use page_address() in real mode on ppc64 because
 * page_address() is always defined as lowmem_page_address()
 * which returns __va(PFN_PHYS(page_to_pfn(page))), which is arithmetic
 * and does not access the page struct.
 *
 * Theoretically page_address() could be defined differently
 * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
 * would have to be enabled.
 * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
 * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
 * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
 * is not expected to be enabled on ppc32, page_address()
 * is safe for ppc32 as well.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
static u64 *kvmppc_page_address(struct page *page)
{
#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
#error TODO: fix to avoid page_address() here
#endif
	return (u64 *) page_address(page);
}
/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * Called in both real and virtual modes.
 * Cannot fail, so kvmppc_tce_validate must be called before it.
 *
 * WARNING: This will be called in real-mode on HV KVM and virtual
 *          mode on PR KVM
 */
void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;

	idx -= stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = kvmppc_page_address(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_put);
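/*
 * Note that kvmppc_tce_put() only updates KVM's own copy of the TCE table
 * (the pages backing the guest-visible table); attached hardware IOMMU
 * tables, if any, are updated separately by the callers via the
 * stt->iommu_tables list.
 */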
long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua, unsigned long **prmap)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = search_memslots(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	if (prmap)
		*prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_tce_to_ua);
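/*
 * The "ua" computed above is the host userspace virtual address that backs
 * the guest page named by the TCE, with the in-page offset bits preserved.
 * When prmap is supplied, it is pointed at the memslot's rmap entry so that
 * the caller can lock it against concurrent MMU notifier activity.
 */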
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
static long iommu_tce_xchg_rm(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry, unsigned long *hpa,
		enum dma_data_direction *direction)
{
	long ret;

	ret = tbl->it_ops->exchange_rm(tbl, entry, hpa, direction);

	if (!ret && ((*direction == DMA_FROM_DEVICE) ||
			(*direction == DMA_BIDIRECTIONAL))) {
		__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
		/*
		 * kvmppc_rm_tce_iommu_do_map() updates the UA cache after
		 * calling this, so we still get a valid UA here.
		 */
		if (pua && *pua)
			mm_iommu_ua_mark_dirty_rm(mm, be64_to_cpu(*pua));
	}

	return ret;
}
static void kvmppc_rm_clear_tce(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
}
static long kvmppc_rm_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}
static long kvmppc_rm_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir))
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret)
		iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}
static long kvmppc_rm_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_rm_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
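/*
 * One guest-visible TCE can span several hardware IOMMU entries when the
 * guest table uses a larger page size than the host IOMMU table; e.g. a
 * 16MB guest IOMMU page over 64K host IOMMU pages gives 256 subpages per
 * guest entry (just an illustrative ratio for the computation above).
 */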
static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup_rm(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		return H_TOO_HARD;

	if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
			&hpa)))
		return H_HARDWARE;

	if (WARN_ON_ONCE_RM(mm_iommu_mapped_inc(mem)))
		return H_CLOSED;

	ret = iommu_tce_xchg_rm(kvm->mm, tbl, entry, &hpa, &dir);
	if (ret) {
		mm_iommu_mapped_dec(mem);
		/*
		 * real mode xchg can fail if struct page crosses
		 * a page boundary
		 */
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_rm_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}
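/*
 * The it_userspace entry ("pua") caches the userspace address for each
 * hardware TCE so that the unmap and dirty-tracking paths above can find
 * the pre-registered memory region again without a reverse hpa->ua lookup.
 */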
static long kvmppc_rm_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_rm_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_rm_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);
	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
		return H_PARAMETER;

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry, ua, dir);

		if (ret != H_SUCCESS) {
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
			return ret;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

	return H_SUCCESS;
}
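/*
 * Summary of the real-mode H_PUT_TCE path above: validate the ioba and the
 * TCE, translate the TCE to a userspace address, update every hardware
 * IOMMU table attached to this LIOBN (map or unmap depending on the
 * permission bits), and only then mirror the value into KVM's own table via
 * kvmppc_tce_put(). Anything that cannot be handled safely in real mode is
 * punted with H_TOO_HARD so the hcall is retried in virtual mode.
 */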
static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
		unsigned long ua, unsigned long *phpa)
{
	pte_t *ptep, pte;
	unsigned shift = 0;

	/*
	 * Called in real mode with MSR_EE = 0. We are safe here.
	 * It is ok to do the lookup with arch.pgdir here, because
	 * we are doing this on secondary cpus and current task there
	 * is not the hypervisor. Also this is safe against THP in the
	 * host, because an IPI to primary thread will wait for the secondary
	 * to exit which will again result in the below page table walk
	 * to finish.
	 */
	ptep = __find_linux_pte(vcpu->arch.pgdir, ua, NULL, &shift);
	if (!ptep || !pte_present(*ptep))
		return -ENXIO;
	pte = *ptep;

	if (!shift)
		shift = PAGE_SHIFT;

	/* Avoid handling anything potentially complicated in realmode */
	if (shift > PAGE_SHIFT)
		return -EAGAIN;

	if (!pte_young(pte))
		return -EAGAIN;

	*phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
			(ua & ~PAGE_MASK);

	return 0;
}
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS;
	unsigned long tces, entry, ua = 0;
	unsigned long *rmap = NULL;
	bool prereg = false;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The spec says that the maximum size of the list is 512 TCEs,
	 * so the whole table addressed fits in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;
	if (mm_iommu_preregistered(vcpu->kvm->mm)) {
		/*
		 * We get here if guest memory was pre-registered, which
		 * is normally the VFIO case, and gpa->hpa translation does not
		 * depend on hpt.
		 */
		struct mm_iommu_table_group_mem_t *mem;

		if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, NULL))
			return H_TOO_HARD;

		mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
		if (mem)
			prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
					IOMMU_PAGE_SHIFT_4K, &tces) == 0;
	}

	if (!prereg) {
		/*
		 * This is usually a case of a guest with emulated devices only
		 * when the TCE list is not in preregistered memory.
		 * We do not require memory to be preregistered in this case
		 * so lock rmap and do __find_linux_pte().
		 */
		if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
			return H_TOO_HARD;

		rmap = (void *) vmalloc_to_phys(rmap);
		if (WARN_ON_ONCE_RM(!rmap))
			return H_TOO_HARD;

		/*
		 * Synchronize with the MMU notifier callbacks in
		 * book3s_64_mmu_hv.c (kvm_unmap_hva_range_hv etc.).
		 * While we have the rmap lock, code running on other CPUs
		 * cannot finish unmapping the host real page that backs
		 * this guest real page, so we are OK to access the host
		 * real page.
		 */
		lock_rmap(rmap);
		if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
	}
	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ret = kvmppc_rm_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);

		ua = 0;
		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua, NULL))
			return H_PARAMETER;

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_rm_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl,
						entry);
				goto unlock_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	if (rmap)
		unlock_rmap(rmap);

	return ret;
}
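/*
 * Note the two-pass structure above: the whole TCE list is validated first
 * and only then applied, because kvmppc_tce_put() cannot fail and must only
 * be given TCEs that already passed kvmppc_rm_tce_validate().
 */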
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	/* For radix, we might be in virtual mode, so punt */
	if (kvm_is_radix(vcpu->kvm))
		return H_TOO_HARD;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_rm_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE_RM(1);
			kvmppc_rm_clear_tce(vcpu->kvm, stit->tbl, entry);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
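/*
 * H_STUFF_TCE fills npages consecutive entries starting at ioba with the
 * same tce_value; since the permission bits are required to be clear, this
 * is typically used by the guest to invalidate a range of mappings in one
 * hcall.
 */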
/* This can be called in either virtual mode or real mode */
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret;
	unsigned long idx;
	struct page *page;
	u64 *tbl;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = (ioba >> stt->page_shift) - stt->offset;
	page = stt->pages[idx / TCES_PER_PAGE];
	tbl = (u64 *)page_address(page);

	vcpu->arch.regs.gpr[4] = tbl[idx % TCES_PER_PAGE];

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
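/*
 * The looked-up TCE is returned to the guest in GPR4, which is where PAPR
 * hcalls place their first output value; the H_SUCCESS status itself goes
 * back in GPR3 via the normal hcall return path.
 */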
#endif /* KVM_BOOK3S_HV_POSSIBLE */