/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>
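
/*
 * Returns the number of host pages needed to hold the guest view of
 * a TCE table: one u64 entry per IOMMU page, rounded up to PAGE_SIZE.
 */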
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
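
/*
 * Charges or releases (per @inc) the pages backing a TCE table against
 * the owning process's RLIMIT_MEMLOCK.
 */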
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
	long ret = 0;

	if (!current || !current->mm)
		return ret; /* process exited */

	down_write(&current->mm->mmap_sem);

	if (inc) {
		unsigned long locked, lock_limit;

		locked = current->mm->locked_vm + stt_pages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += stt_pages;
	} else {
		if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
			stt_pages = current->mm->locked_vm;

		current->mm->locked_vm -= stt_pages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
			inc ? '+' : '-',
			stt_pages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}
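
/*
 * Called when an IOMMU group is detached from the VM: drops the reference
 * this KVM holds on every hardware table belonging to the group.
 */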
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
	}
}
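
/*
 * Looks up the TCE table descriptor by the table fd, picks a hardware
 * table with compatible parameters from the group and attaches it to
 * the descriptor, taking a reference.
 */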
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		return 0;
	}

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		__free_page(stt->pages[i]);

	kfree(stt);
}
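
/* Hands out the TCE table backing pages when userspace mmaps the table fd. */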
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}
static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};
static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	kvm_put_kvm(stt->kvm);

	kvmppc_account_memlimit(
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}
static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};
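
/*
 * Creates a guest view of a TCE table for the given LIOBN and returns
 * an anonymous file descriptor which userspace can mmap.
 */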
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	unsigned long npages, size = args->size;
	int ret = -ENOMEM;
	int i;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
		(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0) {
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
		kvm_get_kvm(kvm);
	}

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

fail:
	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
fail_acct:
	kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);

	return ret;
}
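
/*
 * Validates a TCE: checks the permission bits and the GPA, and verifies
 * that the backing memory is preregistered for every attached hardware
 * table's page size.
 */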
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_PARAMETER;

	if (kvmppc_gpa_to_ua(stt->kvm, tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
			&ua, NULL))
		return H_TOO_HARD;

	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem)
			return H_TOO_HARD;

		if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
			return H_TOO_HARD;
	}

	return H_SUCCESS;
}
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg(tbl, entry, &hpa, &dir);
}
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
		return H_HARDWARE;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg(tbl, entry, &hpa, &dir);

	return ret;
}
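
/*
 * One guest TCE entry may cover several IOMMU pages if the hardware
 * table uses a smaller page size; unmaps every subpage of the entry.
 */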
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa = 0;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_HARDWARE;

	if (mm_iommu_mapped_inc(mem))
		return H_CLOSED;

	ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_HARDWARE;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return H_SUCCESS;
}
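
/* Maps every IOMMU subpage covered by a single guest TCE entry. */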
static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}
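
/* Virtual mode handler for the H_PUT_TCE hypercall: updates a single TCE. */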
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	dir = iommu_tce_direction(tce);

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
			tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		if (ret == H_SUCCESS)
			continue;

		if (ret == H_TOO_HARD)
			goto unlock_exit;

		WARN_ON_ONCE(1);
		kvmppc_clear_tce(stit->tbl, entry);
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
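
/*
 * Virtual mode handler for H_PUT_TCE_INDIRECT: reads up to 512 TCEs from
 * a guest page pointed to by @tce_list and applies each of them.
 */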
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * SPAPR spec says that the maximum size of the list is 512 TCEs
	 * so the whole table fits in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * real mode processing.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code. If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_gpa_to_ua(vcpu->kvm,
				tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
				&ua, NULL)) {
			ret = H_PARAMETER;
			goto unlock_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto unlock_exit;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(stit->tbl, entry + i);
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
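
/*
 * Virtual mode handler for H_STUFF_TCE: sets @npages consecutive entries
 * to the same value, tearing down any hardware mappings first.
 */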
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only to allow userspace poison TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				return ret;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);