KVM: PPC: Validate TCEs against preregistered memory page sizes
arch/powerpc/kvm/book3s_64_vio.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

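/* Number of host pages needed to store @iommu_pages TCEs (8 bytes each). */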
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
        return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

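/*
 * Total pages to account for a TCE table: the TCE pages themselves plus
 * the pages backing the kvmppc_spapr_tce_table descriptor and its array
 * of page pointers.
 */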
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
        unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
                        (tce_pages * sizeof(struct page *));

        return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

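/*
 * Charge (inc=true) or uncharge (inc=false) @stt_pages against the
 * current process's RLIMIT_MEMLOCK. Fails with -ENOMEM if the limit
 * would be exceeded and the caller lacks CAP_IPC_LOCK.
 */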
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
        long ret = 0;

        if (!current || !current->mm)
                return ret; /* process exited */

        down_write(&current->mm->mmap_sem);

        if (inc) {
                unsigned long locked, lock_limit;

                locked = current->mm->locked_vm + stt_pages;
                lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        ret = -ENOMEM;
                else
                        current->mm->locked_vm += stt_pages;
        } else {
                if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
                        stt_pages = current->mm->locked_vm;

                current->mm->locked_vm -= stt_pages;
        }

        pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
                        inc ? '+' : '-',
                        stt_pages << PAGE_SHIFT,
                        current->mm->locked_vm << PAGE_SHIFT,
                        rlimit(RLIMIT_MEMLOCK),
                        ret ? " - exceeded" : "");

        up_write(&current->mm->mmap_sem);

        return ret;
}

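/* RCU callback: drop the iommu_table reference and free the wrapper. */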
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
                        struct kvmppc_spapr_tce_iommu_table, rcu);

        iommu_tce_table_put(stit->tbl);

        kfree(stit);
}

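/*
 * kref release callback: unlink the table from the LIOBN's list and
 * defer the actual freeing to an RCU grace period so that lockless
 * readers of the list remain safe.
 */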
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
        struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
                        struct kvmppc_spapr_tce_iommu_table, kref);

        list_del_rcu(&stit->next);

        call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

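/*
 * Drop the reference which a KVM TCE table holds on a hardware table
 * of the IOMMU group being detached from the VM.
 */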
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                struct iommu_group *grp)
{
        int i;
        struct kvmppc_spapr_tce_table *stt;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct iommu_table_group *table_group = NULL;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

                table_group = iommu_group_get_iommudata(grp);
                if (WARN_ON(!table_group))
                        continue;

                list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                                if (table_group->tables[i] != stit->tbl)
                                        continue;

                                kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
                                return;
                        }
                }
        }
}

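/*
 * Associate a hardware IOMMU table of @grp with the KVM TCE table
 * identified by @tablefd, provided their window parameters are
 * compatible; called when userspace attaches a VFIO group to the VM
 * through the KVM VFIO device.
 */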
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                struct iommu_group *grp)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        bool found = false;
        struct iommu_table *tbl = NULL;
        struct iommu_table_group *table_group;
        long i;
        struct kvmppc_spapr_tce_iommu_table *stit;
        struct fd f;

        f = fdget(tablefd);
        if (!f.file)
                return -EBADF;

        list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
                if (stt == f.file->private_data) {
                        found = true;
                        break;
                }
        }

        fdput(f);

        if (!found)
                return -EINVAL;

        table_group = iommu_group_get_iommudata(grp);
        if (WARN_ON(!table_group))
                return -EFAULT;

        for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
                struct iommu_table *tbltmp = table_group->tables[i];

                if (!tbltmp)
                        continue;
                /* Make sure the hardware table parameters are compatible */
                if ((tbltmp->it_page_shift <= stt->page_shift) &&
                                (tbltmp->it_offset << tbltmp->it_page_shift ==
                                 stt->offset << stt->page_shift) &&
                                (tbltmp->it_size << tbltmp->it_page_shift >=
                                 stt->size << stt->page_shift)) {
                        /*
                         * Reference the table to avoid races with
                         * add/remove DMA windows.
                         */
                        tbl = iommu_tce_table_get(tbltmp);
                        break;
                }
        }
        if (!tbl)
                return -EINVAL;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                if (tbl != stit->tbl)
                        continue;

                if (!kref_get_unless_zero(&stit->kref)) {
                        /* stit is being destroyed */
                        iommu_tce_table_put(tbl);
                        return -ENOTTY;
                }
                /*
                 * The table is already known to this KVM; we have just
                 * taken another reference and can return.
                 */
                return 0;
        }

        stit = kzalloc(sizeof(*stit), GFP_KERNEL);
        if (!stit) {
                iommu_tce_table_put(tbl);
                return -ENOMEM;
        }

        stit->tbl = tbl;
        kref_init(&stit->kref);

        list_add_rcu(&stit->next, &stt->iommu_tables);

        return 0;
}

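/* RCU callback: free the pages backing a guest TCE table. */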
static void release_spapr_tce_table(struct rcu_head *head)
{
        struct kvmppc_spapr_tce_table *stt = container_of(head,
                        struct kvmppc_spapr_tce_table, rcu);
        unsigned long i, npages = kvmppc_tce_pages(stt->size);

        for (i = 0; i < npages; i++)
                __free_page(stt->pages[i]);

        kfree(stt);
}

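/* Serve faults on the TCE table fd's mapping from the pages allocated at creation. */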
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
        struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
        struct page *page;

        if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
                return VM_FAULT_SIGBUS;

        page = stt->pages[vmf->pgoff];
        get_page(page);
        vmf->page = page;
        return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
        .fault = kvm_spapr_tce_fault,
};

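/* mmap handler for the TCE table fd; lets userspace map the table directly. */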
static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &kvm_spapr_tce_vm_ops;
        return 0;
}

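/*
 * Release handler for the TCE table fd: unlink the table from the VM,
 * drop all hardware table references, uncharge the locked memory and
 * free the table after an RCU grace period.
 */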
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
        struct kvmppc_spapr_tce_table *stt = filp->private_data;
        struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
        struct kvm *kvm = stt->kvm;

        mutex_lock(&kvm->lock);
        list_del_rcu(&stt->list);
        mutex_unlock(&kvm->lock);

        list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
                WARN_ON(!kref_read(&stit->kref));
                while (1) {
                        if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
                                break;
                }
        }

        kvm_put_kvm(stt->kvm);

        kvmppc_account_memlimit(
                kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
        call_rcu(&stt->rcu, release_spapr_tce_table);

        return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
        .mmap           = kvm_spapr_tce_mmap,
        .release        = kvm_spapr_tce_release,
};

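/*
 * KVM_CREATE_SPAPR_TCE_64 ioctl handler: allocate an in-kernel TCE
 * table for @args->liobn and return a file descriptor which userspace
 * can mmap() to access the table directly.
 */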
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                   struct kvm_create_spapr_tce_64 *args)
{
        struct kvmppc_spapr_tce_table *stt = NULL;
        struct kvmppc_spapr_tce_table *siter;
        unsigned long npages, size = args->size;
        int ret = -ENOMEM;
        int i;

        if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
                (args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
                return -EINVAL;

        npages = kvmppc_tce_pages(size);
        ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
        if (ret)
                return ret;

        ret = -ENOMEM;
        stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
                      GFP_KERNEL);
        if (!stt)
                goto fail_acct;

        stt->liobn = args->liobn;
        stt->page_shift = args->page_shift;
        stt->offset = args->offset;
        stt->size = size;
        stt->kvm = kvm;
        INIT_LIST_HEAD_RCU(&stt->iommu_tables);

        for (i = 0; i < npages; i++) {
                stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
                if (!stt->pages[i])
                        goto fail;
        }

        mutex_lock(&kvm->lock);

        /* Check this LIOBN hasn't been previously allocated */
        ret = 0;
        list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
                if (siter->liobn == args->liobn) {
                        ret = -EBUSY;
                        break;
                }
        }

        if (!ret)
                ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
                                       stt, O_RDWR | O_CLOEXEC);

        if (ret >= 0) {
                list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
                kvm_get_kvm(kvm);
        }

        mutex_unlock(&kvm->lock);

        if (ret >= 0)
                return ret;

 fail:
        for (i = 0; i < npages; i++)
                if (stt->pages[i])
                        __free_page(stt->pages[i]);

        kfree(stt);
 fail_acct:
        kvmppc_account_memlimit(kvmppc_stt_pages(npages), false);
        return ret;
}

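/*
 * Validate a guest-supplied TCE before it is stored: check that the GPA
 * is aligned to the window page size, that it translates to a userspace
 * address and, for every attached hardware table, that it is backed by
 * preregistered memory large enough for that table's page size.
 */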
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long tce)
{
        unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
        enum dma_data_direction dir = iommu_tce_direction(tce);
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long ua = 0;

        /* Allow userspace to poison the TCE table */
        if (dir == DMA_NONE)
                return H_SUCCESS;

        if (iommu_tce_check_gpa(stt->page_shift, gpa))
                return H_TOO_HARD;

        if (kvmppc_gpa_to_ua(stt->kvm, gpa, &ua, NULL))
                return H_TOO_HARD;

        list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
                unsigned long hpa = 0;
                struct mm_iommu_table_group_mem_t *mem;
                long shift = stit->tbl->it_page_shift;

                mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
                if (!mem)
                        return H_TOO_HARD;

                if (mm_iommu_ua_to_hpa(mem, ua, shift, &hpa))
                        return H_TOO_HARD;
        }

        return H_SUCCESS;
}

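/* Reset a hardware TCE entry to the cleared (DMA_NONE) state. */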
static void kvmppc_clear_tce(struct iommu_table *tbl, unsigned long entry)
{
        unsigned long hpa = 0;
        enum dma_data_direction dir = DMA_NONE;

        iommu_tce_xchg(tbl, entry, &hpa, &dir);
}

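/*
 * Decrement the "mapped" counter of the preregistered memory region
 * backing @entry and clear the stored userspace address.
 */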
static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        struct mm_iommu_table_group_mem_t *mem = NULL;
        const unsigned long pgsize = 1ULL << tbl->it_page_shift;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
        if (!mem)
                return H_TOO_HARD;

        mm_iommu_mapped_dec(mem);

        *pua = cpu_to_be64(0);

        return H_SUCCESS;
}

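/*
 * Clear one hardware TCE and, if it was mapped, release the reference
 * on the backing preregistered memory.
 */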
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
                struct iommu_table *tbl, unsigned long entry)
{
        enum dma_data_direction dir = DMA_NONE;
        unsigned long hpa = 0;
        long ret;

        if (WARN_ON_ONCE(iommu_tce_xchg(tbl, entry, &hpa, &dir)))
                return H_TOO_HARD;

        if (dir == DMA_NONE)
                return H_SUCCESS;

        ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
        if (ret != H_SUCCESS)
                iommu_tce_xchg(tbl, entry, &hpa, &dir);

        return ret;
}

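/*
 * Unmap one guest TCE entry, iterating over the hardware (IOMMU page
 * sized) sub-entries it covers when the guest page is larger.
 */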
static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry)
{
        unsigned long i, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0; i < subpages; ++i) {
                ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

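/*
 * Map one hardware TCE: translate @ua via the preregistered memory
 * list, take a mapped reference and program the HPA into the table.
 */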
long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        long ret;
        unsigned long hpa;
        __be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
        struct mm_iommu_table_group_mem_t *mem;

        if (!pua)
                /* it_userspace allocation might be delayed */
                return H_TOO_HARD;

        mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
        if (!mem)
                /* This only handles the v2 IOMMU type; v1 is handled via ioctl() */
                return H_TOO_HARD;

        if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
                return H_TOO_HARD;

        if (mm_iommu_mapped_inc(mem))
                return H_TOO_HARD;

        ret = iommu_tce_xchg(tbl, entry, &hpa, &dir);
        if (WARN_ON_ONCE(ret)) {
                mm_iommu_mapped_dec(mem);
                return H_TOO_HARD;
        }

        if (dir != DMA_NONE)
                kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

        *pua = cpu_to_be64(ua);

        return 0;
}

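/*
 * Map one guest TCE entry, splitting it into hardware-page-sized
 * mappings when the guest page size exceeds the IOMMU page size.
 */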
static long kvmppc_tce_iommu_map(struct kvm *kvm,
                struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
                unsigned long entry, unsigned long ua,
                enum dma_data_direction dir)
{
        unsigned long i, pgoff, ret = H_SUCCESS;
        unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
        unsigned long io_entry = entry * subpages;

        for (i = 0, pgoff = 0; i < subpages;
                        ++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

                ret = kvmppc_tce_iommu_do_map(kvm, tbl,
                                io_entry + i, ua + pgoff, dir);
                if (ret != H_SUCCESS)
                        break;
        }

        return ret;
}

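/*
 * H_PUT_TCE hypercall: validate and store a single TCE, updating every
 * hardware table attached to the LIOBN as well as the shadow table.
 */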
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                      unsigned long ioba, unsigned long tce)
{
        struct kvmppc_spapr_tce_table *stt;
        long ret, idx;
        struct kvmppc_spapr_tce_iommu_table *stit;
        unsigned long entry, ua = 0;
        enum dma_data_direction dir;

        /* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
        /*          liobn, ioba, tce); */

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, 1);
        if (ret != H_SUCCESS)
                return ret;

        ret = kvmppc_tce_validate(stt, tce);
        if (ret != H_SUCCESS)
                return ret;

        dir = iommu_tce_direction(tce);

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
                        tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
                ret = H_PARAMETER;
                goto unlock_exit;
        }

        entry = ioba >> stt->page_shift;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                if (dir == DMA_NONE)
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry);
                else
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
                                        entry, ua, dir);

                if (ret == H_SUCCESS)
                        continue;

                if (ret == H_TOO_HARD)
                        goto unlock_exit;

                WARN_ON_ONCE(1);
                kvmppc_clear_tce(stit->tbl, entry);
        }

        kvmppc_tce_put(stt, entry, tce);

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

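/*
 * H_PUT_TCE_INDIRECT hypercall: store up to 512 TCEs read from a
 * guest-provided list page at @tce_list.
 */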
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret = H_SUCCESS, idx;
        unsigned long entry, ua = 0;
        u64 __user *tces;
        u64 tce;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        entry = ioba >> stt->page_shift;
        /*
         * The SPAPR spec says that the maximum size of the list is 512 TCEs,
         * so the whole table fits in a 4K page.
         */
        if (npages > 512)
                return H_PARAMETER;

        if (tce_list & (SZ_4K - 1))
                return H_PARAMETER;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        idx = srcu_read_lock(&vcpu->kvm->srcu);
        if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
                ret = H_TOO_HARD;
                goto unlock_exit;
        }
        tces = (u64 __user *) ua;

        for (i = 0; i < npages; ++i) {
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                ret = kvmppc_tce_validate(stt, tce);
                if (ret != H_SUCCESS)
                        goto unlock_exit;
        }

        for (i = 0; i < npages; ++i) {
                /*
                 * This looks unsafe, because we validate, then regrab
                 * the TCE from userspace which could have been changed by
                 * another thread.
                 *
                 * But it actually is safe, because the relevant checks will be
                 * re-executed in the following code.  If userspace tries to
                 * change this dodgily it will result in a messier failure mode
                 * but won't threaten the host.
                 */
                if (get_user(tce, tces + i)) {
                        ret = H_TOO_HARD;
                        goto unlock_exit;
                }
                tce = be64_to_cpu(tce);

                if (kvmppc_gpa_to_ua(vcpu->kvm,
                                tce & ~(TCE_PCI_READ | TCE_PCI_WRITE),
                                &ua, NULL)) {
                        /* Don't leak the SRCU read lock on failure */
                        ret = H_PARAMETER;
                        goto unlock_exit;
                }

                list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                        ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
                                        stit->tbl, entry + i, ua,
                                        iommu_tce_direction(tce));

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                goto unlock_exit;

                        WARN_ON_ONCE(1);
                        /* Clear the entry we actually failed on */
                        kvmppc_clear_tce(stit->tbl, entry + i);
                }

                kvmppc_tce_put(stt, entry + i, tce);
        }

unlock_exit:
        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

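/*
 * H_STUFF_TCE hypercall: set @npages consecutive entries to the same
 * @tce_value, typically used to clear a range of the table.
 */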
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages)
{
        struct kvmppc_spapr_tce_table *stt;
        long i, ret;
        struct kvmppc_spapr_tce_iommu_table *stit;

        stt = kvmppc_find_table(vcpu->kvm, liobn);
        if (!stt)
                return H_TOO_HARD;

        ret = kvmppc_ioba_validate(stt, ioba, npages);
        if (ret != H_SUCCESS)
                return ret;

        /* Check permission bits only, to allow userspace to poison TCEs for debugging */
        if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
                return H_PARAMETER;

        list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
                unsigned long entry = ioba >> stt->page_shift;

                for (i = 0; i < npages; ++i) {
                        ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
                                        stit->tbl, entry + i);

                        if (ret == H_SUCCESS)
                                continue;

                        if (ret == H_TOO_HARD)
                                return ret;

                        WARN_ON_ONCE(1);
                        /* Clear the entry we actually failed on */
                        kvmppc_clear_tce(stit->tbl, entry + i);
                }
        }

        for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
                kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);