drivers/gpu/drm/nouveau/nouveau_svm.c
/*
 * Copyright 2018 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "nouveau_svm.h"
#include "nouveau_drv.h"
#include "nouveau_chan.h"
#include "nouveau_dmem.h"

#include <nvif/notify.h>
#include <nvif/object.h>
#include <nvif/vmm.h>

#include <nvif/class.h>
#include <nvif/clb069.h>
#include <nvif/ifc00d.h>

#include <linux/sched/mm.h>
#include <linux/sort.h>
#include <linux/hmm.h>

struct nouveau_svm {
        struct nouveau_drm *drm;
        struct mutex mutex;
        struct list_head inst;

        struct nouveau_svm_fault_buffer {
                int id;
                struct nvif_object object;
                u32 entries;
                u32 getaddr;
                u32 putaddr;
                u32 get;
                u32 put;
                struct nvif_notify notify;

                struct nouveau_svm_fault {
                        u64 inst;
                        u64 addr;
                        u64 time;
                        u32 engine;
                        u8  gpc;
                        u8  hub;
                        u8  access;
                        u8  client;
                        u8  fault;
                        struct nouveau_svmm *svmm;
                } **fault;
                int fault_nr;
        } buffer[1];
};

#define SVM_DBG(s,f,a...) NV_DEBUG((s)->drm, "svm: "f"\n", ##a)
#define SVM_ERR(s,f,a...) NV_WARN((s)->drm, "svm: "f"\n", ##a)

struct nouveau_ivmm {
        struct nouveau_svmm *svmm;
        u64 inst;
        struct list_head head;
};

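/* Find the channel-instance-to-SVMM link for the given instance pointer.
 * Callers hold svm->mutex while walking the list.
 */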
static struct nouveau_ivmm *
nouveau_ivmm_find(struct nouveau_svm *svm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        list_for_each_entry(ivmm, &svm->inst, head) {
                if (ivmm->inst == inst)
                        return ivmm;
        }
        return NULL;
}

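/* Per-client SVM state, which mirrors the process address space onto the GPU
 * via an mmu_notifier.  The unmanaged window is the VA range not managed by
 * SVM and is excluded when mirroring invalidations.
 */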
struct nouveau_svmm {
        struct mmu_notifier notifier;
        struct nouveau_vmm *vmm;
        struct {
                unsigned long start;
                unsigned long limit;
        } unmanaged;

        struct mutex mutex;
};

#define SVMM_DBG(s,f,a...)                                                     \
        NV_DEBUG((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)
#define SVMM_ERR(s,f,a...)                                                     \
        NV_WARN((s)->vmm->cli->drm, "svm-%p: "f"\n", (s), ##a)

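/* DRM_NOUVEAU_SVM_BIND ioctl: validate the request and, for the MIGRATE
 * command, migrate the requested address range to GPU VRAM (best effort).
 */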
int
nouveau_svmm_bind(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct drm_nouveau_svm_bind *args = data;
        unsigned target, cmd, priority;
        unsigned long addr, end, size;
        struct mm_struct *mm;

        args->va_start &= PAGE_MASK;
        args->va_end &= PAGE_MASK;

        /* Sanity check arguments */
        if (args->reserved0 || args->reserved1)
                return -EINVAL;
        if (args->header & (~NOUVEAU_SVM_BIND_VALID_MASK))
                return -EINVAL;
        if (args->va_start >= args->va_end)
                return -EINVAL;
        if (!args->npages)
                return -EINVAL;

        cmd = args->header >> NOUVEAU_SVM_BIND_COMMAND_SHIFT;
        cmd &= NOUVEAU_SVM_BIND_COMMAND_MASK;
        switch (cmd) {
        case NOUVEAU_SVM_BIND_COMMAND__MIGRATE:
                break;
        default:
                return -EINVAL;
        }

        priority = args->header >> NOUVEAU_SVM_BIND_PRIORITY_SHIFT;
        priority &= NOUVEAU_SVM_BIND_PRIORITY_MASK;

        /* FIXME support CPU target, ie. all target values < GPU_VRAM */
        target = args->header >> NOUVEAU_SVM_BIND_TARGET_SHIFT;
        target &= NOUVEAU_SVM_BIND_TARGET_MASK;
        switch (target) {
        case NOUVEAU_SVM_BIND_TARGET__GPU_VRAM:
                break;
        default:
                return -EINVAL;
        }

        /*
         * FIXME: For now refuse a non-zero stride; we need to change the
         * migrate kernel function to handle stride to avoid creating a mess
         * within each device driver.
         */
        if (args->stride)
                return -EINVAL;

        size = ((unsigned long)args->npages) << PAGE_SHIFT;
        if ((args->va_start + size) <= args->va_start)
                return -EINVAL;
        if ((args->va_start + size) > args->va_end)
                return -EINVAL;

        /*
         * Ok, we are asked to do something sane. For now we only support
         * migrate commands, but we will add things like memory policy (what
         * to do on page fault) and maybe some other commands.
         */

        mm = get_task_mm(current);
        down_read(&mm->mmap_sem);

        if (!cli->svm.svmm) {
                up_read(&mm->mmap_sem);
                return -EINVAL;
        }

        for (addr = args->va_start, end = args->va_start + size; addr < end;) {
                struct vm_area_struct *vma;
                unsigned long next;

                vma = find_vma_intersection(mm, addr, end);
                if (!vma)
                        break;

                addr = max(addr, vma->vm_start);
                next = min(vma->vm_end, end);
                /* This is a best effort so we ignore errors */
                nouveau_dmem_migrate_vma(cli->drm, vma, addr, next);
                addr = next;
        }

        /*
         * FIXME Return the number of pages we have migrated; again, we need
         * to update the migrate API to return that information so that we
         * can report it to user space.
         */
        args->result = 0;

        up_read(&mm->mmap_sem);
        mmput(mm);

        return 0;
}

/* Unlink channel instance from SVMM. */
void
nouveau_svmm_part(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        if (svmm) {
                mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
                ivmm = nouveau_ivmm_find(svmm->vmm->cli->drm->svm, inst);
                if (ivmm) {
                        list_del(&ivmm->head);
                        kfree(ivmm);
                }
                mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
        }
}

/* Link channel instance to SVMM. */
int
nouveau_svmm_join(struct nouveau_svmm *svmm, u64 inst)
{
        struct nouveau_ivmm *ivmm;
        if (svmm) {
                if (!(ivmm = kmalloc(sizeof(*ivmm), GFP_KERNEL)))
                        return -ENOMEM;
                ivmm->svmm = svmm;
                ivmm->inst = inst;

                mutex_lock(&svmm->vmm->cli->drm->svm->mutex);
                list_add(&ivmm->head, &svmm->vmm->cli->drm->svm->inst);
                mutex_unlock(&svmm->vmm->cli->drm->svm->mutex);
        }
        return 0;
}

/* Invalidate SVMM address-range on GPU. */
static void
nouveau_svmm_invalidate(struct nouveau_svmm *svmm, u64 start, u64 limit)
{
        if (limit > start) {
                bool super = svmm->vmm->vmm.object.client->super;
                svmm->vmm->vmm.object.client->super = true;
                nvif_object_mthd(&svmm->vmm->vmm.object, NVIF_VMM_V0_PFNCLR,
                                 &(struct nvif_vmm_pfnclr_v0) {
                                        .addr = start,
                                        .size = limit - start,
                                 }, sizeof(struct nvif_vmm_pfnclr_v0));
                svmm->vmm->vmm.object.client->super = super;
        }
}

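/* mmu_notifier callback: mirror CPU page-table invalidations to the GPU,
 * clipping out the unmanaged VA window which is not tracked by SVM.
 */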
static int
nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
                                    const struct mmu_notifier_range *update)
{
        struct nouveau_svmm *svmm =
                container_of(mn, struct nouveau_svmm, notifier);
        unsigned long start = update->start;
        unsigned long limit = update->end;

        if (!mmu_notifier_range_blockable(update))
                return -EAGAIN;

        SVMM_DBG(svmm, "invalidate %016lx-%016lx", start, limit);

        mutex_lock(&svmm->mutex);
        if (unlikely(!svmm->vmm))
                goto out;

        if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
                if (start < svmm->unmanaged.start) {
                        nouveau_svmm_invalidate(svmm, start,
                                                svmm->unmanaged.limit);
                }
                start = svmm->unmanaged.limit;
        }

        nouveau_svmm_invalidate(svmm, start, limit);

out:
        mutex_unlock(&svmm->mutex);
        return 0;
}

static void nouveau_svmm_free_notifier(struct mmu_notifier *mn)
{
        kfree(container_of(mn, struct nouveau_svmm, notifier));
}

static const struct mmu_notifier_ops nouveau_mn_ops = {
        .invalidate_range_start = nouveau_svmm_invalidate_range_start,
        .free_notifier = nouveau_svmm_free_notifier,
};

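/* Release a client's SVMM: detach it from the VMM and drop the mmu_notifier
 * reference; the structure itself is freed via the free_notifier callback.
 */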
void
nouveau_svmm_fini(struct nouveau_svmm **psvmm)
{
        struct nouveau_svmm *svmm = *psvmm;
        if (svmm) {
                mutex_lock(&svmm->mutex);
                svmm->vmm = NULL;
                mutex_unlock(&svmm->mutex);
                mmu_notifier_put(&svmm->notifier);
                *psvmm = NULL;
        }
}

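/* DRM_NOUVEAU_SVM_INIT ioctl: replace the client's VMM with one that has
 * replayable faults enabled, and register an mmu_notifier on the current
 * process so CPU invalidations are mirrored to the GPU.
 */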
int
nouveau_svmm_init(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
{
        struct nouveau_cli *cli = nouveau_cli(file_priv);
        struct nouveau_svmm *svmm;
        struct drm_nouveau_svm_init *args = data;
        int ret;

        /* Allocate tracking for SVM-enabled VMM. */
        if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL)))
                return -ENOMEM;
        svmm->vmm = &cli->svm;
        svmm->unmanaged.start = args->unmanaged_addr;
        svmm->unmanaged.limit = args->unmanaged_addr + args->unmanaged_size;
        mutex_init(&svmm->mutex);

        /* Check that SVM isn't already enabled for the client. */
        mutex_lock(&cli->mutex);
        if (cli->svm.cli) {
                ret = -EBUSY;
                goto out_free;
        }

        /* Allocate a new GPU VMM that can support SVM (managed by the
         * client, with replayable faults enabled).
         *
         * All future channel/memory allocations will make use of this
         * VMM instead of the standard one.
         */
        ret = nvif_vmm_init(&cli->mmu, cli->vmm.vmm.object.oclass, true,
                            args->unmanaged_addr, args->unmanaged_size,
                            &(struct gp100_vmm_v0) {
                                .fault_replay = true,
                            }, sizeof(struct gp100_vmm_v0), &cli->svm.vmm);
        if (ret)
                goto out_free;

        down_write(&current->mm->mmap_sem);
        svmm->notifier.ops = &nouveau_mn_ops;
        ret = __mmu_notifier_register(&svmm->notifier, current->mm);
        if (ret)
                goto out_mm_unlock;
        /* Note, ownership of svmm transfers to mmu_notifier */

        cli->svm.svmm = svmm;
        cli->svm.cli = cli;
        up_write(&current->mm->mmap_sem);
        mutex_unlock(&cli->mutex);
        return 0;

out_mm_unlock:
        up_write(&current->mm->mmap_sem);
out_free:
        mutex_unlock(&cli->mutex);
        kfree(svmm);
        return ret;
}

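/* Translation tables mapping HMM pfn flags and special values to the
 * NVIF_VMM_PFNMAP bits used when programming GPU page-table entries.
 */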
static const u64
nouveau_svm_pfn_flags[HMM_PFN_FLAG_MAX] = {
        [HMM_PFN_VALID         ] = NVIF_VMM_PFNMAP_V0_V,
        [HMM_PFN_WRITE         ] = NVIF_VMM_PFNMAP_V0_W,
};

static const u64
nouveau_svm_pfn_values[HMM_PFN_VALUE_MAX] = {
        [HMM_PFN_ERROR  ] = ~NVIF_VMM_PFNMAP_V0_V,
        [HMM_PFN_NONE   ] =  NVIF_VMM_PFNMAP_V0_NONE,
        [HMM_PFN_SPECIAL] = ~NVIF_VMM_PFNMAP_V0_V,
};

/* Issue fault replay for GPU to retry accesses that faulted previously. */
static void
nouveau_svm_fault_replay(struct nouveau_svm *svm)
{
        SVM_DBG(svm, "replay");
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_REPLAY,
                                 &(struct gp100_vmm_fault_replay_vn) {},
                                 sizeof(struct gp100_vmm_fault_replay_vn)));
}

/* Cancel a replayable fault that could not be handled.
 *
 * Cancelling the fault will trigger recovery to reset the engine
 * and kill the offending channel (ie. GPU SIGSEGV).
 */
static void
nouveau_svm_fault_cancel(struct nouveau_svm *svm,
                         u64 inst, u8 hub, u8 gpc, u8 client)
{
        SVM_DBG(svm, "cancel %016llx %d %02x %02x", inst, hub, gpc, client);
        WARN_ON(nvif_object_mthd(&svm->drm->client.vmm.vmm.object,
                                 GP100_VMM_VN_FAULT_CANCEL,
                                 &(struct gp100_vmm_fault_cancel_v0) {
                                        .hub = hub,
                                        .gpc = gpc,
                                        .client = client,
                                        .inst = inst,
                                 }, sizeof(struct gp100_vmm_fault_cancel_v0)));
}

static void
nouveau_svm_fault_cancel_fault(struct nouveau_svm *svm,
                               struct nouveau_svm_fault *fault)
{
        nouveau_svm_fault_cancel(svm, fault->inst,
                                      fault->hub,
                                      fault->gpc,
                                      fault->client);
}

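/* Sort comparator: order faults by instance pointer, then address, with
 * write-type accesses sorting before reads and prefetches at the same
 * address.
 */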
static int
nouveau_svm_fault_cmp(const void *a, const void *b)
{
        const struct nouveau_svm_fault *fa = *(struct nouveau_svm_fault **)a;
        const struct nouveau_svm_fault *fb = *(struct nouveau_svm_fault **)b;
        int ret;
        if ((ret = (s64)fa->inst - fb->inst))
                return ret;
        if ((ret = (s64)fa->addr - fb->addr))
                return ret;
        /*XXX: atomic? */
        return (fa->access == 0 || fa->access == 3) -
               (fb->access == 0 || fb->access == 3);
}

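/* Decode one hardware fault buffer entry at the given offset into the
 * software fault cache, cancelling the fault if no cache entry can be
 * allocated for it.
 */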
static void
nouveau_svm_fault_cache(struct nouveau_svm *svm,
                        struct nouveau_svm_fault_buffer *buffer, u32 offset)
{
        struct nvif_object *memory = &buffer->object;
        const u32 instlo = nvif_rd32(memory, offset + 0x00);
        const u32 insthi = nvif_rd32(memory, offset + 0x04);
        const u32 addrlo = nvif_rd32(memory, offset + 0x08);
        const u32 addrhi = nvif_rd32(memory, offset + 0x0c);
        const u32 timelo = nvif_rd32(memory, offset + 0x10);
        const u32 timehi = nvif_rd32(memory, offset + 0x14);
        const u32 engine = nvif_rd32(memory, offset + 0x18);
        const u32   info = nvif_rd32(memory, offset + 0x1c);
        const u64   inst = (u64)insthi << 32 | instlo;
        const u8     gpc = (info & 0x1f000000) >> 24;
        const u8     hub = (info & 0x00100000) >> 20;
        const u8  client = (info & 0x00007f00) >> 8;
        struct nouveau_svm_fault *fault;

        /* XXX: I think we're supposed to spin waiting */
        if (WARN_ON(!(info & 0x80000000)))
                return;

        nvif_mask(memory, offset + 0x1c, 0x80000000, 0x00000000);

        if (!buffer->fault[buffer->fault_nr]) {
                fault = kmalloc(sizeof(*fault), GFP_KERNEL);
                if (WARN_ON(!fault)) {
                        nouveau_svm_fault_cancel(svm, inst, hub, gpc, client);
                        return;
                }
                buffer->fault[buffer->fault_nr] = fault;
        }

        fault = buffer->fault[buffer->fault_nr++];
        fault->inst   = inst;
        fault->addr   = (u64)addrhi << 32 | addrlo;
        fault->time   = (u64)timehi << 32 | timelo;
        fault->engine = engine;
        fault->gpc    = gpc;
        fault->hub    = hub;
        fault->access = (info & 0x000f0000) >> 16;
        fault->client = client;
        fault->fault  = (info & 0x0000001f);

        SVM_DBG(svm, "fault %016llx %016llx %02x",
                fault->inst, fault->addr, fault->access);
}

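/* Per-fault-window interval notifier, used to detect CPU-side invalidations
 * that race with programming the GPU page tables for the window.
 */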
struct svm_notifier {
        struct mmu_interval_notifier notifier;
        struct nouveau_svmm *svmm;
};

static bool nouveau_svm_range_invalidate(struct mmu_interval_notifier *mni,
                                         const struct mmu_notifier_range *range,
                                         unsigned long cur_seq)
{
        struct svm_notifier *sn =
                container_of(mni, struct svm_notifier, notifier);

        /*
         * serializes the update to mni->invalidate_seq done by caller and
         * prevents invalidation of the PTE from progressing while HW is being
         * programmed. This is very hacky and only works because the normal
         * notifier that does invalidation is always called after the range
         * notifier.
         */
        if (mmu_notifier_range_blockable(range))
                mutex_lock(&sn->svmm->mutex);
        else if (!mutex_trylock(&sn->svmm->mutex))
                return false;
        mmu_interval_set_seq(mni, cur_seq);
        mutex_unlock(&sn->svmm->mutex);
        return true;
}

static const struct mmu_interval_notifier_ops nouveau_svm_mni_ops = {
        .invalidate = nouveau_svm_range_invalidate,
};

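/* Fault in the pages covering the window with hmm_range_fault() and push the
 * resulting mappings to the GPU, retrying while the snapshot is invalidated
 * by concurrent CPU updates, until the timeout expires.
 */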
static int nouveau_range_fault(struct nouveau_svmm *svmm,
                               struct nouveau_drm *drm, void *data, u32 size,
                               u64 *pfns, struct svm_notifier *notifier)
{
        unsigned long timeout =
                jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
        /* Have HMM fault pages within the fault window to the GPU. */
        struct hmm_range range = {
                .notifier = &notifier->notifier,
                .start = notifier->notifier.interval_tree.start,
                .end = notifier->notifier.interval_tree.last + 1,
                .pfns = pfns,
                .flags = nouveau_svm_pfn_flags,
                .values = nouveau_svm_pfn_values,
                .pfn_shift = NVIF_VMM_PFNMAP_V0_ADDR_SHIFT,
        };
        struct mm_struct *mm = notifier->notifier.mm;
        long ret;

        while (true) {
                if (time_after(jiffies, timeout))
                        return -EBUSY;

                range.notifier_seq = mmu_interval_read_begin(range.notifier);
                range.default_flags = 0;
                range.pfn_flags_mask = -1UL;
                down_read(&mm->mmap_sem);
                ret = hmm_range_fault(&range);
                up_read(&mm->mmap_sem);
                if (ret <= 0) {
                        if (ret == 0 || ret == -EBUSY)
                                continue;
                        return ret;
                }

                mutex_lock(&svmm->mutex);
                if (mmu_interval_read_retry(range.notifier,
                                            range.notifier_seq)) {
                        mutex_unlock(&svmm->mutex);
                        continue;
                }
                break;
        }

        nouveau_dmem_convert_pfn(drm, &range);

        svmm->vmm->vmm.object.client->super = true;
        ret = nvif_object_ioctl(&svmm->vmm->vmm.object, data, size, NULL);
        svmm->vmm->vmm.object.client->super = false;
        mutex_unlock(&svmm->mutex);

        return ret;
}

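/* Fault buffer notify handler: drain pending replayable faults from the
 * hardware buffer, resolve them against the owning SVMM's CPU address space
 * in per-window batches, then replay the faults that were satisfied and
 * cancel the rest.
 */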
static int
nouveau_svm_fault(struct nvif_notify *notify)
{
        struct nouveau_svm_fault_buffer *buffer =
                container_of(notify, typeof(*buffer), notify);
        struct nouveau_svm *svm =
                container_of(buffer, typeof(*svm), buffer[buffer->id]);
        struct nvif_object *device = &svm->drm->client.device.object;
        struct nouveau_svmm *svmm;
        struct {
                struct {
                        struct nvif_ioctl_v0 i;
                        struct nvif_ioctl_mthd_v0 m;
                        struct nvif_vmm_pfnmap_v0 p;
                } i;
                u64 phys[16];
        } args;
        struct vm_area_struct *vma;
        u64 inst, start, limit;
        int fi, fn, pi, fill;
        int replay = 0, ret;

        /* Parse available fault buffer entries into a cache, and update
         * the GET pointer so HW can reuse the entries.
         */
        SVM_DBG(svm, "fault handler");
        if (buffer->get == buffer->put) {
                buffer->put = nvif_rd32(device, buffer->putaddr);
                buffer->get = nvif_rd32(device, buffer->getaddr);
                if (buffer->get == buffer->put)
                        return NVIF_NOTIFY_KEEP;
        }
        buffer->fault_nr = 0;

        SVM_DBG(svm, "get %08x put %08x", buffer->get, buffer->put);
        while (buffer->get != buffer->put) {
                nouveau_svm_fault_cache(svm, buffer, buffer->get * 0x20);
                if (++buffer->get == buffer->entries)
                        buffer->get = 0;
        }
        nvif_wr32(device, buffer->getaddr, buffer->get);
        SVM_DBG(svm, "%d fault(s) pending", buffer->fault_nr);

        /* Sort parsed faults by instance pointer to prevent unnecessary
         * instance to SVMM translations, followed by address and access
         * type to reduce the amount of work when handling the faults.
         */
        sort(buffer->fault, buffer->fault_nr, sizeof(*buffer->fault),
             nouveau_svm_fault_cmp, NULL);

        /* Lookup SVMM structure for each unique instance pointer. */
        mutex_lock(&svm->mutex);
        for (fi = 0, svmm = NULL; fi < buffer->fault_nr; fi++) {
                if (!svmm || buffer->fault[fi]->inst != inst) {
                        struct nouveau_ivmm *ivmm =
                                nouveau_ivmm_find(svm, buffer->fault[fi]->inst);
                        svmm = ivmm ? ivmm->svmm : NULL;
                        inst = buffer->fault[fi]->inst;
                        SVM_DBG(svm, "inst %016llx -> svm-%p", inst, svmm);
                }
                buffer->fault[fi]->svmm = svmm;
        }
        mutex_unlock(&svm->mutex);

        /* Process list of faults. */
        args.i.i.version = 0;
        args.i.i.type = NVIF_IOCTL_V0_MTHD;
        args.i.m.version = 0;
        args.i.m.method = NVIF_VMM_V0_PFNMAP;
        args.i.p.version = 0;

        for (fi = 0; fn = fi + 1, fi < buffer->fault_nr; fi = fn) {
                struct svm_notifier notifier;
                struct mm_struct *mm;

                /* Cancel any faults from non-SVM channels. */
                if (!(svmm = buffer->fault[fi]->svmm)) {
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                SVMM_DBG(svmm, "addr %016llx", buffer->fault[fi]->addr);

                /* We try and group handling of faults within a small
                 * window into a single update.
                 */
                start = buffer->fault[fi]->addr;
                limit = start + (ARRAY_SIZE(args.phys) << PAGE_SHIFT);
                if (start < svmm->unmanaged.limit)
                        limit = min_t(u64, limit, svmm->unmanaged.start);
                SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

                mm = svmm->notifier.mm;
                if (!mmget_not_zero(mm)) {
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }

                /* Intersect fault window with the CPU VMA, cancelling
                 * the fault if the address is invalid.
                 */
                down_read(&mm->mmap_sem);
                vma = find_vma_intersection(mm, start, limit);
                if (!vma) {
                        SVMM_ERR(svmm, "wndw %016llx-%016llx", start, limit);
                        up_read(&mm->mmap_sem);
                        mmput(mm);
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }
                start = max_t(u64, start, vma->vm_start);
                limit = min_t(u64, limit, vma->vm_end);
                up_read(&mm->mmap_sem);
                SVMM_DBG(svmm, "wndw %016llx-%016llx", start, limit);

                if (buffer->fault[fi]->addr != start) {
                        SVMM_ERR(svmm, "addr %016llx", buffer->fault[fi]->addr);
                        mmput(mm);
                        nouveau_svm_fault_cancel_fault(svm, buffer->fault[fi]);
                        continue;
                }

                /* Prepare the GPU-side update of all pages within the
                 * fault window, determining required pages and access
                 * permissions based on pending faults.
                 */
                args.i.p.page = PAGE_SHIFT;
                args.i.p.addr = start;
                for (fn = fi, pi = 0;;) {
                        /* Determine required permissions based on GPU fault
                         * access flags.
                         *XXX: atomic?
                         */
                        if (buffer->fault[fn]->access != 0 /* READ. */ &&
                            buffer->fault[fn]->access != 3 /* PREFETCH. */) {
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V |
                                                  NVIF_VMM_PFNMAP_V0_W;
                        } else {
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_V;
                        }
                        args.i.p.size = pi << PAGE_SHIFT;

                        /* It's okay to skip over duplicate addresses from the
                         * same SVMM as faults are ordered by access type such
                         * that only the first one needs to be handled.
                         *
                         * ie. WRITE faults appear first, thus any handling of
                         * pending READ faults will already be satisfied.
                         */
                        while (++fn < buffer->fault_nr &&
                               buffer->fault[fn]->svmm == svmm &&
                               buffer->fault[fn    ]->addr ==
                               buffer->fault[fn - 1]->addr);

                        /* If the next fault is outside the window, or all GPU
                         * faults have been dealt with, we're done here.
                         */
                        if (fn >= buffer->fault_nr ||
                            buffer->fault[fn]->svmm != svmm ||
                            buffer->fault[fn]->addr >= limit)
                                break;

                        /* Fill in the gap between this fault and the next. */
                        fill = (buffer->fault[fn    ]->addr -
                                buffer->fault[fn - 1]->addr) >> PAGE_SHIFT;
                        while (--fill)
                                args.phys[pi++] = NVIF_VMM_PFNMAP_V0_NONE;
                }

                SVMM_DBG(svmm, "wndw %016llx-%016llx covering %d fault(s)",
                         args.i.p.addr,
                         args.i.p.addr + args.i.p.size, fn - fi);

                notifier.svmm = svmm;
                ret = mmu_interval_notifier_insert(&notifier.notifier,
                                                   svmm->notifier.mm,
                                                   args.i.p.addr, args.i.p.size,
                                                   &nouveau_svm_mni_ops);
                if (!ret) {
                        ret = nouveau_range_fault(
                                svmm, svm->drm, &args,
                                sizeof(args.i) + pi * sizeof(args.phys[0]),
                                args.phys, &notifier);
                        mmu_interval_notifier_remove(&notifier.notifier);
                }
                mmput(mm);

                /* Cancel any faults in the window whose pages didn't manage
                 * to keep their valid bit, or stay writeable when required.
                 *
                 * If handling failed completely, cancel all faults.
                 */
                while (fi < fn) {
                        struct nouveau_svm_fault *fault = buffer->fault[fi++];
                        pi = (fault->addr - args.i.p.addr) >> PAGE_SHIFT;
                        if (ret ||
                             !(args.phys[pi] & NVIF_VMM_PFNMAP_V0_V) ||
                            (!(args.phys[pi] & NVIF_VMM_PFNMAP_V0_W) &&
                             fault->access != 0 && fault->access != 3)) {
                                nouveau_svm_fault_cancel_fault(svm, fault);
                                continue;
                        }
                        replay++;
                }
        }

        /* Issue fault replay to the GPU. */
        if (replay)
                nouveau_svm_fault_replay(svm);
        return NVIF_NOTIFY_KEEP;
}

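/* Disarm/re-arm the fault buffer notify; used around suspend and resume. */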
static void
nouveau_svm_fault_buffer_fini(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        nvif_notify_put(&buffer->notify);
}

static int
nouveau_svm_fault_buffer_init(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nvif_object *device = &svm->drm->client.device.object;
        buffer->get = nvif_rd32(device, buffer->getaddr);
        buffer->put = nvif_rd32(device, buffer->putaddr);
        SVM_DBG(svm, "get %08x put %08x (init)", buffer->get, buffer->put);
        return nvif_notify_get(&buffer->notify);
}

static void
nouveau_svm_fault_buffer_dtor(struct nouveau_svm *svm, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        int i;

        if (buffer->fault) {
                for (i = 0; i < buffer->entries && buffer->fault[i]; i++)
                        kfree(buffer->fault[i]);
                kvfree(buffer->fault);
        }

        nouveau_svm_fault_buffer_fini(svm, id);

        nvif_notify_fini(&buffer->notify);
        nvif_object_fini(&buffer->object);
}

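/* Allocate and map a replayable fault buffer object of the given class, hook
 * up its notify handler, and allocate the software fault cache before arming
 * the buffer.
 */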
static int
nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
{
        struct nouveau_svm_fault_buffer *buffer = &svm->buffer[id];
        struct nouveau_drm *drm = svm->drm;
        struct nvif_object *device = &drm->client.device.object;
        struct nvif_clb069_v0 args = {};
        int ret;

        buffer->id = id;

        ret = nvif_object_init(device, 0, oclass, &args, sizeof(args),
                               &buffer->object);
        if (ret < 0) {
                SVM_ERR(svm, "Fault buffer allocation failed: %d", ret);
                return ret;
        }

        nvif_object_map(&buffer->object, NULL, 0);
        buffer->entries = args.entries;
        buffer->getaddr = args.get;
        buffer->putaddr = args.put;

        ret = nvif_notify_init(&buffer->object, nouveau_svm_fault, true,
                               NVB069_V0_NTFY_FAULT, NULL, 0, 0,
                               &buffer->notify);
        if (ret)
                return ret;

        buffer->fault = kvzalloc(sizeof(*buffer->fault) * buffer->entries, GFP_KERNEL);
        if (!buffer->fault)
                return -ENOMEM;

        return nouveau_svm_fault_buffer_init(svm, id);
}

void
nouveau_svm_resume(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm)
                nouveau_svm_fault_buffer_init(svm, 0);
}

void
nouveau_svm_suspend(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm)
                nouveau_svm_fault_buffer_fini(svm, 0);
}

void
nouveau_svm_fini(struct nouveau_drm *drm)
{
        struct nouveau_svm *svm = drm->svm;
        if (svm) {
                nouveau_svm_fault_buffer_dtor(svm, 0);
                kfree(drm->svm);
                drm->svm = NULL;
        }
}

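/* Per-device SVM setup: allocate tracking state and create fault buffer 0,
 * if a supported replayable fault buffer class is exposed by the device.
 */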
void
nouveau_svm_init(struct nouveau_drm *drm)
{
        static const struct nvif_mclass buffers[] = {
                {   VOLTA_FAULT_BUFFER_A, 0 },
                { MAXWELL_FAULT_BUFFER_A, 0 },
                {}
        };
        struct nouveau_svm *svm;
        int ret;

        /* Disable on Volta and newer until channel recovery is fixed,
         * otherwise clients will have a trivial way to trash the GPU
         * for everyone.
         */
        if (drm->client.device.info.family > NV_DEVICE_INFO_V0_PASCAL)
                return;

        if (!(drm->svm = svm = kzalloc(sizeof(*drm->svm), GFP_KERNEL)))
                return;

        drm->svm->drm = drm;
        mutex_init(&drm->svm->mutex);
        INIT_LIST_HEAD(&drm->svm->inst);

        ret = nvif_mclass(&drm->client.device.object, buffers);
        if (ret < 0) {
                SVM_DBG(svm, "No supported fault buffer class");
                nouveau_svm_fini(drm);
                return;
        }

        ret = nouveau_svm_fault_buffer_ctor(svm, buffers[ret].oclass, 0);
        if (ret) {
                nouveau_svm_fini(drm);
                return;
        }

        SVM_DBG(svm, "Initialised");
}