drm/etnaviv: implement per-process address spaces on MMUv2
drivers/gpu/drm/etnaviv/etnaviv_drv.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/component.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_of.h>
#include <drm/drm_prime.h>

#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_mmu.h"
#include "etnaviv_perfmon.h"

/*
 * DRM operations:
 */

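/*
 * Bring up every GPU core found during component bind; a core that fails to
 * initialize is dropped from the pipe array so it appears absent to userspace.
 */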
static void load_gpu(struct drm_device *dev)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        unsigned int i;

        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                struct etnaviv_gpu *g = priv->gpu[i];

                if (g) {
                        int ret;

                        ret = etnaviv_gpu_init(g);
                        if (ret)
                                priv->gpu[i] = NULL;
                }
        }
}

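/*
 * Per-open setup: allocate the file private context, create a fresh MMU
 * context for this process' GPU address space and set up one scheduler
 * entity per populated pipe.
 */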
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_file_private *ctx;
        int ret, i;

        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;

        ctx->mmu = etnaviv_iommu_context_init(priv->mmu_global,
                                              priv->cmdbuf_suballoc);
        if (!ctx->mmu) {
                ret = -ENOMEM;
                goto out_free;
        }

        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                struct etnaviv_gpu *gpu = priv->gpu[i];
                struct drm_sched_rq *rq;

                if (gpu) {
                        rq = &gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
                        drm_sched_entity_init(&ctx->sched_entity[i],
                                              &rq, 1, NULL);
                }
        }

        file->driver_priv = ctx;

        return 0;

out_free:
        kfree(ctx);
        return ret;
}

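/*
 * Per-close teardown: destroy the scheduler entities and drop the reference
 * on the per-process MMU context before freeing the file private context.
 */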
static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_file_private *ctx = file->driver_priv;
        unsigned int i;

        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                struct etnaviv_gpu *gpu = priv->gpu[i];

                if (gpu)
                        drm_sched_entity_destroy(&ctx->sched_entity[i]);
        }

        etnaviv_iommu_context_put(ctx->mmu);

        kfree(ctx);
}

/*
 * DRM debugfs:
 */

#ifdef CONFIG_DEBUG_FS
static int etnaviv_gem_show(struct drm_device *dev, struct seq_file *m)
{
        struct etnaviv_drm_private *priv = dev->dev_private;

        etnaviv_gem_describe_objects(priv, m);

        return 0;
}

static int etnaviv_mm_show(struct drm_device *dev, struct seq_file *m)
{
        struct drm_printer p = drm_seq_file_printer(m);

        read_lock(&dev->vma_offset_manager->vm_lock);
        drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
        read_unlock(&dev->vma_offset_manager->vm_lock);

        return 0;
}

static int etnaviv_mmu_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
        struct drm_printer p = drm_seq_file_printer(m);
        struct etnaviv_iommu_context *mmu_context;

        seq_printf(m, "Active Objects (%s):\n", dev_name(gpu->dev));

        /*
         * Lock the GPU to avoid a MMU context switch just now and elevate
         * the refcount of the current context to avoid it disappearing from
         * under our feet.
         */
        mutex_lock(&gpu->lock);
        mmu_context = gpu->mmu_context;
        if (mmu_context)
                etnaviv_iommu_context_get(mmu_context);
        mutex_unlock(&gpu->lock);

        if (!mmu_context)
                return 0;

        mutex_lock(&mmu_context->lock);
        drm_mm_print(&mmu_context->mm, &p);
        mutex_unlock(&mmu_context->lock);

        etnaviv_iommu_context_put(mmu_context);

        return 0;
}

static void etnaviv_buffer_dump(struct etnaviv_gpu *gpu, struct seq_file *m)
{
        struct etnaviv_cmdbuf *buf = &gpu->buffer;
        u32 size = buf->size;
        u32 *ptr = buf->vaddr;
        u32 i;

        seq_printf(m, "virt %p - phys 0x%llx - free 0x%08x\n",
                        buf->vaddr, (u64)etnaviv_cmdbuf_get_pa(buf),
                        size - buf->user_size);

        for (i = 0; i < size / 4; i++) {
                if (i && !(i % 4))
                        seq_puts(m, "\n");
                if (i % 4 == 0)
                        seq_printf(m, "\t0x%p: ", ptr + i);
                seq_printf(m, "%08x ", *(ptr + i));
        }
        seq_puts(m, "\n");
}

static int etnaviv_ring_show(struct etnaviv_gpu *gpu, struct seq_file *m)
{
        seq_printf(m, "Ring Buffer (%s): ", dev_name(gpu->dev));

        mutex_lock(&gpu->lock);
        etnaviv_buffer_dump(gpu, m);
        mutex_unlock(&gpu->lock);

        return 0;
}

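/*
 * debugfs dispatchers: show_unlocked() invokes a device-wide dump callback
 * once, show_each_gpu() invokes a per-GPU callback for every populated pipe.
 */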
static int show_unlocked(struct seq_file *m, void *arg)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        int (*show)(struct drm_device *dev, struct seq_file *m) =
                        node->info_ent->data;

        return show(dev, m);
}

static int show_each_gpu(struct seq_file *m, void *arg)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct etnaviv_gpu *gpu;
        int (*show)(struct etnaviv_gpu *gpu, struct seq_file *m) =
                        node->info_ent->data;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < ETNA_MAX_PIPES; i++) {
                gpu = priv->gpu[i];
                if (!gpu)
                        continue;

                ret = show(gpu, m);
                if (ret < 0)
                        break;
        }

        return ret;
}

static struct drm_info_list etnaviv_debugfs_list[] = {
                {"gpu", show_each_gpu, 0, etnaviv_gpu_debugfs},
                {"gem", show_unlocked, 0, etnaviv_gem_show},
                {"mm", show_unlocked, 0, etnaviv_mm_show},
                {"mmu", show_each_gpu, 0, etnaviv_mmu_show},
                {"ring", show_each_gpu, 0, etnaviv_ring_show},
};

static int etnaviv_debugfs_init(struct drm_minor *minor)
{
        struct drm_device *dev = minor->dev;
        int ret;

        ret = drm_debugfs_create_files(etnaviv_debugfs_list,
                        ARRAY_SIZE(etnaviv_debugfs_list),
                        minor->debugfs_root, minor);

        if (ret) {
                dev_err(dev->dev, "could not install etnaviv_debugfs_list\n");
                return ret;
        }

        return ret;
}
#endif

/*
 * DRM ioctls:
 */

static int etnaviv_ioctl_get_param(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct drm_etnaviv_param *args = data;
        struct etnaviv_gpu *gpu;

        if (args->pipe >= ETNA_MAX_PIPES)
                return -EINVAL;

        gpu = priv->gpu[args->pipe];
        if (!gpu)
                return -ENXIO;

        return etnaviv_gpu_get_param(gpu, args->param, &args->value);
}

static int etnaviv_ioctl_gem_new(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_etnaviv_gem_new *args = data;

        if (args->flags & ~(ETNA_BO_CACHED | ETNA_BO_WC | ETNA_BO_UNCACHED |
                            ETNA_BO_FORCE_MMU))
                return -EINVAL;

        return etnaviv_gem_new_handle(dev, file, args->size,
                        args->flags, &args->handle);
}

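/* Build a kernel struct timespec from the tv_sec/tv_nsec pair of an ioctl argument. */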
#define TS(t) ((struct timespec){ \
        .tv_sec = (t).tv_sec, \
        .tv_nsec = (t).tv_nsec \
})

static int etnaviv_ioctl_gem_cpu_prep(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_etnaviv_gem_cpu_prep *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (args->op & ~(ETNA_PREP_READ | ETNA_PREP_WRITE | ETNA_PREP_NOSYNC))
                return -EINVAL;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = etnaviv_gem_cpu_prep(obj, args->op, &TS(args->timeout));

        drm_gem_object_put_unlocked(obj);

        return ret;
}

static int etnaviv_ioctl_gem_cpu_fini(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_etnaviv_gem_cpu_fini *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (args->flags)
                return -EINVAL;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = etnaviv_gem_cpu_fini(obj);

        drm_gem_object_put_unlocked(obj);

        return ret;
}

static int etnaviv_ioctl_gem_info(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_etnaviv_gem_info *args = data;
        struct drm_gem_object *obj;
        int ret;

        if (args->pad)
                return -EINVAL;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        ret = etnaviv_gem_mmap_offset(obj, &args->offset);
        drm_gem_object_put_unlocked(obj);

        return ret;
}

static int etnaviv_ioctl_wait_fence(struct drm_device *dev, void *data,
                struct drm_file *file)
{
        struct drm_etnaviv_wait_fence *args = data;
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct timespec *timeout = &TS(args->timeout);
        struct etnaviv_gpu *gpu;

        if (args->flags & ~(ETNA_WAIT_NONBLOCK))
                return -EINVAL;

        if (args->pipe >= ETNA_MAX_PIPES)
                return -EINVAL;

        gpu = priv->gpu[args->pipe];
        if (!gpu)
                return -ENXIO;

        if (args->flags & ETNA_WAIT_NONBLOCK)
                timeout = NULL;

        return etnaviv_gpu_wait_fence_interruptible(gpu, args->fence,
                                                    timeout);
}

static int etnaviv_ioctl_gem_userptr(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct drm_etnaviv_gem_userptr *args = data;

        if (args->flags & ~(ETNA_USERPTR_READ|ETNA_USERPTR_WRITE) ||
            args->flags == 0)
                return -EINVAL;

        if (offset_in_page(args->user_ptr | args->user_size) ||
            (uintptr_t)args->user_ptr != args->user_ptr ||
            (u32)args->user_size != args->user_size ||
            args->user_ptr & ~PAGE_MASK)
                return -EINVAL;

        if (!access_ok((void __user *)(unsigned long)args->user_ptr,
                       args->user_size))
                return -EFAULT;

        return etnaviv_gem_new_userptr(dev, file, args->user_ptr,
                                       args->user_size, args->flags,
                                       &args->handle);
}

static int etnaviv_ioctl_gem_wait(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct drm_etnaviv_gem_wait *args = data;
        struct timespec *timeout = &TS(args->timeout);
        struct drm_gem_object *obj;
        struct etnaviv_gpu *gpu;
        int ret;

        if (args->flags & ~(ETNA_WAIT_NONBLOCK))
                return -EINVAL;

        if (args->pipe >= ETNA_MAX_PIPES)
                return -EINVAL;

        gpu = priv->gpu[args->pipe];
        if (!gpu)
                return -ENXIO;

        obj = drm_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;

        if (args->flags & ETNA_WAIT_NONBLOCK)
                timeout = NULL;

        ret = etnaviv_gem_wait_bo(gpu, obj, timeout);

        drm_gem_object_put_unlocked(obj);

        return ret;
}

static int etnaviv_ioctl_pm_query_dom(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct drm_etnaviv_pm_domain *args = data;
        struct etnaviv_gpu *gpu;

        if (args->pipe >= ETNA_MAX_PIPES)
                return -EINVAL;

        gpu = priv->gpu[args->pipe];
        if (!gpu)
                return -ENXIO;

        return etnaviv_pm_query_dom(gpu, args);
}

static int etnaviv_ioctl_pm_query_sig(struct drm_device *dev, void *data,
        struct drm_file *file)
{
        struct etnaviv_drm_private *priv = dev->dev_private;
        struct drm_etnaviv_pm_signal *args = data;
        struct etnaviv_gpu *gpu;

        if (args->pipe >= ETNA_MAX_PIPES)
                return -EINVAL;

        gpu = priv->gpu[args->pipe];
        if (!gpu)
                return -ENXIO;

        return etnaviv_pm_query_sig(gpu, args);
}

static const struct drm_ioctl_desc etnaviv_ioctls[] = {
#define ETNA_IOCTL(n, func, flags) \
        DRM_IOCTL_DEF_DRV(ETNAVIV_##n, etnaviv_ioctl_##func, flags)
        ETNA_IOCTL(GET_PARAM,    get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(GEM_NEW,      gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(GEM_INFO,     gem_info,     DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(GEM_CPU_PREP, gem_cpu_prep, DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(GEM_CPU_FINI, gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(GEM_SUBMIT,   gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(WAIT_FENCE,   wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(GEM_USERPTR,  gem_userptr,  DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(GEM_WAIT,     gem_wait,     DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(PM_QUERY_DOM, pm_query_dom, DRM_AUTH|DRM_RENDER_ALLOW),
        ETNA_IOCTL(PM_QUERY_SIG, pm_query_sig, DRM_AUTH|DRM_RENDER_ALLOW),
};

static const struct vm_operations_struct vm_ops = {
        .fault = etnaviv_gem_fault,
        .open = drm_gem_vm_open,
        .close = drm_gem_vm_close,
};

static const struct file_operations fops = {
        .owner              = THIS_MODULE,
        .open               = drm_open,
        .release            = drm_release,
        .unlocked_ioctl     = drm_ioctl,
        .compat_ioctl       = drm_compat_ioctl,
        .poll               = drm_poll,
        .read               = drm_read,
        .llseek             = no_llseek,
        .mmap               = etnaviv_gem_mmap,
};

static struct drm_driver etnaviv_drm_driver = {
        .driver_features    = DRIVER_GEM |
                                DRIVER_PRIME |
                                DRIVER_RENDER,
        .open               = etnaviv_open,
        .postclose          = etnaviv_postclose,
        .gem_free_object_unlocked = etnaviv_gem_free_object,
        .gem_vm_ops         = &vm_ops,
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export   = drm_gem_prime_export,
        .gem_prime_import   = drm_gem_prime_import,
        .gem_prime_pin      = etnaviv_gem_prime_pin,
        .gem_prime_unpin    = etnaviv_gem_prime_unpin,
        .gem_prime_get_sg_table = etnaviv_gem_prime_get_sg_table,
        .gem_prime_import_sg_table = etnaviv_gem_prime_import_sg_table,
        .gem_prime_vmap     = etnaviv_gem_prime_vmap,
        .gem_prime_vunmap   = etnaviv_gem_prime_vunmap,
        .gem_prime_mmap     = etnaviv_gem_prime_mmap,
#ifdef CONFIG_DEBUG_FS
        .debugfs_init       = etnaviv_debugfs_init,
#endif
        .ioctls             = etnaviv_ioctls,
        .num_ioctls         = DRM_ETNAVIV_NUM_IOCTLS,
        .fops               = &fops,
        .name               = "etnaviv",
        .desc               = "etnaviv DRM",
        .date               = "20151214",
        .major              = 1,
        .minor              = 2,
};

/*
 * Platform driver:
 */
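/*
 * Master bind: create the DRM device, set up DMA parameters and the shared
 * cmdbuf suballocator, bind all GPU core components and register the device.
 */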
static int etnaviv_bind(struct device *dev)
{
        struct etnaviv_drm_private *priv;
        struct drm_device *drm;
        int ret;

        drm = drm_dev_alloc(&etnaviv_drm_driver, dev);
        if (IS_ERR(drm))
                return PTR_ERR(drm);

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv) {
                dev_err(dev, "failed to allocate private data\n");
                ret = -ENOMEM;
                goto out_put;
        }
        drm->dev_private = priv;

        dev->dma_parms = &priv->dma_parms;
        dma_set_max_seg_size(dev, SZ_2G);

        mutex_init(&priv->gem_lock);
        INIT_LIST_HEAD(&priv->gem_list);
        priv->num_gpus = 0;

        priv->cmdbuf_suballoc = etnaviv_cmdbuf_suballoc_new(drm->dev);
        if (IS_ERR(priv->cmdbuf_suballoc)) {
                dev_err(drm->dev, "Failed to create cmdbuf suballocator\n");
                ret = PTR_ERR(priv->cmdbuf_suballoc);
                goto out_free_priv;
        }

        dev_set_drvdata(dev, drm);

        ret = component_bind_all(dev, drm);
        if (ret < 0)
                goto out_destroy_suballoc;

        load_gpu(drm);

        ret = drm_dev_register(drm, 0);
        if (ret)
                goto out_unbind;

        return 0;

out_unbind:
        component_unbind_all(dev, drm);
out_destroy_suballoc:
        etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);
out_free_priv:
        kfree(priv);
out_put:
        drm_dev_put(drm);

        return ret;
}

static void etnaviv_unbind(struct device *dev)
{
        struct drm_device *drm = dev_get_drvdata(dev);
        struct etnaviv_drm_private *priv = drm->dev_private;

        drm_dev_unregister(drm);

        component_unbind_all(dev, drm);

        dev->dma_parms = NULL;

        etnaviv_cmdbuf_suballoc_destroy(priv->cmdbuf_suballoc);

        drm->dev_private = NULL;
        kfree(priv);

        drm_dev_put(drm);
}

static const struct component_master_ops etnaviv_master_ops = {
        .bind = etnaviv_bind,
        .unbind = etnaviv_unbind,
};

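/* Component match callbacks: match GPU cores by OF node or by device name. */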
static int compare_of(struct device *dev, void *data)
{
        struct device_node *np = data;

        return dev->of_node == np;
}

static int compare_str(struct device *dev, void *data)
{
        return !strcmp(dev_name(dev), data);
}

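/*
 * Build the component match list from the available "vivante,gc" DT nodes,
 * or from a NULL-terminated name list in the platform data, then register
 * the component master.
 */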
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct component_match *match = NULL;

        if (!dev->platform_data) {
                struct device_node *core_node;

                for_each_compatible_node(core_node, NULL, "vivante,gc") {
                        if (!of_device_is_available(core_node))
                                continue;

                        drm_of_component_match_add(&pdev->dev, &match,
                                                   compare_of, core_node);
                }
        } else {
                char **names = dev->platform_data;
                unsigned i;

                for (i = 0; names[i]; i++)
                        component_match_add(dev, &match, compare_str, names[i]);
        }

        return component_master_add_with_match(dev, &etnaviv_master_ops, match);
}

static int etnaviv_pdev_remove(struct platform_device *pdev)
{
        component_master_del(&pdev->dev, &etnaviv_master_ops);

        return 0;
}

static struct platform_driver etnaviv_platform_driver = {
        .probe      = etnaviv_pdev_probe,
        .remove     = etnaviv_pdev_remove,
        .driver     = {
                .name   = "etnaviv",
        },
};

static struct platform_device *etnaviv_drm;

static int __init etnaviv_init(void)
{
        struct platform_device *pdev;
        int ret;
        struct device_node *np;

        etnaviv_validate_init();

        ret = platform_driver_register(&etnaviv_gpu_driver);
        if (ret != 0)
                return ret;

        ret = platform_driver_register(&etnaviv_platform_driver);
        if (ret != 0)
                goto unregister_gpu_driver;

        /*
         * If the DT contains at least one available GPU device, instantiate
         * the DRM platform device.
         */
        for_each_compatible_node(np, NULL, "vivante,gc") {
                if (!of_device_is_available(np))
                        continue;

                pdev = platform_device_alloc("etnaviv", -1);
                if (!pdev) {
                        ret = -ENOMEM;
                        of_node_put(np);
                        goto unregister_platform_driver;
                }
                pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
                pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;

                /*
                 * Apply the same DMA configuration to the virtual etnaviv
                 * device as the GPU we found. This assumes that all Vivante
                 * GPUs in the system share the same DMA constraints.
                 */
                of_dma_configure(&pdev->dev, np, true);

                ret = platform_device_add(pdev);
                if (ret) {
                        platform_device_put(pdev);
                        of_node_put(np);
                        goto unregister_platform_driver;
                }

                etnaviv_drm = pdev;
                of_node_put(np);
                break;
        }

        return 0;

unregister_platform_driver:
        platform_driver_unregister(&etnaviv_platform_driver);
unregister_gpu_driver:
        platform_driver_unregister(&etnaviv_gpu_driver);
        return ret;
}
module_init(etnaviv_init);

static void __exit etnaviv_exit(void)
{
        platform_device_unregister(etnaviv_drm);
        platform_driver_unregister(&etnaviv_platform_driver);
        platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);

MODULE_AUTHOR("Christian Gmeiner <christian.gmeiner@gmail.com>");
MODULE_AUTHOR("Russell King <rmk+kernel@arm.linux.org.uk>");
MODULE_AUTHOR("Lucas Stach <l.stach@pengutronix.de>");
MODULE_DESCRIPTION("etnaviv DRM Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:etnaviv");