1 /*
2  * \file drm_vm.c
3  * Memory mapping for DRM
4  *
5  * \author Rickard E. (Rik) Faith <faith@valinux.com>
6  * \author Gareth Hughes <gareth@valinux.com>
7  */
8
9 /*
10  * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
11  *
12  * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13  * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14  * All Rights Reserved.
15  *
16  * Permission is hereby granted, free of charge, to any person obtaining a
17  * copy of this software and associated documentation files (the "Software"),
18  * to deal in the Software without restriction, including without limitation
19  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20  * and/or sell copies of the Software, and to permit persons to whom the
21  * Software is furnished to do so, subject to the following conditions:
22  *
23  * The above copyright notice and this permission notice (including the next
24  * paragraph) shall be included in all copies or substantial portions of the
25  * Software.
26  *
27  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
30  * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33  * OTHER DEALINGS IN THE SOFTWARE.
34  */
35
36 #include "drmP.h"
37
38 #if defined(__ia64__)
39 #include <linux/efi.h>
40 #endif
41
42 static void drm_vm_open(struct vm_area_struct *vma);
43 static void drm_vm_close(struct vm_area_struct *vma);
44 static int drm_bo_mmap_locked(struct vm_area_struct *vma,
45                               struct file *filp,
46                               drm_local_map_t *map);
47
48 pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
49 {
50         pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
51
52 #if defined(__i386__) || defined(__x86_64__)
53 #ifdef USE_PAT_WC
54 #warning using pat
55         if (drm_use_pat() && map_type == _DRM_TTM) {
56                 pgprot_val(tmp) |= _PAGE_PAT;
57                 return tmp;
58         }
59 #endif
60         if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
61                 pgprot_val(tmp) |= _PAGE_PCD;
62                 pgprot_val(tmp) &= ~_PAGE_PWT;
63         }
64 #elif defined(__powerpc__)
65         pgprot_val(tmp) |= _PAGE_NO_CACHE;
66         if (map_type == _DRM_REGISTERS)
67                 pgprot_val(tmp) |= _PAGE_GUARDED;
68 #endif
69 #if defined(__ia64__)
70         if (efi_range_is_wc(vma->vm_start, vma->vm_end -
71                                     vma->vm_start))
72                 tmp = pgprot_writecombine(tmp);
73         else
74                 tmp = pgprot_noncached(tmp);
75 #endif
76         return tmp;
77 }
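
/*
 * Usage example (illustrative): drm_io_prot() is applied to
 * vma->vm_page_prot before remapping I/O or TTM memory, as the register /
 * frame-buffer path in drm_mmap_locked() below does:
 *
 *     vma->vm_page_prot = drm_io_prot(map->type, vma);
 *     if (io_remap_pfn_range(vma, vma->vm_start,
 *                            (map->offset + offset) >> PAGE_SHIFT,
 *                            vma->vm_end - vma->vm_start,
 *                            vma->vm_page_prot))
 *             return -EAGAIN;
 *
 * On x86 this yields an uncached (PCD) mapping, on powerpc a no-cache (and,
 * for registers, guarded) mapping, and on ia64 a write-combined or
 * non-cached mapping depending on what EFI reports for the range.
 */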
78
79 #ifndef DRM_VM_NOPAGE
/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault descriptor; the faulting address is vmf->virtual_address.
 * \return 0 on success (with vmf->page set) or VM_FAULT_SIGBUS.
 *
 * Find the right map and, if it is AGP memory, find the real physical page
 * to map, get the page, increment its use count and store it in \p vmf.
 */
90 #if __OS_HAS_AGP
91 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
92 {
93         struct drm_file *priv = vma->vm_file->private_data;
94         struct drm_device *dev = priv->head->dev;
95         struct drm_map *map = NULL;
96         struct drm_map_list *r_list;
97         struct drm_hash_item *hash;
98
99         /*
100          * Find the right map
101          */
102         if (!drm_core_has_AGP(dev))
103                 goto vm_fault_error;
104
105         if (!dev->agp || !dev->agp->cant_use_aperture)
106                 goto vm_fault_error;
107
108         if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
109                 goto vm_fault_error;
110
111         r_list = drm_hash_entry(hash, struct drm_map_list, hash);
112         map = r_list->map;
113
114         if (map && map->type == _DRM_AGP) {
115                 /*
116                  * Using vm_pgoff as a selector forces us to use this unusual
117                  * addressing scheme.
118                  */
119                 unsigned long offset = (unsigned long)vmf->virtual_address -
120                                                                 vma->vm_start;
121                 unsigned long baddr = map->offset + offset;
122                 struct drm_agp_mem *agpmem;
123                 struct page *page;
124
125 #ifdef __alpha__
126                 /*
127                  * Adjust to a bus-relative address
128                  */
129                 baddr -= dev->hose->mem_space->start;
130 #endif
131
132                 /*
133                  * It's AGP memory - find the real physical page to map
134                  */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                /*
                 * If the loop completed without a match, agpmem points at the
                 * list head rather than a real entry (it is never NULL here),
                 * so check for that instead.
                 */
                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;
143
144                 /*
145                  * Get the page, inc the use count, and return it
146                  */
147                 offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
148                 page = virt_to_page(__va(agpmem->memory->memory[offset]));
149                 get_page(page);
150                 vmf->page = page;
151
152                 DRM_DEBUG
153                     ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
154                      baddr, __va(agpmem->memory->memory[offset]), offset,
155                      page_count(page));
156                 return 0;
157         }
158 vm_fault_error:
159         return VM_FAULT_SIGBUS; /* Disallow mremap */
160 }
161 #else                           /* __OS_HAS_AGP */
162 static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
163 {
164         return VM_FAULT_SIGBUS;
165 }
166 #endif                          /* __OS_HAS_AGP */
167
/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault descriptor; the faulting address is vmf->virtual_address.
 * \return 0 on success (with vmf->page set) or VM_FAULT_SIGBUS.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * store it in \p vmf.
 */
178 static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
179 {
180         struct drm_map *map = (struct drm_map *) vma->vm_private_data;
181         unsigned long offset;
182         unsigned long i;
183         struct page *page;
184
185         if (!map)
186                 return VM_FAULT_SIGBUS; /* Nothing allocated */
187
188         offset = (unsigned long)vmf->virtual_address - vma->vm_start;
189         i = (unsigned long)map->handle + offset;
190         page = vmalloc_to_page((void *)i);
191         if (!page)
192                 return VM_FAULT_SIGBUS;
193         get_page(page);
194         vmf->page = page;
195
196         DRM_DEBUG("shm_fault 0x%lx\n", offset);
197         return 0;
198 }
199 #endif
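
/*
 * Background (illustrative): _DRM_SHM maps are vmalloc()ed, which is why
 * the fault handler above can resolve the backing page with
 * vmalloc_to_page() and why drm_vm_shm_close() below releases the handle
 * with vfree():
 *
 *     page = vmalloc_to_page((void *)((unsigned long)map->handle + offset));
 */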
200
201 /**
202  * \c close method for shared virtual memory.
203  *
204  * \param vma virtual memory area.
205  *
206  * Deletes map information if we are the last
207  * person to close a mapping and it's not in the global maplist.
208  */
209 static void drm_vm_shm_close(struct vm_area_struct *vma)
210 {
211         struct drm_file *priv = vma->vm_file->private_data;
212         struct drm_device *dev = priv->head->dev;
213         struct drm_vma_entry *pt, *temp;
214         struct drm_map *map;
215         struct drm_map_list *r_list;
216         int found_maps = 0;
217
218         DRM_DEBUG("0x%08lx,0x%08lx\n",
219                   vma->vm_start, vma->vm_end - vma->vm_start);
220         atomic_dec(&dev->vma_count);
221
222         map = vma->vm_private_data;
223
224         mutex_lock(&dev->struct_mutex);
225         list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
226                 if (pt->vma->vm_private_data == map)
227                         found_maps++;
228                 if (pt->vma == vma) {
229                         list_del(&pt->head);
230                         drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
231                 }
232         }
        /* This was the last VMA that referenced the map */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not,
                 * then we delete this mapping's information.
                 */
238                 found_maps = 0;
239                 list_for_each_entry(r_list, &dev->maplist, head) {
240                         if (r_list->map == map)
241                                 found_maps++;
242                 }
243
244                 if (!found_maps) {
245                         drm_dma_handle_t dmah;
246
247                         switch (map->type) {
248                         case _DRM_REGISTERS:
249                         case _DRM_FRAME_BUFFER:
250                                 if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
251                                         int retcode;
252                                         retcode = mtrr_del(map->mtrr,
253                                                            map->offset,
254                                                            map->size);
255                                         DRM_DEBUG("mtrr_del = %d\n", retcode);
256                                 }
257                                 iounmap(map->handle);
258                                 break;
259                         case _DRM_SHM:
260                                 vfree(map->handle);
261                                 break;
262                         case _DRM_AGP:
263                         case _DRM_SCATTER_GATHER:
264                                 break;
265                         case _DRM_CONSISTENT:
266                                 dmah.vaddr = map->handle;
267                                 dmah.busaddr = map->offset;
268                                 dmah.size = map->size;
269                                 __drm_pci_free(dev, &dmah);
270                                 break;
271                         case _DRM_TTM:
272                                 BUG_ON(1);
273                                 break;
274                         }
275                         drm_free(map, sizeof(*map), DRM_MEM_MAPS);
276                 }
277         }
278         mutex_unlock(&dev->struct_mutex);
279 }
280
281 #ifndef DRM_VM_NOPAGE
/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault descriptor; the faulting address is vmf->virtual_address.
 * \return 0 on success (with vmf->page set) or VM_FAULT_SIGBUS.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
291 static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
292 {
293         struct drm_file *priv = vma->vm_file->private_data;
294         struct drm_device *dev = priv->head->dev;
295         struct drm_device_dma *dma = dev->dma;
296         unsigned long offset;
297         unsigned long page_nr;
298         struct page *page;
299
300         if (!dma)
301                 return VM_FAULT_SIGBUS; /* Error */
302         if (!dma->pagelist)
303                 return VM_FAULT_SIGBUS; /* Nothing allocated */
304
305         offset = (unsigned long)vmf->virtual_address - vma->vm_start;   /* vm_[pg]off[set] should be 0 */
306         page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
307         page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));
308
309         get_page(page);
310         vmf->page = page;
311
312         DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
313         return 0;
314 }
315
/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault descriptor; the faulting address is vmf->virtual_address.
 * \return 0 on success (with vmf->page set) or VM_FAULT_SIGBUS.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
325 static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
326 {
327         struct drm_map *map = (struct drm_map *) vma->vm_private_data;
328         struct drm_file *priv = vma->vm_file->private_data;
329         struct drm_device *dev = priv->head->dev;
330         struct drm_sg_mem *entry = dev->sg;
331         unsigned long offset;
332         unsigned long map_offset;
333         unsigned long page_offset;
334         struct page *page;
335
336         if (!entry)
337                 return VM_FAULT_SIGBUS; /* Error */
338         if (!entry->pagelist)
339                 return VM_FAULT_SIGBUS; /* Nothing allocated */
340
341         offset = (unsigned long)vmf->virtual_address - vma->vm_start;
342         map_offset = map->offset - (unsigned long)dev->sg->virtual;
343         page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
344         page = entry->pagelist[page_offset];
345         get_page(page);
346         vmf->page = page;
347
348         return 0;
349 }
350 #endif
351
352 /** AGP virtual memory operations */
353 static struct vm_operations_struct drm_vm_ops = {
354 #ifdef DRM_VM_NOPAGE
355         .nopage = drm_vm_nopage,
356 #else
357         .fault = drm_do_vm_fault,
358 #endif
359         .open = drm_vm_open,
360         .close = drm_vm_close,
361 };
362
363 /** Shared virtual memory operations */
364 static struct vm_operations_struct drm_vm_shm_ops = {
365 #ifdef DRM_VM_NOPAGE
366         .nopage = drm_vm_shm_nopage,
367 #else
368         .fault = drm_do_vm_shm_fault,
369 #endif
370         .open = drm_vm_open,
371         .close = drm_vm_shm_close,
372 };
373
374 /** DMA virtual memory operations */
375 static struct vm_operations_struct drm_vm_dma_ops = {
376 #ifdef DRM_VM_NOPAGE
377         .nopage = drm_vm_dma_nopage,
378 #else
379         .fault = drm_do_vm_dma_fault,
380 #endif
381         .open = drm_vm_open,
382         .close = drm_vm_close,
383 };
384
385 /** Scatter-gather virtual memory operations */
386 static struct vm_operations_struct drm_vm_sg_ops = {
387 #ifdef DRM_VM_NOPAGE
388         .nopage = drm_vm_sg_nopage,
389 #else
390         .fault = drm_do_vm_sg_fault,
391 #endif
392         .open = drm_vm_open,
393         .close = drm_vm_close,
394 };
395
/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure recording \p vma and the current pid,
 * and add it to drm_device::vmalist.
 */
404 static void drm_vm_open_locked(struct vm_area_struct *vma)
405 {
406         struct drm_file *priv = vma->vm_file->private_data;
407         struct drm_device *dev = priv->head->dev;
408         struct drm_vma_entry *vma_entry;
409
410         DRM_DEBUG("0x%08lx,0x%08lx\n",
411                   vma->vm_start, vma->vm_end - vma->vm_start);
412         atomic_inc(&dev->vma_count);
413
414         vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
415         if (vma_entry) {
416                 vma_entry->vma = vma;
417                 vma_entry->pid = current->pid;
418                 list_add(&vma_entry->head, &dev->vmalist);
419         }
420 }
421
422 static void drm_vm_open(struct vm_area_struct *vma)
423 {
424         struct drm_file *priv = vma->vm_file->private_data;
425         struct drm_device *dev = priv->head->dev;
426
427         mutex_lock(&dev->struct_mutex);
428         drm_vm_open_locked(vma);
429         mutex_unlock(&dev->struct_mutex);
430 }
431
432 /**
433  * \c close method for all virtual memory types.
434  *
435  * \param vma virtual memory area.
436  *
437  * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
438  * free it.
439  */
440 static void drm_vm_close(struct vm_area_struct *vma)
441 {
442         struct drm_file *priv = vma->vm_file->private_data;
443         struct drm_device *dev = priv->head->dev;
444         struct drm_vma_entry *pt, *temp;
445
446         DRM_DEBUG("0x%08lx,0x%08lx\n",
447                   vma->vm_start, vma->vm_end - vma->vm_start);
448         atomic_dec(&dev->vma_count);
449
450         mutex_lock(&dev->struct_mutex);
451         list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
452                 if (pt->vma == vma) {
453                         list_del(&pt->head);
454                         drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
455                         break;
456                 }
457         }
458         mutex_unlock(&dev->struct_mutex);
459 }
460
461
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
472 static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
473 {
474         struct drm_file *priv = filp->private_data;
475         struct drm_device *dev;
476         struct drm_device_dma *dma;
477         unsigned long length = vma->vm_end - vma->vm_start;
478
479         dev = priv->head->dev;
480         dma = dev->dma;
481         DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
482                   vma->vm_start, vma->vm_end, vma->vm_pgoff);
483
484         /* Length must match exact page count */
485         if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
486                 return -EINVAL;
487         }
488
489         if (!capable(CAP_SYS_ADMIN) && (dma->flags & _DRM_DMA_USE_PCI_RO)) {
490                 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
491 #if defined(__i386__) || defined(__x86_64__)
492                 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
493 #else
494                 /* Ye gads this is ugly.  With more thought
495                    we could move this up higher and use
496                    `protection_map' instead.  */
497                 vma->vm_page_prot =
498                     __pgprot(pte_val
499                              (pte_wrprotect
500                               (__pte(pgprot_val(vma->vm_page_prot)))));
501 #endif
502         }
503
504         vma->vm_ops = &drm_vm_dma_ops;
505         vma->vm_flags |= VM_RESERVED;   /* Don't swap */
506
507         vma->vm_file = filp;    /* Needed for drm_vm_open() */
508         drm_vm_open_locked(vma);
509         return 0;
510 }
511
512 unsigned long drm_core_get_map_ofs(struct drm_map * map)
513 {
514         return map->offset;
515 }
516 EXPORT_SYMBOL(drm_core_get_map_ofs);
517
518 unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
519 {
520 #ifdef __alpha__
521         return dev->hose->dense_mem_base - dev->hose->mem_space->start;
522 #else
523         return 0;
524 #endif
525 }
526 EXPORT_SYMBOL(drm_core_get_reg_ofs);
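
/*
 * Example (illustrative, not from this file): drivers that rely on the
 * generic mapping code typically point their drm_driver hooks at these
 * helpers, e.g.
 *
 *     .get_map_ofs = drm_core_get_map_ofs,
 *     .get_reg_ofs = drm_core_get_reg_ofs,
 *
 * drm_mmap_locked() below then calls dev->driver->get_reg_ofs(dev) when it
 * remaps register and frame-buffer maps.
 */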
527
/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it is a
 * DMA area, so drm_mmap_dma() is called. Otherwise the map is looked up by
 * its page offset, the restricted flag is checked, the virtual memory
 * operations are set according to the mapping type and the pages are
 * remapped. Finally the file pointer is set and drm_vm_open_locked() is
 * called.
 */
541 static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
542 {
543         struct drm_file *priv = filp->private_data;
544         struct drm_device *dev = priv->head->dev;
545         struct drm_map *map = NULL;
546         unsigned long offset = 0;
547         struct drm_hash_item *hash;
548
549         DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
550                   vma->vm_start, vma->vm_end, vma->vm_pgoff);
551
552         if (!priv->authenticated)
553                 return -EACCES;
554
555         /* We check for "dma". On Apple's UniNorth, it's valid to have
556          * the AGP mapped at physical address 0
557          * --BenH.
558          */
559
560         if (!vma->vm_pgoff
561 #if __OS_HAS_AGP
562             && (!dev->agp
563                 || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
564 #endif
565             )
566                 return drm_mmap_dma(filp, vma);
567
568         if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
569                 DRM_ERROR("Could not find map\n");
570                 return -EINVAL;
571         }
572
573         map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
574         if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
575                 return -EPERM;
576
577         /* Check for valid size. */
578         if (map->size < vma->vm_end - vma->vm_start)
579                 return -EINVAL;
580
581         if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
582                 vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
583 #if defined(__i386__) || defined(__x86_64__)
584                 pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
585 #else
586                 /* Ye gads this is ugly.  With more thought
587                    we could move this up higher and use
588                    `protection_map' instead.  */
589                 vma->vm_page_prot =
590                     __pgprot(pte_val
591                              (pte_wrprotect
592                               (__pte(pgprot_val(vma->vm_page_prot)))));
593 #endif
594         }
595
596         switch (map->type) {
597         case _DRM_AGP:
598                 if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU cannot access bus (DMA)
                         * addresses directly, so for memory of type _DRM_AGP
                         * we sort out the real physical pages and mappings
                         * in the fault handler.
                         */
604 #if defined(__powerpc__)
605                         pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
606 #endif
607                         vma->vm_ops = &drm_vm_ops;
608                         break;
609                 }
610                 /* fall through to _DRM_FRAME_BUFFER... */
611         case _DRM_FRAME_BUFFER:
612         case _DRM_REGISTERS:
613                 offset = dev->driver->get_reg_ofs(dev);
614                 vma->vm_flags |= VM_IO; /* not in core dump */
615                 vma->vm_page_prot = drm_io_prot(map->type, vma);
616 #ifdef __sparc__
617                 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
618 #endif
619                 if (io_remap_pfn_range(vma, vma->vm_start,
620                                        (map->offset + offset) >> PAGE_SHIFT,
621                                        vma->vm_end - vma->vm_start,
622                                        vma->vm_page_prot))
623                         return -EAGAIN;
624                 DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
625                           " offset = 0x%lx\n",
626                           map->type,
627                           vma->vm_start, vma->vm_end, map->offset + offset);
628                 vma->vm_ops = &drm_vm_ops;
629                 break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory, but it is
                 * allocated differently, so remap it here and avoid the
                 * fault handler. */
633                 if (remap_pfn_range(vma, vma->vm_start,
634                     page_to_pfn(virt_to_page(map->handle)),
635                     vma->vm_end - vma->vm_start, vma->vm_page_prot))
636                         return -EAGAIN;
637         /* fall through to _DRM_SHM */
638         case _DRM_SHM:
639                 vma->vm_ops = &drm_vm_shm_ops;
640                 vma->vm_private_data = (void *)map;
641                 /* Don't let this area swap.  Change when
642                    DRM_KERNEL advisory is supported. */
643                 vma->vm_flags |= VM_RESERVED;
644                 break;
645         case _DRM_SCATTER_GATHER:
646                 vma->vm_ops = &drm_vm_sg_ops;
647                 vma->vm_private_data = (void *)map;
648                 vma->vm_flags |= VM_RESERVED;
649                 break;
650         case _DRM_TTM:
651                 return drm_bo_mmap_locked(vma, filp, map);
652         default:
653                 return -EINVAL; /* This should never happen. */
654         }
655         vma->vm_flags |= VM_RESERVED;   /* Don't swap */
656
657         vma->vm_file = filp;    /* Needed for drm_vm_open() */
658         drm_vm_open_locked(vma);
659         return 0;
660 }
661
662 int drm_mmap(struct file *filp, struct vm_area_struct *vma)
663 {
664         struct drm_file *priv = filp->private_data;
665         struct drm_device *dev = priv->head->dev;
666         int ret;
667
668         mutex_lock(&dev->struct_mutex);
669         ret = drm_mmap_locked(filp, vma);
670         mutex_unlock(&dev->struct_mutex);
671
672         return ret;
673 }
674 EXPORT_SYMBOL(drm_mmap);
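
/*
 * Example (illustrative, not from this file): drm_mmap() is the mmap entry
 * point a DRM driver wires into its file_operations, along the lines of
 *
 *     static const struct file_operations driver_fops = {
 *             .owner   = THIS_MODULE,
 *             .open    = drm_open,
 *             .release = drm_release,
 *             .ioctl   = drm_ioctl,
 *             .mmap    = drm_mmap,
 *             .poll    = drm_poll,
 *     };
 *
 * User space then mmap()s the DRM device file at the offset (map handle)
 * obtained from the mapping ioctls, and drm_mmap_locked() resolves that
 * offset to the right map type.
 */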
675
676 /**
677  * buffer object vm functions.
678  */
679
/**
 * \c Page-fault method for buffer objects.
 *
 * \param vma Virtual memory area.
 * \param vmf Fault descriptor (in the older nopfn variant, the faulting
 * address is passed instead).
 * \return Error or refault status. The pfn is inserted manually with
 * vm_insert_pfn().
 *
 * It is important that pfns are inserted while holding the bo->mutex lock;
 * otherwise we might race with unmap_mapping_range(), which is always
 * called with the bo->mutex lock held.
 *
 * We modify the page attribute bits of the vma->vm_page_prot field while
 * holding mmap_sem only in read mode, not in write mode. These bits are not
 * used by the mm subsystem code, and we consider them protected by the
 * bo->mutex lock.
 */
696 #ifdef DRM_FULL_MM_COMPAT
#define DRM_NOPFN_EXTRA 15 /* fault in up to 15 extra pages (16 total, 64 KiB with 4 KiB pages) */
698
699 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
700 int drm_bo_vm_fault(struct vm_area_struct *vma,
701                               struct vm_fault *vmf)
702 {
703         struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
704         unsigned long page_offset;
705         struct page *page = NULL;
706         struct drm_ttm *ttm = NULL;
707         struct drm_device *dev;
708         unsigned long pfn;
709         int err;
710         unsigned long bus_base;
711         unsigned long bus_offset;
712         unsigned long bus_size;
713         int i;
714         
715         unsigned long ret = VM_FAULT_NOPAGE;
716
717         unsigned long address = (unsigned long)vmf->virtual_address;
718
719         if (address > vma->vm_end)
720                 return VM_FAULT_SIGBUS;
721
722         dev = bo->dev;
723         err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
724         if (err)
725                 return VM_FAULT_NOPAGE;
726
727         err = mutex_lock_interruptible(&bo->mutex);
728         if (err) {
729                 drm_bo_read_unlock(&dev->bm.bm_lock);
730                 return VM_FAULT_NOPAGE;
731         }
732
733         err = drm_bo_wait(bo, 0, 0, 0);
734         if (err) {
735                 ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
736                 goto out_unlock;
737         }
738
        /*
         * If the buffer happens to be in a non-mappable location,
         * move it to a mappable one.
         */
743
744         if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
745                 uint32_t new_mask = bo->mem.mask |
746                         DRM_BO_FLAG_MAPPABLE |
747                         DRM_BO_FLAG_FORCE_MAPPABLE;
748                 err = drm_bo_move_buffer(bo, new_mask, 0, 0);
749                 if (err) {
750                         ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
751                         goto out_unlock;
752                 }
753         }
754
755         err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
756                                 &bus_size);
757
758         if (err) {
759                 ret = VM_FAULT_SIGBUS;
760                 goto out_unlock;
761         }
762
763         page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
764
765         if (bus_size) {
766                 struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
767
768                 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
769                 vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
770         } else {
771                 ttm = bo->ttm;
772
773                 drm_ttm_fixup_caching(ttm);
774                 page = drm_ttm_get_page(ttm, page_offset);
775                 if (!page) {
776                         ret = VM_FAULT_OOM;
777                         goto out_unlock;
778                 }
779                 pfn = page_to_pfn(page);
780                 vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
781                         vm_get_page_prot(vma->vm_flags) :
782                         drm_io_prot(_DRM_TTM, vma);
783         }
784
785         err = vm_insert_pfn(vma, address, pfn);
786         if (err) {
787                 ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
788                 goto out_unlock;
789         }
790
791         for (i=0; i<DRM_NOPFN_EXTRA; ++i) {
792
793                 if (++page_offset == bo->mem.num_pages)
794                         break;
795                 address = vma->vm_start + (page_offset << PAGE_SHIFT);
796                 if (address >= vma->vm_end)
797                         break;
798                 if (bus_size) {
799                         pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) 
800                                 + page_offset;
801                 } else {
802                         page = drm_ttm_get_page(ttm, page_offset);
803                         if (!page)
804                                 break;
805                         pfn = page_to_pfn(page);
806                 }
807                 if (vm_insert_pfn(vma, address, pfn))
808                         break;
809         }
810 out_unlock:
811         mutex_unlock(&bo->mutex);
812         drm_bo_read_unlock(&dev->bm.bm_lock);
813         return ret;
814 }
815
816 EXPORT_SYMBOL(drm_bo_vm_fault);
817
818 int drm_bo_vm_nopfn(struct vm_area_struct *vma,
819                               struct vm_fault *vmf )
820 {
821         struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
822         unsigned long page_offset;
823         struct page *page = NULL;
824         struct drm_ttm *ttm = NULL;
825         struct drm_device *dev;
826         unsigned long pfn;
827         int err;
828         unsigned long bus_base;
829         unsigned long bus_offset;
830         unsigned long bus_size;
831         int i;
832         unsigned long ret = VM_FAULT_NOPAGE;
833         
834         unsigned long address = (unsigned long)vmf->virtual_address;
835
836         if (address > vma->vm_end)
837                 return VM_FAULT_SIGBUS;
838
839         dev = bo->dev;
840         err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
841         if (err)
842                 return VM_FAULT_NOPAGE;
843
844         err = mutex_lock_interruptible(&bo->mutex);
845         if (err) {
846                 drm_bo_read_unlock(&dev->bm.bm_lock);
847                 return VM_FAULT_NOPAGE;
848         }
849
850         err = drm_bo_wait(bo, 0, 0, 0);
851         if (err) {
852                 ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
853                 goto out_unlock;
854         }
855
        /*
         * If the buffer happens to be in a non-mappable location,
         * move it to a mappable one.
         */
860
861         if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
862                 uint32_t new_mask = bo->mem.mask |
863                         DRM_BO_FLAG_MAPPABLE |
864                         DRM_BO_FLAG_FORCE_MAPPABLE;
865                 err = drm_bo_move_buffer(bo, new_mask, 0, 0);
866                 if (err) {
867                         ret = (err != -EAGAIN) ? VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
868                         goto out_unlock;
869                 }
870         }
871
872         err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
873                                 &bus_size);
874
875         if (err) {
876                 ret = VM_FAULT_SIGBUS;
877                 goto out_unlock;
878         }
879
880         page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
881
882         if (bus_size) {
883                 struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
884
885                 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
886                 vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
887         } else {
888                 ttm = bo->ttm;
889
890                 drm_ttm_fixup_caching(ttm);
891                 page = drm_ttm_get_page(ttm, page_offset);
892                 if (!page) {
893                         ret = VM_FAULT_OOM;
894                         goto out_unlock;
895                 }
896                 pfn = page_to_pfn(page);
897                 vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
898                         vm_get_page_prot(vma->vm_flags) :
899                         drm_io_prot(_DRM_TTM, vma);
900         }
901
902         err = vm_insert_pfn(vma, address, pfn);
903         if (err) {
904                 ret = (err != -EAGAIN) ? VM_FAULT_OOM : VM_FAULT_NOPAGE;
905                 goto out_unlock;
906         }
907
908         for (i=0; i<DRM_NOPFN_EXTRA; ++i) {
909
910                 if (++page_offset == bo->mem.num_pages)
911                         break;
912                 address = vma->vm_start + (page_offset << PAGE_SHIFT);
913                 if (address >= vma->vm_end)
914                         break;
915                 if (bus_size) {
916                         pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) 
917                                 + page_offset;
918                 } else {
919                         page = drm_ttm_get_page(ttm, page_offset);
920                         if (!page)
921                                 break;
922                         pfn = page_to_pfn(page);
923                 }
924                 if (vm_insert_pfn(vma, address, pfn))
925                         break;
926         }
927 out_unlock:
928         mutex_unlock(&bo->mutex);
929         drm_bo_read_unlock(&dev->bm.bm_lock);
930         return ret;
931 }
932
933 #else
934 unsigned long drm_bo_vm_nopfn(struct vm_area_struct *vma,
935                               unsigned long address)
936 {
937         struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
938         unsigned long page_offset;
939         struct page *page = NULL;
940         struct drm_ttm *ttm = NULL;
941         struct drm_device *dev;
942         unsigned long pfn;
943         int err;
944         unsigned long bus_base;
945         unsigned long bus_offset;
946         unsigned long bus_size;
947         int i;
948         unsigned long ret = NOPFN_REFAULT;
949
950         if (address > vma->vm_end)
951                 return NOPFN_SIGBUS;
952
953         dev = bo->dev;
954         err = drm_bo_read_lock(&dev->bm.bm_lock, 1);
955         if (err)
956                 return NOPFN_REFAULT;
957
958         err = mutex_lock_interruptible(&bo->mutex);
959         if (err) {
960                 drm_bo_read_unlock(&dev->bm.bm_lock);
961                 return NOPFN_REFAULT;
962         }
963
964         err = drm_bo_wait(bo, 0, 0, 0);
965         if (err) {
966                 ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
967                 goto out_unlock;
968         }
969
        /*
         * If the buffer happens to be in a non-mappable location,
         * move it to a mappable one.
         */
974
975         if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
976                 uint32_t new_mask = bo->mem.mask |
977                         DRM_BO_FLAG_MAPPABLE |
978                         DRM_BO_FLAG_FORCE_MAPPABLE;
979                 err = drm_bo_move_buffer(bo, new_mask, 0, 0);
980                 if (err) {
981                         ret = (err != -EAGAIN) ? NOPFN_SIGBUS : NOPFN_REFAULT;
982                         goto out_unlock;
983                 }
984         }
985
986         err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
987                                 &bus_size);
988
989         if (err) {
990                 ret = NOPFN_SIGBUS;
991                 goto out_unlock;
992         }
993
994         page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
995
996         if (bus_size) {
997                 struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];
998
999                 pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
1000                 vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
1001         } else {
1002                 ttm = bo->ttm;
1003
1004                 drm_ttm_fixup_caching(ttm);
1005                 page = drm_ttm_get_page(ttm, page_offset);
1006                 if (!page) {
1007                         ret = NOPFN_OOM;
1008                         goto out_unlock;
1009                 }
1010                 pfn = page_to_pfn(page);
1011                 vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
1012                         vm_get_page_prot(vma->vm_flags) :
1013                         drm_io_prot(_DRM_TTM, vma);
1014         }
1015
1016         err = vm_insert_pfn(vma, address, pfn);
1017         if (err) {
1018                 ret = (err != -EAGAIN) ? NOPFN_OOM : NOPFN_REFAULT;
1019                 goto out_unlock;
1020         }
1021
1022         for (i=0; i<DRM_NOPFN_EXTRA; ++i) {
1023
1024                 if (++page_offset == bo->mem.num_pages)
1025                         break;
1026                 address = vma->vm_start + (page_offset << PAGE_SHIFT);
1027                 if (address >= vma->vm_end)
1028                         break;
1029                 if (bus_size) {
1030                         pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) 
1031                                 + page_offset;
1032                 } else {
1033                         page = drm_ttm_get_page(ttm, page_offset);
1034                         if (!page)
1035                                 break;
1036                         pfn = page_to_pfn(page);
1037                 }
1038                 if (vm_insert_pfn(vma, address, pfn))
1039                         break;
1040         }
1041 out_unlock:
1042         mutex_unlock(&bo->mutex);
1043         drm_bo_read_unlock(&dev->bm.bm_lock);
1044         return ret;
1045 }
1046 #endif
1047
1048 EXPORT_SYMBOL(drm_bo_vm_nopfn);
1049 #endif

static void drm_bo_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;

        drm_vm_open_locked(vma);
        atomic_inc(&bo->usage);
#ifdef DRM_ODD_MM_COMPAT
        drm_bo_add_vma(bo, vma);
#endif
}

/**
 * \c vma open method for buffer objects.
 *
 * \param vma virtual memory area.
 */
static void drm_bo_vm_open(struct vm_area_struct *vma)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
        struct drm_device *dev = bo->dev;

        mutex_lock(&dev->struct_mutex);
        drm_bo_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_bo_vm_open);

/**
 * \c vma close method for buffer objects.
 *
 * \param vma virtual memory area.
 */
static void drm_bo_vm_close(struct vm_area_struct *vma)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
        struct drm_device *dev = bo->dev;

        drm_vm_close(vma);
        if (bo) {
                mutex_lock(&dev->struct_mutex);
#ifdef DRM_ODD_MM_COMPAT
                drm_bo_delete_vma(bo, vma);
#endif
                drm_bo_usage_deref_locked((struct drm_buffer_object **)
                                          &vma->vm_private_data);
                mutex_unlock(&dev->struct_mutex);
        }
        return;
}
EXPORT_SYMBOL(drm_bo_vm_close);

static struct vm_operations_struct drm_bo_vm_ops = {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
        .fault = drm_bo_vm_fault,
#else
#ifdef DRM_FULL_MM_COMPAT
        .nopfn = drm_bo_vm_nopfn,
#else
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
        .nopfn = drm_bo_vm_nopfn,
#else
        .nopage = drm_bo_vm_nopage,
#endif
#endif
#endif
        .open = drm_bo_vm_open,
        .close = drm_bo_vm_close,
};

/**
 * mmap buffer object memory.
 *
 * \param vma virtual memory area.
 * \param filp file pointer.
 * \param map The buffer object drm map.
 * \return zero on success or a negative number on failure.
 */
int drm_bo_mmap_locked(struct vm_area_struct *vma,
                       struct file *filp,
                       drm_local_map_t *map)
{
        vma->vm_ops = &drm_bo_vm_ops;
        vma->vm_private_data = map->handle;
        vma->vm_file = filp;
        vma->vm_flags |= VM_RESERVED | VM_IO;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19))
        vma->vm_flags |= VM_PFNMAP;
#endif
        drm_bo_vm_open_locked(vma);
#ifdef DRM_ODD_MM_COMPAT
        drm_bo_map_bound(vma);
#endif
        return 0;
}
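
/*
 * Note (illustrative summary): buffer-object mappings reach this function
 * from drm_mmap_locked() via the _DRM_TTM case. The VMA is marked VM_PFNMAP
 * (on kernels that support it) because the fault handlers above populate it
 * page by page with vm_insert_pfn(), e.g. for a fault at 'address':
 *
 *     page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
 *     pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
 *     vm_insert_pfn(vma, address, pfn);
 *
 * so the core VM does no struct page reference counting for these pages.
 */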