/**************************************************************************
 *
 * This kernel module is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 **************************************************************************/
/*
 * This code provides access to unexported mm kernel features. It is necessary
 * in order to use the new DRM memory manager code with kernels that don't
 * support it directly.
 *
 * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 *          Linux kernel mm subsystem authors.
 *          (Most code taken from there).
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * These have bad performance in the AGP module for the indicated kernel versions.
 */

int drm_map_page_into_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}

int drm_unmap_page_from_agp(struct page *page)
{
        int i;
        i = change_page_attr(page, 1, PAGE_KERNEL);
        /* Caller's responsibility to call global_flush_tlb() for
         * performance reasons */
        return i;
}
#endif
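
/*
 * Illustrative sketch: how a caller might batch page-attribute changes
 * through the helpers above and then issue the single global_flush_tlb()
 * that the comments ask for.  The page array, count and function name are
 * hypothetical, not part of this file's API.
 */
#if 0
static int example_map_agp_pages(struct page **pages, int num_pages)
{
        int i, ret;

        for (i = 0; i < num_pages; ++i) {
                ret = drm_map_page_into_agp(pages[i]);
                if (ret)
                        return ret;
        }
        global_flush_tlb();     /* one TLB flush for the whole batch */
        return 0;
}
#endif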


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))

/*
 * The protection map was exported in 2.6.19
 */

pgprot_t vm_get_page_prot(unsigned long vm_flags)
{
#ifdef MODULE
        static pgprot_t drm_protection_map[16] = {
                __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
                __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
        };

        return drm_protection_map[vm_flags & 0x0F];
#else
        extern pgprot_t protection_map[];
        return protection_map[vm_flags & 0x0F];
#endif
}
#endif


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15))

/*
 * vm code for kernels below 2.6.15, in which version a major vm rewrite
 * occurred. This implements a simple, straightforward version similar to
 * the one in kernel 2.6.19+.
 * Kernels below 2.6.15 use nopage whereas 2.6.19 and upwards use
 * nopfn.
 */

static struct {
        spinlock_t lock;
        struct page *dummy_page;
        atomic_t present;
} drm_np_retry =
{SPIN_LOCK_UNLOCKED, NOPAGE_OOM, ATOMIC_INIT(0)};


static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                                    struct fault_data *data);


struct page * get_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 0) {
                struct page *page = alloc_page(GFP_KERNEL);
                if (!page)
                        return NOPAGE_OOM;
                spin_lock(&drm_np_retry.lock);
                drm_np_retry.dummy_page = page;
                atomic_set(&drm_np_retry.present, 1);
                spin_unlock(&drm_np_retry.lock);
        }
        get_page(drm_np_retry.dummy_page);
        return drm_np_retry.dummy_page;
}

void free_nopage_retry(void)
{
        if (atomic_read(&drm_np_retry.present) == 1) {
                spin_lock(&drm_np_retry.lock);
                __free_page(drm_np_retry.dummy_page);
                drm_np_retry.dummy_page = NULL;
                atomic_set(&drm_np_retry.present, 0);
                spin_unlock(&drm_np_retry.lock);
        }
}
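
/*
 * Illustrative sketch: how a pre-2.6.15 ->nopage handler might use
 * get_nopage_retry() to hand back the shared dummy page and force the
 * kernel to refault the address later.  The handler name and the
 * example_lookup_page() helper are hypothetical.
 */
#if 0
static struct page *example_nopage(struct vm_area_struct *vma,
                                   unsigned long address, int *type)
{
        struct page *page;

        if (type)
                *type = VM_FAULT_MINOR;

        page = example_lookup_page(vma, address);       /* hypothetical lookup */
        if (!page)
                /* Not ready yet: return the dummy page so the kernel
                 * simply retries the fault instead of failing it. */
                return get_nopage_retry();

        get_page(page);
        return page;
}
#endif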

struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int *type)
{
        struct fault_data data;

        if (type)
                *type = VM_FAULT_MINOR;

        data.address = address;
        data.vma = vma;
        drm_bo_vm_fault(vma, &data);
        switch (data.type) {
        case VM_FAULT_OOM:
                return NOPAGE_OOM;
        case VM_FAULT_SIGBUS:
                return NOPAGE_SIGBUS;
        default:
                break;
        }

        return NOPAGE_REFAULT;
}

#endif
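
/*
 * Illustrative sketch: on kernels older than 2.6.15 the buffer-object mmap
 * code installs the compat ->nopage handler above.  The structure name and
 * the open/close callbacks below are placeholders, not the names actually
 * used by the driver.
 */
#if 0
static struct vm_operations_struct example_bo_vm_ops = {
        .nopage = drm_bo_vm_nopage,
        .open = example_vm_open,        /* hypothetical */
        .close = example_vm_close,      /* hypothetical */
};
#endif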

#if !defined(DRM_FULL_MM_COMPAT) && \
  ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)) || \
   (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)))

static int drm_pte_is_clear(struct vm_area_struct *vma,
                            unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int ret = 1;
        pte_t *pte;
        pmd_t *pmd;
        pud_t *pud;
        pgd_t *pgd;

        spin_lock(&mm->page_table_lock);
        pgd = pgd_offset(mm, addr);
        if (pgd_none(*pgd))
                goto unlock;
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                goto unlock;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                goto unlock;
        pte = pte_offset_map(pmd, addr);
        if (!pte)
                goto unlock;
        ret = pte_none(*pte);
        pte_unmap(pte);
 unlock:
        spin_unlock(&mm->page_table_lock);
        return ret;
}

static int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
                  unsigned long pfn)
{
        int ret;
        if (!drm_pte_is_clear(vma, addr))
                return -EBUSY;

        ret = io_remap_pfn_range(vma, addr, pfn, PAGE_SIZE, vma->vm_page_prot);
        return ret;
}


static struct page *drm_bo_vm_fault(struct vm_area_struct *vma,
                                    struct fault_data *data)
{
        unsigned long address = data->address;
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page = NULL;
        struct drm_ttm *ttm;
        struct drm_device *dev;
        unsigned long pfn;
        int err;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        dev = bo->dev;
        drm_bo_read_lock(&dev->bm.bm_lock, 0);

        mutex_lock(&bo->mutex);

        err = drm_bo_wait(bo, 0, 1, 0);
        if (err) {
                data->type = (err == -EAGAIN) ?
                        VM_FAULT_MINOR : VM_FAULT_SIGBUS;
                goto out_unlock;
        }


        /*
         * If the buffer happens to be in a non-mappable location,
         * move it to a mappable one.
         */

        if (!(bo->mem.flags & DRM_BO_FLAG_MAPPABLE)) {
                unsigned long _end = jiffies + 3*DRM_HZ;
                uint32_t new_mask = bo->mem.proposed_flags |
                        DRM_BO_FLAG_MAPPABLE |
                        DRM_BO_FLAG_FORCE_MAPPABLE;

                do {
                        err = drm_bo_move_buffer(bo, new_mask, 0, 0);
                } while ((err == -EAGAIN) && !time_after_eq(jiffies, _end));

                if (err) {
                        DRM_ERROR("Timeout moving buffer to mappable location.\n");
                        data->type = VM_FAULT_SIGBUS;
                        goto out_unlock;
                }
        }

        if (address > vma->vm_end) {
                data->type = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        dev = bo->dev;
        err = drm_bo_pci_offset(dev, &bo->mem, &bus_base, &bus_offset,
                                &bus_size);

        if (err) {
                data->type = VM_FAULT_SIGBUS;
                goto out_unlock;
        }

        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;

        if (bus_size) {
                struct drm_mem_type_manager *man = &dev->bm.man[bo->mem.mem_type];

                pfn = ((bus_base + bus_offset) >> PAGE_SHIFT) + page_offset;
                vma->vm_page_prot = drm_io_prot(man->drm_bus_maptype, vma);
        } else {
                ttm = bo->ttm;

                drm_ttm_fixup_caching(ttm);
                page = drm_ttm_get_page(ttm, page_offset);
                if (!page) {
                        data->type = VM_FAULT_OOM;
                        goto out_unlock;
                }
                pfn = page_to_pfn(page);
                vma->vm_page_prot = (bo->mem.flags & DRM_BO_FLAG_CACHED) ?
                        vm_get_page_prot(vma->vm_flags) :
                        drm_io_prot(_DRM_TTM, vma);
        }

        err = vm_insert_pfn(vma, address, pfn);

        if (!err || err == -EBUSY)
                data->type = VM_FAULT_MINOR;
        else
                data->type = VM_FAULT_OOM;
out_unlock:
        mutex_unlock(&bo->mutex);
        drm_bo_read_unlock(&dev->bm.bm_lock);
        return NULL;
}

#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)) && \
  !defined(DRM_FULL_MM_COMPAT)

/**
 * drm_bo_vm_nopfn - compat ->nopfn handler for buffer-object mappings
 * @vma: the faulting vma
 * @address: the faulting address
 *
 * Resolves the fault through drm_bo_vm_fault(), which inserts the pfn
 * itself, and translates the result into the NOPFN_* return codes.
 */

unsigned long drm_bo_vm_nopfn(struct vm_area_struct * vma,
                           unsigned long address)
{
        struct fault_data data;
        data.address = address;

        (void) drm_bo_vm_fault(vma, &data);
        if (data.type == VM_FAULT_OOM)
                return NOPFN_OOM;
        else if (data.type == VM_FAULT_SIGBUS)
                return NOPFN_SIGBUS;

        /*
         * pfn already set.
         */

        return 0;
}
#endif
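
/*
 * Illustrative sketch: on 2.6.19+ kernels without DRM_FULL_MM_COMPAT the
 * ->nopfn handler above is installed instead of ->nopage.  As before, the
 * structure name and the open/close callbacks are placeholders.
 */
#if 0
static struct vm_operations_struct example_bo_vm_nopfn_ops = {
        .nopfn = drm_bo_vm_nopfn,
        .open = example_vm_open,        /* hypothetical */
        .close = example_vm_close,      /* hypothetical */
};
#endif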


#ifdef DRM_ODD_MM_COMPAT

/*
 * VM compatibility code for 2.6.15-2.6.18. This code implements a complicated
 * workaround for a single BUG statement in do_no_page in these versions. The
 * tricky thing is that we need to take the mmap_sem in exclusive mode for _all_
 * vmas mapping the ttm, before dev->struct_mutex is taken. The way we do this
 * is to first take dev->struct_mutex and then trylock all mmap_sems. If this
 * fails for a single mmap_sem, we have to release all sems and the
 * dev->struct_mutex, release the cpu and retry. We also need to keep track of
 * all vmas mapping the ttm. Phew. (An illustrative sketch of the retry pattern
 * follows drm_bo_unlock_kmm() below.)
 */

typedef struct p_mm_entry {
        struct list_head head;
        struct mm_struct *mm;
        atomic_t refcount;
        int locked;
} p_mm_entry_t;

typedef struct vma_entry {
        struct list_head head;
        struct vm_area_struct *vma;
} vma_entry_t;


struct page *drm_bo_vm_nopage(struct vm_area_struct *vma,
                               unsigned long address,
                               int *type)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *) vma->vm_private_data;
        unsigned long page_offset;
        struct page *page;
        struct drm_ttm *ttm;
        struct drm_device *dev;

        mutex_lock(&bo->mutex);

        if (type)
                *type = VM_FAULT_MINOR;

        if (address > vma->vm_end) {
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }

        dev = bo->dev;

        if (drm_mem_reg_is_pci(dev, &bo->mem)) {
                DRM_ERROR("Invalid compat nopage.\n");
                page = NOPAGE_SIGBUS;
                goto out_unlock;
        }

        ttm = bo->ttm;
        drm_ttm_fixup_caching(ttm);
        page_offset = (address - vma->vm_start) >> PAGE_SHIFT;
        page = drm_ttm_get_page(ttm, page_offset);
        if (!page) {
                page = NOPAGE_OOM;
                goto out_unlock;
        }

        get_page(page);
out_unlock:
        mutex_unlock(&bo->mutex);
        return page;
}


int drm_bo_map_bound(struct vm_area_struct *vma)
{
        struct drm_buffer_object *bo = (struct drm_buffer_object *)vma->vm_private_data;
        int ret = 0;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);
        BUG_ON(ret);

        if (bus_size) {
                struct drm_mem_type_manager *man = &bo->dev->bm.man[bo->mem.mem_type];
                unsigned long pfn = (bus_base + bus_offset) >> PAGE_SHIFT;
                pgprot_t pgprot = drm_io_prot(man->drm_bus_maptype, vma);
                ret = io_remap_pfn_range(vma, vma->vm_start, pfn,
                                         vma->vm_end - vma->vm_start,
                                         pgprot);
        }

        return ret;
}


int drm_bo_add_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n_entry;
        vma_entry_t *v_entry;
        struct mm_struct *mm = vma->vm_mm;

        v_entry = drm_ctl_alloc(sizeof(*v_entry), DRM_MEM_BUFOBJ);
        if (!v_entry) {
                DRM_ERROR("Allocation of vma pointer entry failed\n");
                return -ENOMEM;
        }
        v_entry->vma = vma;

        list_add_tail(&v_entry->head, &bo->vma_list);

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                if (mm == entry->mm) {
                        atomic_inc(&entry->refcount);
                        return 0;
                } else if ((unsigned long)mm < (unsigned long)entry->mm) ;
        }

        n_entry = drm_ctl_alloc(sizeof(*n_entry), DRM_MEM_BUFOBJ);
        if (!n_entry) {
                DRM_ERROR("Allocation of process mm pointer entry failed\n");
                return -ENOMEM;
        }
        INIT_LIST_HEAD(&n_entry->head);
        n_entry->mm = mm;
        n_entry->locked = 0;
        atomic_set(&n_entry->refcount, 0);
        list_add_tail(&n_entry->head, &entry->head);

        return 0;
}

void drm_bo_delete_vma(struct drm_buffer_object * bo, struct vm_area_struct *vma)
{
        p_mm_entry_t *entry, *n;
        vma_entry_t *v_entry, *v_n;
        int found = 0;
        struct mm_struct *mm = vma->vm_mm;

        list_for_each_entry_safe(v_entry, v_n, &bo->vma_list, head) {
                if (v_entry->vma == vma) {
                        found = 1;
                        list_del(&v_entry->head);
                        drm_ctl_free(v_entry, sizeof(*v_entry), DRM_MEM_BUFOBJ);
                        break;
                }
        }
        BUG_ON(!found);

        list_for_each_entry_safe(entry, n, &bo->p_mm_list, head) {
                if (mm == entry->mm) {
                        if (atomic_add_negative(-1, &entry->refcount)) {
                                list_del(&entry->head);
                                BUG_ON(entry->locked);
                                drm_ctl_free(entry, sizeof(*entry), DRM_MEM_BUFOBJ);
                        }
                        return;
                }
        }
        BUG_ON(1);
}


int drm_bo_lock_kmm(struct drm_buffer_object * bo)
{
        p_mm_entry_t *entry;
        int lock_ok = 1;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(entry->locked);
                if (!down_write_trylock(&entry->mm->mmap_sem)) {
                        lock_ok = 0;
                        break;
                }
                entry->locked = 1;
        }

        if (lock_ok)
                return 0;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                if (!entry->locked)
                        break;
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }

        /*
         * Possible deadlock. Try again. Our callers should handle this
         * and restart.
         */

        return -EAGAIN;
}

void drm_bo_unlock_kmm(struct drm_buffer_object * bo)
{
        p_mm_entry_t *entry;

        list_for_each_entry(entry, &bo->p_mm_list, head) {
                BUG_ON(!entry->locked);
                up_write(&entry->mm->mmap_sem);
                entry->locked = 0;
        }
}
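
/*
 * Illustrative sketch: the caller-side retry pattern described in the
 * comment at the top of this section.  The function name is hypothetical;
 * the body only shows the locking order and the -EAGAIN handling.
 */
#if 0
static int example_lock_all_mms(struct drm_buffer_object *bo)
{
        int ret;

        do {
                mutex_lock(&bo->dev->struct_mutex);
                ret = drm_bo_lock_kmm(bo);
                if (ret == -EAGAIN) {
                        /* Could not trylock every mmap_sem: drop everything,
                         * give up the cpu and try again. */
                        mutex_unlock(&bo->dev->struct_mutex);
                        schedule();
                }
        } while (ret == -EAGAIN);

        /* ... work requiring all mmap_sems goes here ... */

        drm_bo_unlock_kmm(bo);
        mutex_unlock(&bo->dev->struct_mutex);
        return 0;
}
#endif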

int drm_bo_remap_bound(struct drm_buffer_object *bo)
{
        vma_entry_t *v_entry;
        int ret = 0;

        if (drm_mem_reg_is_pci(bo->dev, &bo->mem)) {
                list_for_each_entry(v_entry, &bo->vma_list, head) {
                        ret = drm_bo_map_bound(v_entry->vma);
                        if (ret)
                                break;
                }
        }

        return ret;
}

void drm_bo_finish_unmap(struct drm_buffer_object *bo)
{
        vma_entry_t *v_entry;

        list_for_each_entry(v_entry, &bo->vma_list, head) {
                v_entry->vma->vm_flags &= ~VM_PFNMAP;
        }
}

#endif

#ifdef DRM_IDR_COMPAT_FN
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __free_layer(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time. If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);
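
/*
 * Illustrative sketch: a trivial idr_for_each() callback that just logs
 * every id/pointer pair.  The callback name and the idr handle in the usage
 * comment are hypothetical.
 */
#if 0
static int example_print_entry(int id, void *p, void *data)
{
        printk(KERN_DEBUG "idr entry %d -> %p\n", id, p);
        return 0;       /* returning non-zero would stop the iteration */
}

/* usage: error = idr_for_each(&example_idr, example_print_entry, NULL); */
#endif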

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max && !error) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p) {
                                memset(p, 0, sizeof *p);
                                free_layer(idp, p);
                        }
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->top = NULL;
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);
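
/*
 * Illustrative sketch: the "typical clean-up sequence" described above.
 * example_free(), example_idr_cleanup() and the kfree() disposal are
 * hypothetical; the ordering of the three calls is the point.
 */
#if 0
static int example_free(int id, void *p, void *data)
{
        kfree(p);               /* free the object stored under this id */
        return 0;
}

static void example_idr_cleanup(struct idr *idp)
{
        idr_for_each(idp, example_free, NULL);  /* free the objects */
        idr_remove_all(idp);                    /* drop all id mappings */
        idr_destroy(idp);                       /* free cached idr_layers */
}
#endif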

#endif /* DRM_IDR_COMPAT_FN */


#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        p->ary[n] = ptr;

        return (void *)old_p;
}
EXPORT_SYMBOL(idr_replace);
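
/*
 * Illustrative sketch: swapping the object stored under a known id and
 * checking the error-pointer return.  The function name and the kfree()
 * disposal of the old object are hypothetical.
 */
#if 0
static int example_swap(struct idr *idp, int id, void *new_obj)
{
        void *old = idr_replace(idp, new_obj, id);

        if (IS_ERR(old))
                return PTR_ERR(old);    /* -ENOENT or -EINVAL */

        kfree(old);                     /* dispose of the previous object */
        return 0;
}
#endif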
#endif

#if defined(DRM_KMAP_ATOMIC_PROT_PFN)
#define drm_kmap_get_fixmap_pte(vaddr)                                  \
        pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))

void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type,
                           pgprot_t protection)
{
        enum fixed_addresses idx;
        unsigned long vaddr;
        static pte_t *km_pte;
        static int initialized = 0;

        if (unlikely(!initialized)) {
                km_pte = drm_kmap_get_fixmap_pte(__fix_to_virt(FIX_KMAP_BEGIN));
                initialized = 1;
        }

        pagefault_disable();
        idx = type + KM_TYPE_NR*smp_processor_id();
        vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
        set_pte(km_pte-idx, pfn_pte(pfn, protection));

        return (void*) vaddr;
}

EXPORT_SYMBOL(kmap_atomic_prot_pfn);
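
/*
 * Illustrative sketch: temporarily map an uncached pfn and copy one page
 * out of it.  The function name, destination buffer and pfn are
 * hypothetical; the mapping is torn down with kunmap_atomic() on the same
 * kmap slot.
 */
#if 0
static void example_copy_from_pfn(void *dst, unsigned long pfn)
{
        void *src = kmap_atomic_prot_pfn(pfn, KM_USER0,
                                         pgprot_noncached(PAGE_KERNEL));

        memcpy(dst, src, PAGE_SIZE);
        kunmap_atomic(src, KM_USER0);
}
#endif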

#endif