3 * Memory management wrappers for DRM
5 * \author Rickard E. (Rik) Faith <faith@valinux.com>
6 * \author Gareth Hughes <gareth@valinux.com>
10 * Created: Thu Feb 4 14:00:34 1999 by faith@valinux.com
12 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
13 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
14 * All Rights Reserved.
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of this software and associated documentation files (the "Software"),
18 * to deal in the Software without restriction, including without limitation
19 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
20 * and/or sell copies of the Software, and to permit persons to whom the
21 * Software is furnished to do so, subject to the following conditions:
23 * The above copyright notice and this permission notice (including the next
24 * paragraph) shall be included in all copies or substantial portions of the
27 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
28 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
29 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
30 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
31 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
32 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
33 * OTHER DEALINGS IN THE SOFTWARE.
/*
 * NOTE(review): this file is a numbered listing of the DRM drm_memory.h
 * header with many lines elided (the embedded numbering jumps, e.g.
 * 37 -> 41, 46 -> 48). Code text below is untouched; annotations only.
 */
36 #include <linux/highmem.h>
37 #include <linux/vmalloc.h>
41 * Cut down version of drm_memory_debug.h, which used to be called
45 /* Need the 4-argument version of vmap(). */
46 #if __OS_HAS_AGP && defined(VMAP_4_ARGS)
48 #include <linux/vmalloc.h>
/*
 * Page protection used when vmap()ing AGP pages: marked no-cache on
 * platforms that provide _PAGE_NO_CACHE, plain PAGE_KERNEL otherwise.
 * (The #if/#else lines selecting between the two are elided here.)
 */
54 # define PAGE_AGP __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
56 # define PAGE_AGP PAGE_KERNEL
/*
 * Compatibility shims for pre-2.5 kernels that lack the page-table
 * helpers used by drm_follow_page() below.
 */
60 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
61 #ifndef pte_offset_kernel
62 # define pte_offset_kernel(dir, address) pte_offset(dir, address)
65 # define pte_pfn(pte) (pte_page(pte) - mem_map)
68 # define pfn_to_page(pfn) (mem_map + (pfn))
/**
 * drm_lookup_map - find the drm_map whose range fully contains
 * [offset, offset + size) on the given device.
 *
 * NOTE(review): this listing elides lines (numbering jumps 76 -> 78,
 * 83 -> 87); the statement loading the map from r_list and the return
 * statements fall in the elided span — presumably the matching map is
 * returned, NULL when none covers the range. Confirm against the full
 * file before relying on this.
 */
73 * Find the drm_map that covers the range [offset, offset+size).
75 static inline drm_map_t *drm_lookup_map(unsigned long offset,
76 unsigned long size, drm_device_t * dev)
78 struct list_head *list;
79 drm_map_list_t *r_list;
/* Linear scan of the device map list; each list node is embedded in a
 * drm_map_list_t, hence the direct cast. */
82 list_for_each(list, &dev->maplist->head) {
83 r_list = (drm_map_list_t *) list;
/* Match when the map's range fully contains the requested range. */
87 if (map->offset <= offset
88 && (offset + size) <= (map->offset + map->size))
/**
 * agp_remap - map an AGP aperture range into kernel virtual space by
 * going through the kernel page tables, for chipsets/platforms where
 * CPU accesses are not translated by the GART (see comment at original
 * lines 118-120 below).
 *
 * NOTE(review): several lines of this listing are elided (numbering
 * jumps 111 -> 118, 123 -> 128, and past 131): the NULL checks, the
 * vfree() of page_map after vmap(), and the final return are missing
 * from view — confirm against the full file.
 */
94 static inline void *agp_remap(unsigned long offset, unsigned long size,
97 unsigned long *phys_addr_map, i, num_pages =
98 PAGE_ALIGN(size) / PAGE_SIZE;
99 struct drm_agp_mem *agpmem;
100 struct page **page_map;
103 size = PAGE_ALIGN(size);
/* Presumably Alpha-hose specific: offsets are relative to the hose's
 * memory space — the surrounding #ifdef is elided; verify. */
106 offset -= dev->hose->mem_space->start;
/* Find the bound AGP memory block whose pages cover the request. */
109 for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next)
110 if (agpmem->bound <= offset
111 && (agpmem->bound + (agpmem->pages << PAGE_SHIFT)) >=
118 * OK, we're mapping AGP space on a chipset/platform on which memory accesses by
119 * the CPU do not get remapped by the GART. We fix this by using the kernel's
120 * page-table instead (that's probably faster anyhow...).
122 /* note: use vmalloc() because num_pages could be large... */
123 page_map = vmalloc(num_pages * sizeof(struct page *));
/* Translate each physical AGP page address to its struct page so the
 * whole run can be vmap()ed with the PAGE_AGP protection. */
128 agpmem->memory->memory + (offset - agpmem->bound) / PAGE_SIZE;
129 for (i = 0; i < num_pages; ++i)
130 page_map[i] = pfn_to_page(phys_addr_map[i] >> PAGE_SHIFT);
131 addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);
/**
 * drm_follow_page - walk the kernel page tables to translate a kernel
 * virtual address into a physical address (pte_pfn << PAGE_SHIFT).
 *
 * Kernels up to 2.6.10 use 3-level page tables (no pud level), hence
 * the version split below. NOTE(review): the #else between the two
 * variants and the closing #endif/brace are elided from this listing.
 */
137 static inline unsigned long drm_follow_page(void *vaddr)
139 pgd_t *pgd = pgd_offset_k((unsigned long) vaddr);
140 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10)
141 pmd_t *pmd = pmd_offset(pgd, (unsigned long)vaddr);
143 pud_t *pud = pud_offset(pgd, (unsigned long) vaddr);
144 pmd_t *pmd = pmd_offset(pud, (unsigned long) vaddr);
146 pte_t *ptep = pte_offset_kernel(pmd, (unsigned long) vaddr);
147 return pte_pfn(*ptep) << PAGE_SHIFT;
150 #else /* __OS_HAS_AGP */
/*
 * Without AGP support these helpers collapse to stubs. Their bodies are
 * elided from this listing — presumably they return NULL / 0 so the
 * callers below fall through to plain ioremap()/iounmap(); confirm
 * against the full file.
 */
152 static inline drm_map_t *drm_lookup_map(unsigned long offset,
153 unsigned long size, drm_device_t * dev)
158 static inline void *agp_remap(unsigned long offset, unsigned long size,
164 static inline unsigned long drm_follow_page(void *vaddr)
/**
 * drm_ioremap - map a bus address range for CPU access. If the device's
 * AGP aperture cannot be accessed directly (cant_use_aperture) and the
 * range belongs to an _DRM_AGP map, route through agp_remap(); in all
 * other cases fall back to plain ioremap().
 * NOTE(review): the closing brace/#endif lines are elided here.
 */
171 static inline void *drm_ioremap(unsigned long offset, unsigned long size,
174 #if defined(VMAP_4_ARGS)
175 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
176 drm_map_t *map = drm_lookup_map(offset, size, dev);
178 if (map && map->type == _DRM_AGP)
179 return agp_remap(offset, size, dev);
183 return ioremap(offset, size);
/**
 * drm_ioremap_nocache - uncached variant of drm_ioremap(). Identical
 * AGP special case (agp_remap() already maps with PAGE_AGP, which is
 * no-cache where the platform supports it); otherwise falls back to
 * ioremap_nocache().
 * NOTE(review): the closing brace/#endif lines are elided here.
 */
186 static inline void *drm_ioremap_nocache(unsigned long offset,
187 unsigned long size, drm_device_t * dev)
189 #if defined(VMAP_4_ARGS)
190 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture) {
191 drm_map_t *map = drm_lookup_map(offset, size, dev);
193 if (map && map->type == _DRM_AGP)
194 return agp_remap(offset, size, dev);
198 return ioremap_nocache(offset, size);
/**
 * drm_ioremapfree - undo drm_ioremap()/drm_ioremap_nocache(). A pointer
 * inside the vmalloc range on a cant_use_aperture device may have come
 * from agp_remap(), so the original bus offset is recovered via
 * drm_follow_page() and looked up; the _DRM_AGP branch (presumably
 * vunmap(), in elided lines) is then taken, otherwise iounmap().
 * NOTE(review): the vunmap/iounmap calls and closing braces fall in
 * lines elided from this listing — confirm against the full file.
 */
201 static inline void drm_ioremapfree(void *pt, unsigned long size,
204 #if defined(VMAP_4_ARGS)
206 * This is a bit ugly. It would be much cleaner if the DRM API would use separate
207 * routines for handling mappings in the AGP space. Hopefully this can be done in
208 * a future revision of the interface...
210 if (drm_core_has_AGP(dev) && dev->agp && dev->agp->cant_use_aperture
211 && ((unsigned long)pt >= VMALLOC_START
212 && (unsigned long)pt < VMALLOC_END)) {
213 unsigned long offset;
/* Rebuild the bus offset: physical page from the page tables, plus the
 * sub-page offset carried in the low bits of the pointer. */
216 offset = drm_follow_page(pt) | ((unsigned long)pt & ~PAGE_MASK);
217 map = drm_lookup_map(offset, size, dev);
218 if (map && map->type == _DRM_AGP) {
/*
 * Out-of-line prototypes for the same three entry points — presumably
 * the branch taken when the inline versions above are compiled out
 * (e.g. a memory-debugging build); the surrounding #else is elided
 * from this listing, so confirm against the full file.
 */
227 extern void *drm_ioremap(unsigned long offset, unsigned long size,
229 extern void *drm_ioremap_nocache(unsigned long offset,
230 unsigned long size, drm_device_t * dev);
231 extern void drm_ioremapfree(void *pt, unsigned long size,