OSDN Git Service

drm: convert drawable handling to use Linux idr
[android-x86/external-libdrm.git] / linux-core / drm_ttm.c
1 /**************************************************************************
2  *
3  * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
4  * All Rights Reserved.
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining a
7  * copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sub license, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the
15  * next paragraph) shall be included in all copies or substantial portions
16  * of the Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 /*
28  * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
29  */
30
31 #include "drmP.h"
32
/*
 * Per-CPU IPI callback: flush this CPU's AGP/chipset cache.
 * The argument is unused; it exists only to match the IPI callback type.
 */
static void drm_ttm_ipi_handler(void *unused)
{
	flush_agp_cache();
}
37
38 static void drm_ttm_cache_flush(void)
39 {
40         if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
41                 DRM_ERROR("Timed out waiting for drm cache flush.\n");
42 }
43
44 /*
45  * Use kmalloc if possible. Otherwise fall back to vmalloc.
46  */
47
48 static void ttm_alloc_pages(drm_ttm_t * ttm)
49 {
50         unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
51         ttm->pages = NULL;
52
53         if (drm_alloc_memctl(size))
54                 return;
55
56         if (size <= PAGE_SIZE) {
57                 ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);
58         }
59         if (!ttm->pages) {
60                 ttm->pages = vmalloc_user(size);
61                 if (ttm->pages)
62                         ttm->page_flags |= DRM_TTM_PAGE_VMALLOC;
63         }
64         if (!ttm->pages) {
65                 drm_free_memctl(size);
66         }
67 }
68
69 static void ttm_free_pages(drm_ttm_t * ttm)
70 {
71         unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
72
73         if (ttm->page_flags & DRM_TTM_PAGE_VMALLOC) {
74                 vfree(ttm->pages);
75                 ttm->page_flags &= ~DRM_TTM_PAGE_VMALLOC;
76         } else {
77                 drm_free(ttm->pages, size, DRM_MEM_TTM);
78         }
79         drm_free_memctl(size);
80         ttm->pages = NULL;
81 }
82
83 static struct page *drm_ttm_alloc_page(void)
84 {
85         struct page *page;
86
87         if (drm_alloc_memctl(PAGE_SIZE)) {
88                 return NULL;
89         }
90         page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
91         if (!page) {
92                 drm_free_memctl(PAGE_SIZE);
93                 return NULL;
94         }
95 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
96         SetPageLocked(page);
97 #else
98         SetPageReserved(page);
99 #endif
100         return page;
101 }
102
103 /*
104  * Change caching policy for the linear kernel map
105  * for range of pages in a ttm.
106  */
107
108 static int drm_set_caching(drm_ttm_t * ttm, int noncached)
109 {
110         int i;
111         struct page **cur_page;
112         int do_tlbflush = 0;
113
114         if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
115                 return 0;
116
117         if (noncached)
118                 drm_ttm_cache_flush();
119
120         for (i = 0; i < ttm->num_pages; ++i) {
121                 cur_page = ttm->pages + i;
122                 if (*cur_page) {
123                         if (!PageHighMem(*cur_page)) {
124                                 if (noncached) {
125                                         map_page_into_agp(*cur_page);
126                                 } else {
127                                         unmap_page_from_agp(*cur_page);
128                                 }
129                                 do_tlbflush = 1;
130                         }
131                 }
132         }
133         if (do_tlbflush)
134                 flush_agp_mappings();
135
136         DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);
137
138         return 0;
139 }
140
141 /*
142  * Free all resources associated with a ttm.
143  */
144
145 int drm_destroy_ttm(drm_ttm_t * ttm)
146 {
147
148         int i;
149         struct page **cur_page;
150         drm_ttm_backend_t *be;
151
152         if (!ttm)
153                 return 0;
154
155         be = ttm->be;
156         if (be) {
157                 be->func->destroy(be);
158                 ttm->be = NULL;
159         }
160
161         if (ttm->pages) {
162                 drm_buffer_manager_t *bm = &ttm->dev->bm;
163                 if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
164                         drm_set_caching(ttm, 0);
165
166                 for (i = 0; i < ttm->num_pages; ++i) {
167                         cur_page = ttm->pages + i;
168                         if (*cur_page) {
169 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15))
170                                 unlock_page(*cur_page);
171 #else
172                                 ClearPageReserved(*cur_page);
173 #endif
174                                 if (page_count(*cur_page) != 1) {
175                                         DRM_ERROR("Erroneous page count. "
176                                                   "Leaking pages.\n");
177                                 }
178                                 if (page_mapped(*cur_page)) {
179                                         DRM_ERROR("Erroneous map count. "
180                                                   "Leaking page mappings.\n");
181                                 }
182                                 __free_page(*cur_page);
183                                 drm_free_memctl(PAGE_SIZE);
184                                 --bm->cur_pages;
185                         }
186                 }
187                 ttm_free_pages(ttm);
188         }
189
190         drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
191         return 0;
192 }
193
194 struct page *drm_ttm_get_page(drm_ttm_t * ttm, int index)
195 {
196         struct page *p;
197         drm_buffer_manager_t *bm = &ttm->dev->bm;
198
199         p = ttm->pages[index];
200         if (!p) {
201                 p = drm_ttm_alloc_page();
202                 if (!p)
203                         return NULL;
204                 ttm->pages[index] = p;
205                 ++bm->cur_pages;
206         }
207         return p;
208 }
209
210 static int drm_ttm_populate(drm_ttm_t * ttm)
211 {
212         struct page *page;
213         unsigned long i;
214         drm_ttm_backend_t *be;
215
216         if (ttm->state != ttm_unpopulated)
217                 return 0;
218
219         be = ttm->be;
220         for (i = 0; i < ttm->num_pages; ++i) {
221                 page = drm_ttm_get_page(ttm, i);
222                 if (!page)
223                         return -ENOMEM;
224         }
225         be->func->populate(be, ttm->num_pages, ttm->pages);
226         ttm->state = ttm_unbound;
227         return 0;
228 }
229
230 /*
231  * Initialize a ttm.
232  */
233
234 drm_ttm_t *drm_ttm_init(struct drm_device * dev, unsigned long size)
235 {
236         drm_bo_driver_t *bo_driver = dev->driver->bo_driver;
237         drm_ttm_t *ttm;
238
239         if (!bo_driver)
240                 return NULL;
241
242         ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
243         if (!ttm)
244                 return NULL;
245
246         ttm->dev = dev;
247         atomic_set(&ttm->vma_count, 0);
248
249         ttm->destroy = 0;
250         ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
251
252         ttm->page_flags = 0;
253
254         /*
255          * Account also for AGP module memory usage.
256          */
257
258         ttm_alloc_pages(ttm);
259         if (!ttm->pages) {
260                 drm_destroy_ttm(ttm);
261                 DRM_ERROR("Failed allocating page table\n");
262                 return NULL;
263         }
264         ttm->be = bo_driver->create_ttm_backend_entry(dev);
265         if (!ttm->be) {
266                 drm_destroy_ttm(ttm);
267                 DRM_ERROR("Failed creating ttm backend entry\n");
268                 return NULL;
269         }
270         ttm->state = ttm_unpopulated;
271         return ttm;
272 }
273
274 /*
275  * Unbind a ttm region from the aperture.
276  */
277
278 void drm_ttm_evict(drm_ttm_t * ttm)
279 {
280         drm_ttm_backend_t *be = ttm->be;
281         int ret;
282
283         if (ttm->state == ttm_bound) {
284                 ret = be->func->unbind(be);
285                 BUG_ON(ret);
286         }
287
288         ttm->state = ttm_evicted;
289 }
290
291 void drm_ttm_fixup_caching(drm_ttm_t * ttm)
292 {
293
294         if (ttm->state == ttm_evicted) {
295                 drm_ttm_backend_t *be = ttm->be;
296                 if (be->func->needs_ub_cache_adjust(be)) {
297                         drm_set_caching(ttm, 0);
298                 }
299                 ttm->state = ttm_unbound;
300         }
301 }
302
303 void drm_ttm_unbind(drm_ttm_t * ttm)
304 {
305         if (ttm->state == ttm_bound)
306                 drm_ttm_evict(ttm);
307
308         drm_ttm_fixup_caching(ttm);
309 }
310
311 int drm_bind_ttm(drm_ttm_t * ttm, int cached, unsigned long aper_offset)
312 {
313
314         int ret = 0;
315         drm_ttm_backend_t *be;
316
317         if (!ttm)
318                 return -EINVAL;
319         if (ttm->state == ttm_bound)
320                 return 0;
321
322         be = ttm->be;
323
324         ret = drm_ttm_populate(ttm);
325         if (ret)
326                 return ret;
327
328         if (ttm->state == ttm_unbound && !cached) {
329                 drm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
330         }
331
332         if ((ret = be->func->bind(be, aper_offset, cached))) {
333                 ttm->state = ttm_evicted;
334                 DRM_ERROR("Couldn't bind backend.\n");
335                 return ret;
336         }
337
338         ttm->aper_offset = aper_offset;
339         ttm->state = ttm_bound;
340
341         return 0;
342 }
343
344 EXPORT_SYMBOL(drm_bind_ttm);