linux-core/drm_ttm.c
/**************************************************************************
 *
 * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
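/*
 * Flush a single page from the CPU caches with the x86 clflush instruction,
 * one cache line at a time. kmap_atomic() is used so highmem pages can be
 * flushed as well.
 */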
static void drm_clflush_page(struct page *page)
{
        uint8_t *page_virtual;
        unsigned int i;

        if (unlikely(page == NULL))
                return;

        page_virtual = kmap_atomic(page, KM_USER0);

        for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                clflush(page_virtual + i);

        kunmap_atomic(page_virtual, KM_USER0);
}

static void drm_ttm_cache_flush_clflush(struct page *pages[], unsigned long num_pages)
{
        unsigned long i;

        mb();
        for (i = 0; i < num_pages; ++i)
                drm_clflush_page(*pages++);
        mb();
}
#endif

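/*
 * Fallback cache flush: when clflush is not available, run flush_agp_cache()
 * on every CPU via an IPI.
 */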
static void drm_ttm_ipi_handler(void *null)
{
        flush_agp_cache();
}

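/*
 * Flush all pages backing a ttm from the CPU caches. clflush is preferred
 * when the CPU supports it; otherwise every CPU flushes its caches via the
 * IPI handler above. Kernel 2.6.27 dropped the 'retry' argument from
 * on_each_cpu(), hence the version check below.
 */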
void drm_ttm_cache_flush(struct page *pages[], unsigned long num_pages)
{

#if defined(CONFIG_X86) && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
        if (cpu_has_clflush) {
                drm_ttm_cache_flush_clflush(pages, num_pages);
                return;
        }
#endif
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
        if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1))
#else
        if (on_each_cpu(drm_ttm_ipi_handler, NULL, 1, 1) != 0)
#endif
                DRM_ERROR("Timed out waiting for drm cache flush.\n");
}
EXPORT_SYMBOL(drm_ttm_cache_flush);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 *
 * Uses kmalloc if possible. Otherwise falls back to vmalloc.
 */
static void drm_ttm_alloc_page_directory(struct drm_ttm *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
        ttm->pages = NULL;

        if (drm_alloc_memctl(size))
                return;

        if (size <= PAGE_SIZE)
                ttm->pages = drm_calloc(1, size, DRM_MEM_TTM);

        if (!ttm->pages) {
                ttm->pages = vmalloc_user(size);
                if (ttm->pages)
                        ttm->page_flags |= DRM_TTM_PAGEDIR_VMALLOC;
        }
        if (!ttm->pages)
                drm_free_memctl(size);
}

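/*
 * Free the page directory, matching how it was allocated (vfree() for the
 * vmalloc fallback, drm_free() otherwise), and return the memctl accounting.
 */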
static void drm_ttm_free_page_directory(struct drm_ttm *ttm)
{
        unsigned long size = ttm->num_pages * sizeof(*ttm->pages);

        if (ttm->page_flags & DRM_TTM_PAGEDIR_VMALLOC) {
                vfree(ttm->pages);
                ttm->page_flags &= ~DRM_TTM_PAGEDIR_VMALLOC;
        } else {
                drm_free(ttm->pages, size, DRM_MEM_TTM);
        }
        drm_free_memctl(size);
        ttm->pages = NULL;
}

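/*
 * Allocate one zeroed page for a ttm, charged against the DRM memory
 * accounting. GFP_DMA32 keeps the page below 4GB for devices that cannot
 * address memory above that limit.
 */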
static struct page *drm_ttm_alloc_page(void)
{
        struct page *page;

        if (drm_alloc_memctl(PAGE_SIZE))
                return NULL;

        page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
        if (!page) {
                drm_free_memctl(PAGE_SIZE);
                return NULL;
        }
        return page;
}

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */
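/*
 * Note: callers pass either 0 or DRM_TTM_PAGE_UNCACHED as 'noncached', so the
 * value can be compared and masked directly against ttm->page_flags.
 */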

static int drm_ttm_set_caching(struct drm_ttm *ttm, int noncached)
{
        int i;
        struct page **cur_page;
        int do_tlbflush = 0;

        if ((ttm->page_flags & DRM_TTM_PAGE_UNCACHED) == noncached)
                return 0;

        if (noncached)
                drm_ttm_cache_flush(ttm->pages, ttm->num_pages);

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages + i;
                if (*cur_page) {
                        if (!PageHighMem(*cur_page)) {
                                if (noncached) {
                                        map_page_into_agp(*cur_page);
                                } else {
                                        unmap_page_from_agp(*cur_page);
                                }
                                do_tlbflush = 1;
                        }
                }
        }
        if (do_tlbflush)
                flush_agp_mappings();

        DRM_FLAG_MASKED(ttm->page_flags, noncached, DRM_TTM_PAGE_UNCACHED);

        return 0;
}

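/*
 * Release pages that were pinned with get_user_pages(). Pages that may have
 * been written through the GPU mapping are marked dirty before the reference
 * taken at drm_ttm_set_user() time is dropped. The shared dummy read page is
 * skipped: it is only ever mapped read-only.
 */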
static void drm_ttm_free_user_pages(struct drm_ttm *ttm)
{
        int write;
        int dirty;
        struct page *page;
        int i;

        BUG_ON(!(ttm->page_flags & DRM_TTM_PAGE_USER));
        write = ((ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0);
        dirty = ((ttm->page_flags & DRM_TTM_PAGE_USER_DIRTY) != 0);

        for (i = 0; i < ttm->num_pages; ++i) {
                page = ttm->pages[i];
                if (page == NULL)
                        continue;

                if (page == ttm->dummy_read_page) {
                        BUG_ON(write);
                        continue;
                }

                if (write && dirty && !PageReserved(page))
                        set_page_dirty_lock(page);

                ttm->pages[i] = NULL;
                put_page(page);
        }
}

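/*
 * Free pages that were allocated through drm_ttm_alloc_page(), returning the
 * memctl accounting and updating the buffer manager's page count. A page
 * count other than one, or a remaining mapping, indicates a bug and is
 * reported before the page is freed.
 */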
static void drm_ttm_free_alloced_pages(struct drm_ttm *ttm)
{
        int i;
        struct drm_buffer_manager *bm = &ttm->dev->bm;
        struct page **cur_page;

        for (i = 0; i < ttm->num_pages; ++i) {
                cur_page = ttm->pages + i;
                if (*cur_page) {
                        if (page_count(*cur_page) != 1)
                                DRM_ERROR("Erroneous page count. Leaking pages.\n");
                        if (page_mapped(*cur_page))
                                DRM_ERROR("Erroneous map count. Leaking page mappings.\n");
                        __free_page(*cur_page);
                        drm_free_memctl(PAGE_SIZE);
                        --bm->cur_pages;
                }
        }
}

/*
 * Free all resources associated with a ttm.
 */

int drm_ttm_destroy(struct drm_ttm *ttm)
{
        struct drm_ttm_backend *be;

        if (!ttm)
                return 0;

        be = ttm->be;
        if (be) {
                be->func->destroy(be);
                ttm->be = NULL;
        }

        if (ttm->pages) {
                if (ttm->page_flags & DRM_TTM_PAGE_UNCACHED)
                        drm_ttm_set_caching(ttm, 0);

                if (ttm->page_flags & DRM_TTM_PAGE_USER)
                        drm_ttm_free_user_pages(ttm);
                else
                        drm_ttm_free_alloced_pages(ttm);

                drm_ttm_free_page_directory(ttm);
        }

        drm_ctl_free(ttm, sizeof(*ttm), DRM_MEM_TTM);
        return 0;
}

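/*
 * Return the page backing slot 'index', allocating it if necessary. The page
 * directory is filled from both ends: highmem pages are stored from the top
 * (first_himem_page counts down) and lowmem pages from the bottom
 * (last_lomem_page counts up), so the loop keeps allocating until the slot
 * at 'index' has been populated.
 */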
struct page *drm_ttm_get_page(struct drm_ttm *ttm, int index)
{
        struct page *p;
        struct drm_buffer_manager *bm = &ttm->dev->bm;

        while (NULL == (p = ttm->pages[index])) {
                p = drm_ttm_alloc_page();
                if (!p)
                        return NULL;

                if (PageHighMem(p))
                        ttm->pages[--ttm->first_himem_page] = p;
                else
                        ttm->pages[++ttm->last_lomem_page] = p;

                ++bm->cur_pages;
        }
        return p;
}
EXPORT_SYMBOL(drm_ttm_get_page);

/**
 * drm_ttm_set_user:
 *
 * @ttm: the ttm to map pages to. This must always be
 * a freshly created ttm.
 *
 * @tsk: the task from whose address space the pages
 * are mapped.
 *
 * @start: the starting user-space address of the range.
 *
 * @num_pages: the number of pages to map. Must equal ttm->num_pages.
 *
 * Map a range of user addresses to a new ttm object. This
 * provides access to user memory from the graphics device.
 * Whether the pages are pinned for writing is determined by the
 * DRM_TTM_PAGE_WRITE bit in ttm->page_flags.
 */
int drm_ttm_set_user(struct drm_ttm *ttm,
                     struct task_struct *tsk,
                     unsigned long start,
                     unsigned long num_pages)
{
        struct mm_struct *mm = tsk->mm;
        int ret;
        int write = (ttm->page_flags & DRM_TTM_PAGE_WRITE) != 0;

        BUG_ON(num_pages != ttm->num_pages);
        BUG_ON((ttm->page_flags & DRM_TTM_PAGE_USER) == 0);

        down_read(&mm->mmap_sem);
        ret = get_user_pages(tsk, mm, start, num_pages,
                             write, 0, ttm->pages, NULL);
        up_read(&mm->mmap_sem);

        if (ret != num_pages && write) {
                drm_ttm_free_user_pages(ttm);
                return -ENOMEM;
        }

        return 0;
}

/**
 * drm_ttm_populate:
 *
 * @ttm: the object to allocate pages for
 *
 * Allocate pages for all unset page entries, then
 * call the backend to create the hardware mappings
 */
int drm_ttm_populate(struct drm_ttm *ttm)
{
        struct page *page;
        unsigned long i;
        struct drm_ttm_backend *be;

        if (ttm->state != ttm_unpopulated)
                return 0;

        be = ttm->be;

        for (i = 0; i < ttm->num_pages; ++i) {
                page = drm_ttm_get_page(ttm, i);
                if (!page)
                        return -ENOMEM;
        }

        be->func->populate(be, ttm->num_pages, ttm->pages, ttm->dummy_read_page);
        ttm->state = ttm_unbound;
        return 0;
}

/**
 * drm_ttm_create:
 *
 * @dev: the drm_device
 *
 * @size: The size (in bytes) of the desired object
 *
 * @page_flags: various DRM_TTM_PAGE_* flags. See drm_object.h.
 *
 * @dummy_read_page: a shared page that the backend may use in place of
 * pages that are only ever read.
 *
 * Allocate and initialize a ttm, leaving it unpopulated at this time.
 */

struct drm_ttm *drm_ttm_create(struct drm_device *dev, unsigned long size,
                               uint32_t page_flags, struct page *dummy_read_page)
{
        struct drm_bo_driver *bo_driver = dev->driver->bo_driver;
        struct drm_ttm *ttm;

        if (!bo_driver)
                return NULL;

        ttm = drm_ctl_calloc(1, sizeof(*ttm), DRM_MEM_TTM);
        if (!ttm)
                return NULL;

        ttm->dev = dev;
        atomic_set(&ttm->vma_count, 0);

        ttm->destroy = 0;
        ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        ttm->first_himem_page = ttm->num_pages;
        ttm->last_lomem_page = -1;

        ttm->page_flags = page_flags;

        ttm->dummy_read_page = dummy_read_page;

        /*
         * Account also for AGP module memory usage.
         */

        drm_ttm_alloc_page_directory(ttm);
        if (!ttm->pages) {
                drm_ttm_destroy(ttm);
                DRM_ERROR("Failed allocating page table\n");
                return NULL;
        }
        ttm->be = bo_driver->create_ttm_backend_entry(dev);
        if (!ttm->be) {
                drm_ttm_destroy(ttm);
                DRM_ERROR("Failed creating ttm backend entry\n");
                return NULL;
        }
        ttm->state = ttm_unpopulated;
        return ttm;
}

/**
 * drm_ttm_evict:
 *
 * @ttm: the object to be unbound from the aperture.
 *
 * Transition a ttm from bound to evicted, where it
 * isn't present in the aperture, but various caches may
 * not be consistent.
 */
void drm_ttm_evict(struct drm_ttm *ttm)
{
        struct drm_ttm_backend *be = ttm->be;
        int ret;

        if (ttm->state == ttm_bound) {
                ret = be->func->unbind(be);
                BUG_ON(ret);
        }

        ttm->state = ttm_evicted;
}

/**
 * drm_ttm_fixup_caching:
 *
 * @ttm: the object to set unbound
 *
 * XXX this function is misnamed. Transition a ttm from evicted to
 * unbound, flushing caches as appropriate.
 */
void drm_ttm_fixup_caching(struct drm_ttm *ttm)
{

        if (ttm->state == ttm_evicted) {
                struct drm_ttm_backend *be = ttm->be;
                if (be->func->needs_ub_cache_adjust(be))
                        drm_ttm_set_caching(ttm, 0);
                ttm->state = ttm_unbound;
        }
}

/**
 * drm_ttm_unbind:
 *
 * @ttm: the object to unbind from the graphics device
 *
 * Unbind an object from the aperture. This removes the mappings
 * from the graphics device and flushes caches if necessary.
 */
void drm_ttm_unbind(struct drm_ttm *ttm)
{
        if (ttm->state == ttm_bound)
                drm_ttm_evict(ttm);

        drm_ttm_fixup_caching(ttm);
}

/**
 * drm_ttm_bind:
 *
 * @ttm: the ttm object to bind to the graphics device
 *
 * @bo_mem: the aperture memory region which will hold the object
 *
 * Bind a ttm object to the aperture. This ensures that the necessary
 * pages are allocated, flushes CPU caches as needed and, for user-backed
 * ttms, sets DRM_TTM_PAGE_USER_DIRTY to indicate that the pages may be
 * modified by the GPU.
 */
int drm_ttm_bind(struct drm_ttm *ttm, struct drm_bo_mem_reg *bo_mem)
{
        struct drm_bo_driver *bo_driver;
        int ret = 0;
        struct drm_ttm_backend *be;

        if (!ttm)
                return -EINVAL;
        if (ttm->state == ttm_bound)
                return 0;

        bo_driver = ttm->dev->driver->bo_driver;
        be = ttm->be;

        ret = drm_ttm_populate(ttm);
        if (ret)
                return ret;

        if (ttm->state == ttm_unbound && !(bo_mem->flags & DRM_BO_FLAG_CACHED))
                drm_ttm_set_caching(ttm, DRM_TTM_PAGE_UNCACHED);
        else if ((bo_mem->flags & DRM_BO_FLAG_CACHED_MAPPED) &&
                 bo_driver->ttm_cache_flush)
                bo_driver->ttm_cache_flush(ttm);

        ret = be->func->bind(be, bo_mem);
        if (ret) {
                ttm->state = ttm_evicted;
                DRM_ERROR("Couldn't bind backend.\n");
                return ret;
        }

        ttm->state = ttm_bound;
        if (ttm->page_flags & DRM_TTM_PAGE_USER)
                ttm->page_flags |= DRM_TTM_PAGE_USER_DIRTY;
        return 0;
}
EXPORT_SYMBOL(drm_ttm_bind);
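
/*
 * Typical lifetime of a ttm, restricted to the functions defined in this
 * file. This is a minimal sketch for illustration only; the real callers in
 * the buffer-object code also handle locking, fencing and eviction, and
 * 'dev', 'size', 'dummy_read_page' and 'bo_mem' are assumed to be supplied
 * by the caller:
 *
 *	struct drm_ttm *ttm;
 *	int ret;
 *
 *	ttm = drm_ttm_create(dev, size, 0, dummy_read_page);
 *	if (!ttm)
 *		return -ENOMEM;
 *
 *	ret = drm_ttm_bind(ttm, bo_mem);	// populates the ttm, then binds it
 *	if (ret) {
 *		drm_ttm_destroy(ttm);
 *		return ret;
 *	}
 *
 *	// ... the GPU uses the memory ...
 *
 *	drm_ttm_unbind(ttm);			// evict and fix up caching
 *	drm_ttm_destroy(ttm);			// free the pages and the ttm itself
 */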