
Make sure TTM memory types other than TT are really unbound when evicted.
[android-x86/external-libdrm.git] / linux-core / drm_bo_move.c
/**************************************************************************
 *
 * Copyright (c) 2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drmP.h"

/**
 * Free the old memory node unless it's a pinned region and we
 * have not been requested to also free pinned regions.
 */

static void drm_bo_free_old_node(struct drm_buffer_object *bo)
{
        struct drm_bo_mem_reg *old_mem = &bo->mem;

        if (old_mem->mm_node && (old_mem->mm_node != bo->pinned_node)) {
                mutex_lock(&bo->dev->struct_mutex);
                drm_mm_put_block(old_mem->mm_node);
                mutex_unlock(&bo->dev->struct_mutex);
        }
        old_mem->mm_node = NULL;
}

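/**
 * Move a TTM-backed buffer between DRM_BO_MEM_LOCAL and a TTM-bindable
 * memory type (helper for driver move callbacks).
 *
 * If the old placement is not local memory, the TTM is evicted or unbound
 * and the old memory node is released (unless it is the pinned node).
 * If the new placement is not local memory, the TTM is bound to the new
 * region before the buffer's memory state is updated.
 */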
int drm_bo_move_ttm(struct drm_buffer_object *bo,
                    int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
        struct drm_ttm *ttm = bo->ttm;
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        uint64_t save_flags = old_mem->flags;
        uint64_t save_proposed_flags = old_mem->proposed_flags;
        int ret;

        if (old_mem->mem_type != DRM_BO_MEM_LOCAL) {
                if (evict)
                        drm_ttm_evict(ttm);
                else
                        drm_ttm_unbind(ttm);

                drm_bo_free_old_node(bo);
                DRM_FLAG_MASKED(old_mem->flags,
                                DRM_BO_FLAG_CACHED | DRM_BO_FLAG_MAPPABLE |
                                DRM_BO_FLAG_MEM_LOCAL, DRM_BO_MASK_MEMTYPE);
                old_mem->mem_type = DRM_BO_MEM_LOCAL;
                save_flags = old_mem->flags;
        }
        if (new_mem->mem_type != DRM_BO_MEM_LOCAL) {
                ret = drm_ttm_bind(ttm, new_mem);
                if (ret)
                        return ret;
        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->proposed_flags = save_proposed_flags;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(drm_bo_move_ttm);

/**
 * Return a kernel virtual address to the buffer object PCI memory.
 *
 * \param dev The DRM device.
 * \param mem The memory region to map.
 * \param virtual Receives the kernel virtual address.
 * \return Failure indication.
 *
 * Returns -EINVAL if the buffer object is currently not mappable.
 * Returns -ENOMEM if the ioremap operation failed.
 * Otherwise returns zero.
 *
 * After a successful call, *virtual contains the virtual address, or NULL
 * if the buffer object content is not accessible through PCI space.
 * Call with bo->mutex held.
 */

int drm_mem_reg_ioremap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
                        void **virtual)
{
        struct drm_buffer_manager *bm = &dev->bm;
        struct drm_mem_type_manager *man = &bm->man[mem->mem_type];
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        int ret;
        void *addr;

        *virtual = NULL;
        ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset, &bus_size);
        if (ret || bus_size == 0)
                return ret;

        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                addr = (void *)(((u8 *) man->io_addr) + bus_offset);
        else {
                addr = ioremap_nocache(bus_base + bus_offset, bus_size);
                if (!addr)
                        return -ENOMEM;
        }
        *virtual = addr;
        return 0;
}
EXPORT_SYMBOL(drm_mem_reg_ioremap);

/**
 * Unmap a mapping obtained using drm_mem_reg_ioremap().
 *
 * \param dev The DRM device.
 * \param mem The memory region that was mapped.
 * \param virtual The virtual address returned by drm_mem_reg_ioremap().
 *
 * Call with bo->mutex held.
 */

void drm_mem_reg_iounmap(struct drm_device *dev, struct drm_bo_mem_reg *mem,
                         void *virtual)
{
        struct drm_buffer_manager *bm;
        struct drm_mem_type_manager *man;

        bm = &dev->bm;
        man = &bm->man[mem->mem_type];

        if (virtual && (man->flags & _DRM_FLAG_NEEDS_IOREMAP))
                iounmap(virtual);
}
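/*
 * Illustrative sketch (not taken from a real caller): clearing an io-backed
 * memory region through a temporary kernel mapping. "dev" and "mem" are
 * assumed to describe an already allocated region, and the caller is
 * assumed to hold the buffer mutex as documented above.
 *
 *      void *virtual;
 *      int ret;
 *
 *      ret = drm_mem_reg_ioremap(dev, mem, &virtual);
 *      if (ret)
 *              return ret;
 *      if (virtual != NULL) {
 *              memset_io(virtual, 0, mem->num_pages << PAGE_SHIFT);
 *              drm_mem_reg_iounmap(dev, mem, virtual);
 *      }
 */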

static int drm_copy_io_page(void *dst, void *src, unsigned long page)
{
        uint32_t *dstP =
            (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
        uint32_t *srcP =
            (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

        int i;
        for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
                iowrite32(ioread32(srcP++), dstP++);
        return 0;
}

static int drm_copy_io_ttm_page(struct drm_ttm *ttm, void *src,
                                unsigned long page)
{
        struct page *d = drm_ttm_get_page(ttm, page);
        void *dst;

        if (!d)
                return -ENOMEM;

        src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
        dst = kmap(d);
        if (!dst)
                return -ENOMEM;

        memcpy_fromio(dst, src, PAGE_SIZE);
        kunmap(d);
        return 0;
}

static int drm_copy_ttm_io_page(struct drm_ttm *ttm, void *dst, unsigned long page)
{
        struct page *s = drm_ttm_get_page(ttm, page);
        void *src;

        if (!s)
                return -ENOMEM;

        dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
        src = kmap(s);
        if (!src)
                return -ENOMEM;

        memcpy_toio(dst, src, PAGE_SIZE);
        kunmap(s);
        return 0;
}

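/**
 * Fallback move that copies the buffer contents with the CPU, page by page,
 * using the io / TTM copy helpers above. Overlapping moves within the same
 * memory type are copied backwards, and the TTM is destroyed when the
 * buffer ends up in a fixed (non-TTM) memory type.
 */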
int drm_bo_move_memcpy(struct drm_buffer_object *bo,
                       int evict, int no_wait, struct drm_bo_mem_reg *new_mem)
{
        struct drm_device *dev = bo->dev;
        struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
        struct drm_ttm *ttm = bo->ttm;
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        struct drm_bo_mem_reg old_copy = *old_mem;
        void *old_iomap;
        void *new_iomap;
        int ret;
        uint64_t save_flags = old_mem->flags;
        uint64_t save_proposed_flags = old_mem->proposed_flags;
        unsigned long i;
        unsigned long page;
        unsigned long add = 0;
        int dir;

        ret = drm_mem_reg_ioremap(dev, old_mem, &old_iomap);
        if (ret)
                return ret;
        ret = drm_mem_reg_ioremap(dev, new_mem, &new_iomap);
        if (ret)
                goto out;

        if (old_iomap == NULL && new_iomap == NULL)
                goto out2;
        if (old_iomap == NULL && ttm == NULL)
                goto out2;

        add = 0;
        dir = 1;

        if ((old_mem->mem_type == new_mem->mem_type) &&
            (new_mem->mm_node->start <
             old_mem->mm_node->start + old_mem->mm_node->size)) {
                dir = -1;
                add = new_mem->num_pages - 1;
        }

        for (i = 0; i < new_mem->num_pages; ++i) {
                page = i * dir + add;
                if (old_iomap == NULL)
                        ret = drm_copy_ttm_io_page(ttm, new_iomap, page);
                else if (new_iomap == NULL)
                        ret = drm_copy_io_ttm_page(ttm, old_iomap, page);
                else
                        ret = drm_copy_io_page(new_iomap, old_iomap, page);
                if (ret)
                        goto out1;
        }
        mb();
out2:
        drm_bo_free_old_node(bo);

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->proposed_flags = save_proposed_flags;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);

        if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (ttm != NULL)) {
                drm_ttm_unbind(ttm);
                drm_ttm_destroy(ttm);
                bo->ttm = NULL;
        }

out1:
        drm_mem_reg_iounmap(dev, new_mem, new_iomap);
out:
        drm_mem_reg_iounmap(dev, &old_copy, old_iomap);
        return ret;
}
EXPORT_SYMBOL(drm_bo_move_memcpy);
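/*
 * Illustrative sketch of a driver move callback built on the two helpers
 * above (hypothetical code; the callback name and the placement checks are
 * assumptions, not taken from an actual driver):
 *
 *      static int foo_bo_move(struct drm_buffer_object *bo, int evict,
 *                             int no_wait, struct drm_bo_mem_reg *new_mem)
 *      {
 *              if (bo->ttm != NULL &&
 *                  (new_mem->mem_type == DRM_BO_MEM_LOCAL ||
 *                   new_mem->mem_type == DRM_BO_MEM_TT))
 *                      return drm_bo_move_ttm(bo, evict, no_wait, new_mem);
 *
 *              return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
 *      }
 *
 * Drivers with a blitter would typically try a hardware copy first and use
 * drm_bo_move_memcpy() only as a fallback.
 */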

/*
 * Transfer a buffer object's memory and LRU status to a newly
 * created object. User-space references remain with the old
 * object. Call with bo->mutex held.
 */

int drm_buffer_object_transfer(struct drm_buffer_object *bo,
                               struct drm_buffer_object **new_obj)
{
        struct drm_buffer_object *fbo;
        struct drm_device *dev = bo->dev;
        struct drm_buffer_manager *bm = &dev->bm;

        fbo = drm_ctl_calloc(1, sizeof(*fbo), DRM_MEM_BUFOBJ);
        if (!fbo)
                return -ENOMEM;

        *fbo = *bo;
        mutex_init(&fbo->mutex);
        mutex_lock(&fbo->mutex);
        mutex_lock(&dev->struct_mutex);

        DRM_INIT_WAITQUEUE(&bo->event_queue);
        INIT_LIST_HEAD(&fbo->ddestroy);
        INIT_LIST_HEAD(&fbo->lru);
        INIT_LIST_HEAD(&fbo->pinned_lru);
#ifdef DRM_ODD_MM_COMPAT
        INIT_LIST_HEAD(&fbo->vma_list);
        INIT_LIST_HEAD(&fbo->p_mm_list);
#endif

        fbo->fence = drm_fence_reference_locked(bo->fence);
        fbo->pinned_node = NULL;
        fbo->mem.mm_node->private = (void *)fbo;
        atomic_set(&fbo->usage, 1);
        atomic_inc(&bm->count);
        mutex_unlock(&dev->struct_mutex);
        mutex_unlock(&fbo->mutex);

        *new_obj = fbo;
        return 0;
}

/*
 * Since a move is underway, we need to block signals in this function.
 * We cannot restart until it has finished.
 */

int drm_bo_move_accel_cleanup(struct drm_buffer_object *bo,
                              int evict, int no_wait, uint32_t fence_class,
                              uint32_t fence_type, uint32_t fence_flags,
                              struct drm_bo_mem_reg *new_mem)
{
        struct drm_device *dev = bo->dev;
        struct drm_mem_type_manager *man = &dev->bm.man[new_mem->mem_type];
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        int ret;
        uint64_t save_flags = old_mem->flags;
        uint64_t save_proposed_flags = old_mem->proposed_flags;
        struct drm_buffer_object *old_obj;

        if (bo->fence)
                drm_fence_usage_deref_unlocked(&bo->fence);
        ret = drm_fence_object_create(dev, fence_class, fence_type,
                                      fence_flags | DRM_FENCE_FLAG_EMIT,
                                      &bo->fence);
        bo->fence_type = fence_type;
        if (ret)
                return ret;

#ifdef DRM_ODD_MM_COMPAT
        /*
         * In this mode, we don't allow pipelining a copy blit,
         * since the buffer will be accessible from user space
         * the moment we return and rebuild the page tables.
         *
         * With normal vm operation, page tables are rebuilt
         * on demand using fault(), which waits for buffer idle.
         */
        if (1)
#else
        if (evict || ((bo->mem.mm_node == bo->pinned_node) &&
                      bo->mem.mm_node != NULL))
#endif
        {
                ret = drm_bo_wait(bo, 0, 1, 0);
                if (ret)
                        return ret;

                drm_bo_free_old_node(bo);

                if ((man->flags & _DRM_FLAG_MEMTYPE_FIXED) && (bo->ttm != NULL)) {
                        drm_ttm_unbind(bo->ttm);
                        drm_ttm_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
        } else {

                /* This should help pipeline ordinary buffer moves.
                 *
                 * Hang old buffer memory on a new buffer object,
                 * and leave it to be released when the GPU
                 * operation has completed.
                 */

                ret = drm_buffer_object_transfer(bo, &old_obj);

                if (ret)
                        return ret;

                if (!(man->flags & _DRM_FLAG_MEMTYPE_FIXED))
                        old_obj->ttm = NULL;
                else
                        bo->ttm = NULL;

                mutex_lock(&dev->struct_mutex);
                list_del_init(&old_obj->lru);
                DRM_FLAG_MASKED(bo->priv_flags, 0, _DRM_BO_FLAG_UNFENCED);
                drm_bo_add_to_lru(old_obj);

                drm_bo_usage_deref_locked(&old_obj);
                mutex_unlock(&dev->struct_mutex);

        }

        *old_mem = *new_mem;
        new_mem->mm_node = NULL;
        old_mem->proposed_flags = save_proposed_flags;
        DRM_FLAG_MASKED(save_flags, new_mem->flags, DRM_BO_MASK_MEMTYPE);
        return 0;
}
EXPORT_SYMBOL(drm_bo_move_accel_cleanup);
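/*
 * Illustrative sketch of an accelerated move (hypothetical driver code;
 * foo_emit_copy_blit() and the fence arguments are assumptions): the driver
 * queues a GPU blit from the old to the new region, then lets
 * drm_bo_move_accel_cleanup() fence the buffer and dispose of the old
 * memory once the blit has signaled.
 *
 *      static int foo_move_blit(struct drm_buffer_object *bo, int evict,
 *                               int no_wait, struct drm_bo_mem_reg *new_mem)
 *      {
 *              int ret = foo_emit_copy_blit(bo->dev, &bo->mem, new_mem);
 *
 *              if (ret)
 *                      return ret;
 *
 *              return drm_bo_move_accel_cleanup(bo, evict, no_wait,
 *                                               0, DRM_FENCE_TYPE_EXE,
 *                                               0, new_mem);
 *      }
 */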

int drm_bo_same_page(unsigned long offset,
                     unsigned long offset2)
{
        return (offset & PAGE_MASK) == (offset2 & PAGE_MASK);
}
EXPORT_SYMBOL(drm_bo_same_page);

unsigned long drm_bo_offset_end(unsigned long offset,
                                unsigned long end)
{
        offset = (offset + PAGE_SIZE) & PAGE_MASK;
        return (end < offset) ? end : offset;
}
EXPORT_SYMBOL(drm_bo_offset_end);
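/*
 * Example (illustrative): the two helpers above are convenient for walking
 * a byte range page by page. drm_bo_offset_end() advances to the next page
 * boundary but never past "end", so the loop below visits each page that
 * [offset, end) touches exactly once; drm_bo_same_page() tests whether two
 * offsets fall within the same page.
 *
 *      while (offset < end) {
 *              unsigned long next = drm_bo_offset_end(offset, end);
 *
 *              handle_range(offset, next);
 *              offset = next;
 *      }
 *
 * handle_range() is a placeholder for whatever per-page work the caller
 * needs to do.
 */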

static pgprot_t drm_kernel_io_prot(uint32_t map_type)
{
        pgprot_t tmp = PAGE_KERNEL;

#if defined(__i386__) || defined(__x86_64__)
#ifdef USE_PAT_WC
#warning using pat
        if (drm_use_pat() && map_type == _DRM_TTM) {
                pgprot_val(tmp) |= _PAGE_PAT;
                return tmp;
        }
#endif
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
        if (map_type == _DRM_TTM)
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

static int drm_bo_ioremap(struct drm_buffer_object *bo, unsigned long bus_base,
                          unsigned long bus_offset, unsigned long bus_size,
                          struct drm_bo_kmap_obj *map)
{
        struct drm_device *dev = bo->dev;
        struct drm_bo_mem_reg *mem = &bo->mem;
        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];

        if (!(man->flags & _DRM_FLAG_NEEDS_IOREMAP)) {
                map->bo_kmap_type = bo_map_premapped;
                map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
        } else {
                map->bo_kmap_type = bo_map_iomap;
                map->virtual = ioremap_nocache(bus_base + bus_offset, bus_size);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

static int drm_bo_kmap_ttm(struct drm_buffer_object *bo,
                           unsigned long start_page, unsigned long num_pages,
                           struct drm_bo_kmap_obj *map)
{
        struct drm_device *dev = bo->dev;
        struct drm_bo_mem_reg *mem = &bo->mem;
        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
        pgprot_t prot;
        struct drm_ttm *ttm = bo->ttm;
        struct page *d;
        int i;

        BUG_ON(!ttm);

        if (num_pages == 1 && (mem->flags & DRM_BO_FLAG_CACHED)) {

                /*
                 * We're mapping a single page, and the desired
                 * page protection is consistent with the bo.
                 */

                map->bo_kmap_type = bo_map_kmap;
                map->page = drm_ttm_get_page(ttm, start_page);
                map->virtual = kmap(map->page);
        } else {
                /*
                 * Populate the part we're mapping.
                 */

                for (i = start_page; i < start_page + num_pages; ++i) {
                        d = drm_ttm_get_page(ttm, i);
                        if (!d)
                                return -ENOMEM;
                }

                /*
                 * We need to use vmap to get the desired page protection
                 * or to make the buffer object look contiguous.
                 */

                prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
                        PAGE_KERNEL :
                        drm_kernel_io_prot(man->drm_bus_maptype);
                map->bo_kmap_type = bo_map_vmap;
                map->virtual = vmap(ttm->pages + start_page,
                                    num_pages, 0, prot);
        }
        return (!map->virtual) ? -ENOMEM : 0;
}

/*
 * This function is to be used for kernel mapping of buffer objects.
 * It chooses the appropriate mapping method depending on the memory type
 * and caching policy the buffer currently has.
 * Mapping multiple pages or buffers that live in io memory is a bit slow and
 * consumes vmalloc space. Be restrictive with such mappings.
 * Mapping single pages usually returns the logical kernel address
 * (which is fast), but may use slower temporary mappings for high memory
 * pages or uncached / write-combined pages.
 *
 * The function fills in a drm_bo_kmap_obj which can be used to return the
 * kernel virtual address of the buffer.
 *
 * Code servicing a non-privileged user request is only allowed to map one
 * page at a time. We might need to implement a better scheme to stop such
 * processes from consuming all vmalloc space.
 */

int drm_bo_kmap(struct drm_buffer_object *bo, unsigned long start_page,
                unsigned long num_pages, struct drm_bo_kmap_obj *map)
{
        int ret;
        unsigned long bus_base;
        unsigned long bus_offset;
        unsigned long bus_size;

        map->virtual = NULL;

        if (num_pages > bo->num_pages)
                return -EINVAL;
        if (start_page > bo->num_pages)
                return -EINVAL;
#if 0
        if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
                return -EPERM;
#endif
        ret = drm_bo_pci_offset(bo->dev, &bo->mem, &bus_base,
                                &bus_offset, &bus_size);

        if (ret)
                return ret;

        if (bus_size == 0) {
                return drm_bo_kmap_ttm(bo, start_page, num_pages, map);
        } else {
                bus_offset += start_page << PAGE_SHIFT;
                bus_size = num_pages << PAGE_SHIFT;
                return drm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
        }
}
EXPORT_SYMBOL(drm_bo_kmap);

void drm_bo_kunmap(struct drm_bo_kmap_obj *map)
{
        if (!map->virtual)
                return;

        switch (map->bo_kmap_type) {
        case bo_map_iomap:
                iounmap(map->virtual);
                break;
        case bo_map_vmap:
                vunmap(map->virtual);
                break;
        case bo_map_kmap:
                kunmap(map->page);
                break;
        case bo_map_premapped:
                break;
        default:
                BUG();
        }
        map->virtual = NULL;
        map->page = NULL;
}
EXPORT_SYMBOL(drm_bo_kunmap);
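/*
 * Illustrative sketch (not from a real caller): mapping the first page of
 * a cached, system-memory buffer to inspect its contents from the kernel.
 * "bo" is assumed to be reserved against eviction by the caller.
 *
 *      struct drm_bo_kmap_obj kmap;
 *      uint32_t value;
 *      int ret;
 *
 *      ret = drm_bo_kmap(bo, 0, 1, &kmap);
 *      if (ret)
 *              return ret;
 *      value = *(uint32_t *)kmap.virtual;
 *      drm_bo_kunmap(&kmap);
 */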

int drm_bo_pfn_prot(struct drm_buffer_object *bo,
                    unsigned long dst_offset,
                    unsigned long *pfn,
                    pgprot_t *prot)
{
        struct drm_bo_mem_reg *mem = &bo->mem;
        struct drm_device *dev = bo->dev;
        unsigned long bus_offset;
        unsigned long bus_size;
        unsigned long bus_base;
        struct drm_mem_type_manager *man = &dev->bm.man[mem->mem_type];
        int ret;

        ret = drm_bo_pci_offset(dev, mem, &bus_base, &bus_offset,
                                &bus_size);
        if (ret)
                return -EINVAL;

        if (bus_size != 0)
                *pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
        else if (!bo->ttm)
                return -EINVAL;
        else
                *pfn = page_to_pfn(drm_ttm_get_page(bo->ttm, dst_offset >> PAGE_SHIFT));

        *prot = (mem->flags & DRM_BO_FLAG_CACHED) ?
                PAGE_KERNEL : drm_kernel_io_prot(man->drm_bus_maptype);

        return 0;
}
EXPORT_SYMBOL(drm_bo_pfn_prot);

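/*
 * Illustrative sketch (hypothetical fault-handler fragment): translating a
 * page offset into a buffer object into a pfn and protection suitable for
 * vm_insert_pfn(). "bo", "vma", "address" and "page_offset" are assumed to
 * come from the fault context; error handling is reduced to the minimum.
 *
 *      unsigned long pfn;
 *      pgprot_t prot;
 *
 *      if (drm_bo_pfn_prot(bo, page_offset << PAGE_SHIFT, &pfn, &prot))
 *              return VM_FAULT_SIGBUS;
 *
 *      vma->vm_page_prot = prot;
 *      vm_insert_pfn(vma, address, pfn);
 */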