nouveau: enable accelerated move to sysmem
linux-core/nouveau_bo.c (android-x86/external-libdrm.git)
/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

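/* Hand TTM a backend that matches how the GART is driven: the DRM core's
 * AGP backend for AGP cards, nouveau's own SGDMA backend otherwise.
 */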
static struct drm_ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        switch (dev_priv->gart_info.type) {
        case NOUVEAU_GART_AGP:
                return drm_agp_init_ttm(dev);
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

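/* Report which fence types a buffer will need: DRM_FENCE_TYPE_EXE is
 * always required, and buffers the GPU may read or write request one
 * additional driver fence type bit (hence 3 rather than 1 below).
 */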
static int
nouveau_bo_fence_type(struct drm_buffer_object *bo,
                      uint32_t *fclass, uint32_t *type)
{
        /* When we get called, *fclass is set to the requested fence class */

        if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
                *type = 3;
        else
                *type = 1;
        return 0;
}

static int
nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
{
        /* We'll do this from user space. */
        return 0;
}

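/* Describe each memory pool to the buffer manager: whether it is
 * CPU-mappable, whether mappings need ioremap, and where the aperture
 * backing it lives.  PCI resource 1 is assumed to be the VRAM aperture.
 */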
static int
nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
                         struct drm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        switch (type) {
        case DRM_BO_MEM_LOCAL:
                man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                             _DRM_FLAG_MEMTYPE_CACHED;
                man->drm_bus_maptype = 0;
                break;
        case DRM_BO_MEM_VRAM:
                man->flags = _DRM_FLAG_MEMTYPE_FIXED |
                             _DRM_FLAG_MEMTYPE_MAPPABLE |
                             _DRM_FLAG_NEEDS_IOREMAP;
                man->io_addr = NULL;
                man->drm_bus_maptype = _DRM_FRAME_BUFFER;
                man->io_offset = drm_get_resource_start(dev, 1);
                man->io_size = drm_get_resource_len(dev, 1);
                if (man->io_size > nouveau_mem_fb_amount(dev))
                        man->io_size = nouveau_mem_fb_amount(dev);
                break;
        case DRM_BO_MEM_PRIV0:
                /* Unmappable VRAM */
                man->flags = _DRM_FLAG_MEMTYPE_CMA;
                man->drm_bus_maptype = 0;
                break;
        case DRM_BO_MEM_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                                     _DRM_FLAG_MEMTYPE_CSELECT |
                                     _DRM_FLAG_NEEDS_IOREMAP;
                        man->drm_bus_maptype = _DRM_AGP;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                                     _DRM_FLAG_MEMTYPE_CSELECT |
                                     _DRM_FLAG_MEMTYPE_CMA;
                        man->drm_bus_maptype = _DRM_SCATTER_GATHER;
                        break;
                default:
                        DRM_ERROR("Unknown GART type: %d\n",
                                  dev_priv->gart_info.type);
                        return -EINVAL;
                }

                man->io_offset = dev_priv->gart_info.aper_base;
                man->io_size   = dev_priv->gart_info.aper_size;
                man->io_addr   = NULL;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

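/* Where evicted buffers go: GART and system buffers fall back to plain
 * system memory, while VRAM contents are pushed out to cached GART pages
 * that the copy engine can still reach.
 */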
static uint64_t
nouveau_bo_evict_flags(struct drm_buffer_object *bo)
{
        switch (bo->mem.mem_type) {
        case DRM_BO_MEM_LOCAL:
        case DRM_BO_MEM_TT:
                return DRM_BO_FLAG_MEM_LOCAL;
        default:
                return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
        }
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
 */
static int
nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
                     struct drm_bo_mem_reg *new_mem)
{
        struct drm_device *dev = bo->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_drm_channel *dchan = &dev_priv->channel;
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        uint32_t src_offset, dst_offset;
        uint32_t srch, dsth, page_count;

        /* Can happen during init/takedown */
        if (!dchan->chan)
                return -EINVAL;

        /* Retarget the M2MF object's DMA source/destination (GART or
         * VRAM) if they changed since the previous copy.
         */
        srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
        dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
        if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
                dchan->m2mf_dma_source = srch;
                dchan->m2mf_dma_destin = dsth;

                BEGIN_RING(NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
                OUT_RING  (dchan->m2mf_dma_source);
                OUT_RING  (dchan->m2mf_dma_destin);
        }

        /* Copy one page per line, at most 2047 lines per transfer, and
         * advance the offsets between passes so buffers larger than 2047
         * pages are copied in full.
         */
        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (src_offset);
                OUT_RING  (dst_offset);
                OUT_RING  (PAGE_SIZE); /* src_pitch */
                OUT_RING  (PAGE_SIZE); /* dst_pitch */
                OUT_RING  (PAGE_SIZE); /* line_length */
                OUT_RING  (line_count);
                OUT_RING  ((1<<8)|(1<<0)); /* src/dst increment: 1 byte */
                OUT_RING  (0); /* buffer notify */
                BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (0);

                page_count -= line_count;
                src_offset += line_count << PAGE_SHIFT;
                dst_offset += line_count << PAGE_SHIFT;
        }

        return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
                                         DRM_FENCE_TYPE_EXE, 0, new_mem);
}

/* Flip pages into the GART and move if we can. */
static int
nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait,
                      struct drm_bo_mem_reg *new_mem)
{
        struct drm_device *dev = bo->dev;
        struct drm_bo_mem_reg tmp_mem;
        int ret;

        /* Set up a temporary, cached GART placement to bounce through. */
        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
                                  DRM_BO_FLAG_CACHED |
                                  DRM_BO_FLAG_FORCE_CACHING);

        ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
        if (ret)
                return ret;

        /* Bind the buffer's pages into the GART, blit into them with the
         * M2MF engine, then let the TTM move finish the trip to sysmem.
         */
        ret = drm_ttm_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out_cleanup;

        ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem);
        if (ret)
                goto out_cleanup;

        ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);

out_cleanup:
        if (tmp_mem.mm_node) {
                mutex_lock(&dev->struct_mutex);
                if (tmp_mem.mm_node != bo->pinned_node)
                        drm_mm_put_block(tmp_mem.mm_node);
                tmp_mem.mm_node = NULL;
                mutex_unlock(&dev->struct_mutex);
        }

        return ret;
}

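/* Top-level move hook: bounce through the GART for VRAM->sysmem moves,
 * blit with M2MF when both placements are GPU-visible, and fall back to
 * a CPU copy whenever an accelerated path fails or does not exist yet.
 */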
static int
nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
                struct drm_bo_mem_reg *new_mem)
{
        struct drm_bo_mem_reg *old_mem = &bo->mem;

        if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
                if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
                if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
                /* No accelerated sysmem->VRAM path yet, use the CPU. */
                if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/)
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else {
                if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }

        return 0;
}

static void
nouveau_bo_flush_ttm(struct drm_ttm *ttm)
{
}

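/* Placement preference order for new allocations, and the order tried
 * instead when the preferred pools are busy.
 */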
static uint32_t nouveau_mem_prios[] = {
        DRM_BO_MEM_PRIV0,
        DRM_BO_MEM_VRAM,
        DRM_BO_MEM_TT,
        DRM_BO_MEM_LOCAL
};
static uint32_t nouveau_busy_prios[] = {
        DRM_BO_MEM_TT,
        DRM_BO_MEM_PRIV0,
        DRM_BO_MEM_VRAM,
        DRM_BO_MEM_LOCAL
};

struct drm_bo_driver nouveau_bo_driver = {
        .mem_type_prio = nouveau_mem_prios,
        .mem_busy_prio = nouveau_busy_prios,
        .num_mem_type_prio = ARRAY_SIZE(nouveau_mem_prios),
        .num_mem_busy_prio = ARRAY_SIZE(nouveau_busy_prios),
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .fence_type = nouveau_bo_fence_type,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .ttm_cache_flush = nouveau_bo_flush_ttm,
        .command_stream_barrier = NULL
};