/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

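/* Select the TTM backend matching the GART type set up at driver init:
 * either the AGP aperture or nouveau's own SGDMA page table.
 */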
static struct drm_ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        switch (dev_priv->gart_info.type) {
        case NOUVEAU_GART_AGP:
                return drm_agp_init_ttm(dev);
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

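/* Report the fence types a buffer will need: type 3 for buffers the GPU
 * may read or write, type 1 otherwise.  Only fence class 0 is used.
 */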
static int
nouveau_bo_fence_type(struct drm_buffer_object *bo,
                      uint32_t *fclass, uint32_t *type)
{
        *fclass = 0;
        if (bo->mem.mask & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
                *type = 3;
        else
                *type = 1;
        return 0;
}

static int
nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
{
        /* We'll do this from user space. */
        return 0;
}

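/* Describe each memory pool to the buffer manager: cached system RAM,
 * VRAM mapped through PCI resource 1, a second unmappable VRAM pool
 * (PRIV0), and the GART aperture (AGP or SGDMA).
 */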
static int
nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
                         struct drm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        switch (type) {
        case DRM_BO_MEM_LOCAL:
                man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                             _DRM_FLAG_MEMTYPE_CACHED;
                man->drm_bus_maptype = 0;
                break;
        case DRM_BO_MEM_VRAM:
                man->flags = _DRM_FLAG_MEMTYPE_FIXED |
                             _DRM_FLAG_MEMTYPE_MAPPABLE |
                             _DRM_FLAG_NEEDS_IOREMAP;
                man->io_addr = NULL;
                man->drm_bus_maptype = _DRM_FRAME_BUFFER;
                man->io_offset = drm_get_resource_start(dev, 1);
                man->io_size = drm_get_resource_len(dev, 1);
                /* The PCI resource can be larger than the VRAM present. */
                if (man->io_size > nouveau_mem_fb_amount(dev))
                        man->io_size = nouveau_mem_fb_amount(dev);
                break;
        case DRM_BO_MEM_PRIV0:
                /* Unmappable VRAM */
                man->flags = _DRM_FLAG_MEMTYPE_CMA;
                man->drm_bus_maptype = 0;
                break;
        case DRM_BO_MEM_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                                     _DRM_FLAG_MEMTYPE_CSELECT |
                                     _DRM_FLAG_NEEDS_IOREMAP;
                        man->drm_bus_maptype = _DRM_AGP;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
                                     _DRM_FLAG_MEMTYPE_CSELECT |
                                     _DRM_FLAG_MEMTYPE_CMA;
                        man->drm_bus_maptype = _DRM_SCATTER_GATHER;
                        break;
                default:
                        DRM_ERROR("Unknown GART type: %d\n",
                                  dev_priv->gart_info.type);
                        return -EINVAL;
                }

                man->io_offset = dev_priv->gart_info.aper_base;
                man->io_size   = dev_priv->gart_info.aper_size;
                man->io_addr   = NULL;
                break;
        default:
                DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

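/* Pick where a buffer goes when it is evicted: GART-resident and system
 * buffers fall back to system RAM, VRAM buffers to cached GART memory.
 */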
static uint32_t
nouveau_bo_evict_mask(struct drm_buffer_object *bo)
{
        switch (bo->mem.mem_type) {
        case DRM_BO_MEM_LOCAL:
        case DRM_BO_MEM_TT:
                return DRM_BO_FLAG_MEM_LOCAL;
        default:
                return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
        }
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT; it can access
 * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
 */
static int
nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
                     struct drm_bo_mem_reg *new_mem)
{
        struct drm_device *dev = bo->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_drm_channel *dchan = &dev_priv->channel;
        struct drm_bo_mem_reg *old_mem = &bo->mem;
        uint32_t src_offset, dst_offset;
        uint32_t srch, dsth, page_count;

        /* Can happen during init/takedown */
        if (!dchan->chan)
                return -EINVAL;

        /* Point the M2MF object at DMA objects covering the source and
         * destination apertures, re-binding them only when they change.
         */
        srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
        dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
        if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
                dchan->m2mf_dma_source = srch;
                dchan->m2mf_dma_destin = dsth;

                BEGIN_RING(NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
                OUT_RING  (dchan->m2mf_dma_source);
                OUT_RING  (dchan->m2mf_dma_destin);
        }

        /* Copy one page per line, at most 2047 lines per submission. */
        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (src_offset);
                OUT_RING  (dst_offset);
                OUT_RING  (PAGE_SIZE); /* src_pitch */
                OUT_RING  (PAGE_SIZE); /* dst_pitch */
                OUT_RING  (PAGE_SIZE); /* line_length */
                OUT_RING  (line_count);
                OUT_RING  ((1<<8)|(1<<0)); /* format: 1-byte src/dst increments */
                OUT_RING  (0); /* buffer notify */
                BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (0);

                /* Advance to the next chunk of the buffer. */
                page_count -= line_count;
                src_offset += line_count << PAGE_SHIFT;
                dst_offset += line_count << PAGE_SHIFT;
        }

        /* Fence the copy and hand the rest of the move to common code. */
        return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
                                         DRM_FENCE_TYPE_EXE, 0, new_mem);
}

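/* Route a buffer move: anything touching system RAM goes through memcpy
 * (the accelerated flip paths for those cases are not implemented yet),
 * and an M2MF copy that fails also falls back to memcpy.
 */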
static int
nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
                struct drm_bo_mem_reg *new_mem)
{
        struct drm_bo_mem_reg *old_mem = &bo->mem;

        if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
                if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
#if 0
                if (!nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
#endif
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
#if 0
                if (nouveau_bo_move_flips(bo, evict, no_wait, new_mem))
#endif
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        } else {
                if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
                        return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
        }

        return 0;
}

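/* TTM cache-flush hook; nothing to do here on nouveau. */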
static void
nouveau_bo_flush_ttm(struct drm_ttm *ttm)
{
}

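/* Placement preference: unmappable VRAM first, then mappable VRAM, the
 * GART aperture, and finally system RAM; when the preferred pools are
 * busy, try the GART aperture before VRAM.
 */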
static uint32_t nouveau_mem_prios[] = {
        DRM_BO_MEM_PRIV0,
        DRM_BO_MEM_VRAM,
        DRM_BO_MEM_TT,
        DRM_BO_MEM_LOCAL
};

static uint32_t nouveau_busy_prios[] = {
        DRM_BO_MEM_TT,
        DRM_BO_MEM_PRIV0,
        DRM_BO_MEM_VRAM,
        DRM_BO_MEM_LOCAL
};

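/* The bo_driver vtable exported to DRM's generic buffer manager. */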
struct drm_bo_driver nouveau_bo_driver = {
        .mem_type_prio = nouveau_mem_prios,
        .mem_busy_prio = nouveau_busy_prios,
        .num_mem_type_prio = ARRAY_SIZE(nouveau_mem_prios),
        .num_mem_busy_prio = ARRAY_SIZE(nouveau_busy_prios),
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .fence_type = nouveau_bo_fence_type,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_mask = nouveau_bo_evict_mask,
        .move = nouveau_bo_move,
        .ttm_cache_flush = nouveau_bo_flush_ttm,
};