/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */
#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"
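/* Pick the TTM backend matching the board's GART type: the AGP aperture,
 * or the card's own page-table-driven SGDMA path.
 */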
static struct drm_ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (dev_priv->gart_info.type) {
	case NOUVEAU_GART_AGP:
		return drm_agp_init_ttm(dev);
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		DRM_ERROR("Unknown GART type %d\n", dev_priv->gart_info.type);
		break;
	}

	return NULL;
}
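/* Tell the fence core what fence types to expect for this buffer;
 * buffers that are read or written get an extra fence type bit on top
 * of DRM_FENCE_TYPE_EXE.
 */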
static int
nouveau_bo_fence_type(struct drm_buffer_object *bo,
		      uint32_t *fclass, uint32_t *type)
{
	/* When we get called, *fclass is set to the requested fence class */

	if (bo->mem.proposed_flags & (DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE))
		*type = 3;
	else
		*type = 1;

	return 0;
}
static int
nouveau_bo_invalidate_caches(struct drm_device *dev, uint64_t buffer_flags)
{
	/* We'll do this from user space. */
	return 0;
}
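/* Describe each memory type (placement flags, bus mapping, aperture
 * location) to the buffer-object core.
 */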
static int
nouveau_bo_init_mem_type(struct drm_device *dev, uint32_t type,
			 struct drm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	switch (type) {
	case DRM_BO_MEM_LOCAL:
		man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
			     _DRM_FLAG_MEMTYPE_CACHED;
		man->drm_bus_maptype = 0;
		break;
	case DRM_BO_MEM_VRAM:
		man->flags = _DRM_FLAG_MEMTYPE_FIXED |
			     _DRM_FLAG_MEMTYPE_MAPPABLE |
			     _DRM_FLAG_NEEDS_IOREMAP;
		man->io_addr = NULL;
		man->drm_bus_maptype = _DRM_FRAME_BUFFER;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > nouveau_mem_fb_amount(dev))
			man->io_size = nouveau_mem_fb_amount(dev);
		break;
	case DRM_BO_MEM_PRIV0:
		/* Unmappable VRAM */
		man->flags = _DRM_FLAG_MEMTYPE_CMA;
		man->drm_bus_maptype = 0;
		break;
	case DRM_BO_MEM_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
				     _DRM_FLAG_MEMTYPE_CSELECT |
				     _DRM_FLAG_NEEDS_IOREMAP;
			man->drm_bus_maptype = _DRM_AGP;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = _DRM_FLAG_MEMTYPE_MAPPABLE |
				     _DRM_FLAG_MEMTYPE_CSELECT |
				     _DRM_FLAG_MEMTYPE_CMA;
			man->drm_bus_maptype = _DRM_SCATTER_GATHER;
			break;
		default:
			DRM_ERROR("Unknown GART type: %d\n",
				  dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		break;
	default:
		DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}

	return 0;
}
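/* Where to place a buffer when it's evicted: GART/system buffers drop
 * to plain system memory, anything else (VRAM) goes to cached GART.
 */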
static uint64_t
nouveau_bo_evict_flags(struct drm_buffer_object *bo)
{
	switch (bo->mem.mem_type) {
	case DRM_BO_MEM_LOCAL:
	case DRM_BO_MEM_TT:
		return DRM_BO_FLAG_MEM_LOCAL;
	default:
		return DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_CACHED;
	}
}
/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * DRM_BO_MEM_{VRAM,PRIV0,TT} directly.
 */
static int
nouveau_bo_move_m2mf(struct drm_buffer_object *bo, int evict, int no_wait,
		     struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_drm_channel *dchan = &dev_priv->channel;
	struct drm_bo_mem_reg *old_mem = &bo->mem;
	uint32_t srch, dsth, page_count;
	uint32_t src_offset, dst_offset;

	/* Can happen during init/takedown */
	if (!dchan->chan)
		return -EINVAL;

	srch = old_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
	dsth = new_mem->mem_type == DRM_BO_MEM_TT ? NvDmaTT : NvDmaFB;
	if (srch != dchan->m2mf_dma_source || dsth != dchan->m2mf_dma_destin) {
		dchan->m2mf_dma_source = srch;
		dchan->m2mf_dma_destin = dsth;

		BEGIN_RING(NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_SET_DMA_SOURCE, 2);
		OUT_RING (dchan->m2mf_dma_source);
		OUT_RING (dchan->m2mf_dma_destin);
	}

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	page_count = new_mem->num_pages;
	while (page_count) {
		/* M2MF transfers at most 2047 lines per submission */
		int line_count = (page_count > 2047) ? 2047 : page_count;

		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (src_offset);
		OUT_RING (dst_offset);
		OUT_RING (PAGE_SIZE); /* src_pitch */
		OUT_RING (PAGE_SIZE); /* dst_pitch */
		OUT_RING (PAGE_SIZE); /* line_length */
		OUT_RING (line_count);
		OUT_RING ((1<<8)|(1<<0));

		BEGIN_RING(NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (0);

		/* Advance the offsets, otherwise every chunk after the
		 * first would re-copy the same pages.
		 */
		page_count -= line_count;
		src_offset += line_count << PAGE_SHIFT;
		dst_offset += line_count << PAGE_SHIFT;
	}

	return drm_bo_move_accel_cleanup(bo, evict, no_wait, dchan->chan->id,
					 DRM_FENCE_TYPE_EXE, 0, new_mem);
}
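/* Evicting to system memory is done in two steps: bind the buffer's
 * pages into the GART, blit into them with M2MF, then let
 * drm_bo_move_ttm() unbind the pages so they end up in system RAM.
 */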
/* Flip pages into the GART and move if we can. */
static int
nouveau_bo_move_flipd(struct drm_buffer_object *bo, int evict, int no_wait,
		      struct drm_bo_mem_reg *new_mem)
{
	struct drm_device *dev = bo->dev;
	struct drm_bo_mem_reg tmp_mem;
	int ret;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	tmp_mem.proposed_flags = (DRM_BO_FLAG_MEM_TT |
				  DRM_BO_FLAG_CACHED |
				  DRM_BO_FLAG_FORCE_CACHING);

	ret = drm_bo_mem_space(bo, &tmp_mem, no_wait);
	if (ret)
		return ret;

	ret = drm_ttm_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = nouveau_bo_move_m2mf(bo, 1, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = drm_bo_move_ttm(bo, evict, no_wait, new_mem);

out_cleanup:
	if (tmp_mem.mm_node) {
		mutex_lock(&dev->struct_mutex);
		if (tmp_mem.mm_node != bo->pinned_node)
			drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		mutex_unlock(&dev->struct_mutex);
	}

	return ret;
}
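/* Top-level move hook: try the accelerated paths first and fall back
 * to a CPU memcpy whenever one of them fails.
 */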
static int
nouveau_bo_move(struct drm_buffer_object *bo, int evict, int no_wait,
		struct drm_bo_mem_reg *new_mem)
{
	struct drm_bo_mem_reg *old_mem = &bo->mem;

	if (new_mem->mem_type == DRM_BO_MEM_LOCAL) {
		if (old_mem->mem_type == DRM_BO_MEM_LOCAL)
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		if (nouveau_bo_move_flipd(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else
	if (old_mem->mem_type == DRM_BO_MEM_LOCAL) {
		/* nouveau_bo_move_flips() isn't implemented yet; always
		 * fall back to a CPU copy for system->GPU moves.
		 */
		if (1 /*nouveau_bo_move_flips(bo, evict, no_wait, new_mem)*/)
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	} else {
		if (nouveau_bo_move_m2mf(bo, evict, no_wait, new_mem))
			return drm_bo_move_memcpy(bo, evict, no_wait, new_mem);
	}

	return 0;
}
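/* ttm_cache_flush hook; nothing to do for nouveau. */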
static void
nouveau_bo_flush_ttm(struct drm_ttm *ttm)
{
}
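/* Preferred placement order for allocations, and the order used when
 * memory is under pressure.
 */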
static uint32_t nouveau_mem_prios[] = {
	DRM_BO_MEM_PRIV0,
	DRM_BO_MEM_VRAM,
	DRM_BO_MEM_TT,
	DRM_BO_MEM_LOCAL
};
static uint32_t nouveau_busy_prios[] = {
	DRM_BO_MEM_TT,
	DRM_BO_MEM_LOCAL
};
struct drm_bo_driver nouveau_bo_driver = {
	.mem_type_prio = nouveau_mem_prios,
	.mem_busy_prio = nouveau_busy_prios,
	.num_mem_type_prio = sizeof(nouveau_mem_prios)/sizeof(uint32_t),
	.num_mem_busy_prio = sizeof(nouveau_busy_prios)/sizeof(uint32_t),
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.fence_type = nouveau_bo_fence_type,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.ttm_cache_flush = nouveau_bo_flush_ttm,
	.command_stream_barrier = NULL
};