2 * Copyright 2007 Nouveau Project
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
18 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
19 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33 #include "nouveau_private.h"
/* Per-device buffer-object subsystem init; the body is not visible in
 * this view, so no behavior is documented here — TODO confirm against
 * the full file. */
36 nouveau_bo_init(struct nouveau_device *dev)
/* Counterpart to nouveau_bo_init(); body not visible in this view —
 * TODO confirm against the full file. */
42 nouveau_bo_takedown(struct nouveau_device *dev)
/*
 * Copy the kernel's GEM object description (drm_nouveau_gem_info) into the
 * library-private BO wrapper.  Both the private handle and the public
 * base.handle are kept in sync.
 *
 * NOTE(review): this view of the file is fragmentary (interior lines are
 * missing), so only the visible assignments are described.
 */
47 nouveau_bo_info(struct nouveau_bo_priv *nvbo, struct drm_nouveau_gem_info *arg)
49 nvbo->handle = nvbo->base.handle = arg->handle;
50 nvbo->domain = arg->domain;
51 nvbo->size = arg->size;
52 nvbo->offset = arg->offset;
53 nvbo->map_handle = arg->map_handle;
54 nvbo->base.tile_mode = arg->tile_mode;
/* The NONCONTIG bit travels inverted over the ioctl interface so older
 * userspace keeps working; flip it back on the way in. */
55 /* XXX - flag inverted for backwards compatibility */
56 nvbo->base.tile_flags = arg->tile_flags ^ NOUVEAU_GEM_TILE_NONCONTIG;
/* A BO counts as "allocated" once it has backing of either kind:
 * user-space memory (sysmem) or a kernel GEM handle. */
61 nouveau_bo_allocated(struct nouveau_bo_priv *nvbo)
63 if (nvbo->sysmem || nvbo->handle)
/*
 * Give the BO user-space (malloc'd) backing storage of nvbo->size bytes.
 * The early test guards against re-allocating when the BO already has user
 * backing; the branch body is not visible in this view — presumably it
 * returns early, TODO confirm.
 */
69 nouveau_bo_ualloc(struct nouveau_bo_priv *nvbo)
71 if (nvbo->user || nvbo->sysmem) {
/* NOTE(review): the malloc() result check is outside this view —
 * confirm failure is handled in the full file. */
76 nvbo->sysmem = malloc(nvbo->size);
/* Release the BO's user-space backing (counterpart to nouveau_bo_ualloc);
 * body not visible in this view — TODO confirm against the full file. */
84 nouveau_bo_ufree(struct nouveau_bo_priv *nvbo)
/*
 * Drop the BO's kernel-side resources: tear down any CPU mapping and close
 * the GEM handle via DRM_IOCTL_GEM_CLOSE.
 *
 * NOTE(review): the guards around the visible calls (e.g. "is there a map
 * at all?") fall outside this fragmentary view — confirm in the full file.
 */
94 nouveau_bo_kfree(struct nouveau_bo_priv *nvbo)
96 struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
97 struct drm_gem_close req;
/* Unmap the CPU-visible mapping established by nouveau_bo_kmap(). */
103 munmap(nvbo->map, nvbo->size);
107 req.handle = nvbo->handle;
/* Best-effort close; the return value is intentionally ignored here. */
109 drmIoctl(nvdev->fd, DRM_IOCTL_GEM_CLOSE, &req);
/*
 * Allocate a kernel GEM object backing this BO via DRM_NOUVEAU_GEM_NEW.
 *
 * The request is built from the BO's placement flags:
 *   NOUVEAU_BO_VRAM -> NOUVEAU_GEM_DOMAIN_VRAM
 *   NOUVEAU_BO_GART -> NOUVEAU_GEM_DOMAIN_GART
 *   neither          -> both domains (kernel picks placement)
 *   NOUVEAU_BO_MAP  -> additionally require a CPU-mappable placement
 * On success the kernel's reply (req.info) is folded back into the BO
 * through nouveau_bo_info().
 *
 * @chan: optional channel used as a placement hint (may be NULL).
 *
 * NOTE(review): this view is fragmentary — early-out checks, the zeroing
 * of req, and the error path after the ioctl are not visible; confirm in
 * the full file.
 */
113 nouveau_bo_kalloc(struct nouveau_bo_priv *nvbo, struct nouveau_channel *chan)
115 struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
116 struct drm_nouveau_gem_new req;
117 struct drm_nouveau_gem_info *info = &req.info;
123 req.channel_hint = chan ? chan->id : 0;
124 req.align = nvbo->align;
127 info->size = nvbo->size;
130 if (nvbo->flags & NOUVEAU_BO_VRAM)
131 info->domain |= NOUVEAU_GEM_DOMAIN_VRAM;
132 if (nvbo->flags & NOUVEAU_BO_GART)
133 info->domain |= NOUVEAU_GEM_DOMAIN_GART;
/* No explicit placement requested: let the kernel choose. */
135 info->domain |= (NOUVEAU_GEM_DOMAIN_VRAM |
136 NOUVEAU_GEM_DOMAIN_GART);
139 if (nvbo->flags & NOUVEAU_BO_MAP)
140 info->domain |= NOUVEAU_GEM_DOMAIN_MAPPABLE;
142 info->tile_mode = nvbo->base.tile_mode;
143 info->tile_flags = nvbo->base.tile_flags;
/* Outgoing direction of the NONCONTIG compatibility inversion; the
 * inverse flip happens in nouveau_bo_info() on the way back in. */
144 /* XXX - flag inverted for backwards compatibility */
145 info->tile_flags ^= NOUVEAU_GEM_TILE_NONCONTIG;
/* Older kernels only understand the layout bits; strip usage bits. */
146 if (!nvdev->has_bo_usage)
147 info->tile_flags &= NOUVEAU_GEM_TILE_LAYOUT_MASK;
149 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_NEW,
/* Adopt the kernel-assigned handle/offset/etc. */
154 nouveau_bo_info(nvbo, &req.info);
/*
 * Establish a CPU mapping of the kernel BO by mmap()ing the device fd at
 * the kernel-provided map_handle offset.  A BO with no map_handle cannot
 * be mapped this way (the visible guard bails out in that case — the
 * return statement itself is outside this fragmentary view).
 */
159 nouveau_bo_kmap(struct nouveau_bo_priv *nvbo)
161 struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
166 if (!nvbo->map_handle)
169 nvbo->map = mmap(0, nvbo->size, PROT_READ | PROT_WRITE,
170 MAP_SHARED, nvdev->fd, nvbo->map_handle);
/* NOTE(review): error-path body is not visible; presumably it resets
 * nvbo->map and returns an error — confirm in the full file. */
171 if (nvbo->map == MAP_FAILED) {
/*
 * Create a new buffer object with explicit tiling parameters.
 *
 * @dev:        owning device (required)
 * @flags:      NOUVEAU_BO_* placement/usage flags
 * @align:      requested alignment
 * @size:       size in bytes
 * @tile_mode:  hardware tiling mode, stored verbatim on the BO
 * @tile_flags: hardware tiling flags, stored verbatim on the BO
 * @bo:         out pointer; must be non-NULL and point to NULL
 *
 * Kernel backing is allocated up front only when an explicit VRAM/GART
 * placement was requested; otherwise allocation is presumably deferred —
 * the intervening lines are missing from this view, TODO confirm.
 * On kalloc failure the BO is unreferenced (which frees it).
 */
180 nouveau_bo_new_tile(struct nouveau_device *dev, uint32_t flags, int align,
181 int size, uint32_t tile_mode, uint32_t tile_flags,
182 struct nouveau_bo **bo)
184 struct nouveau_bo_priv *nvbo;
/* Reject NULL dev/bo and an out-pointer that already holds a BO. */
187 if (!dev || !bo || *bo)
190 nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
193 nvbo->base.device = dev;
194 nvbo->base.size = size;
195 nvbo->base.tile_mode = tile_mode;
196 nvbo->base.tile_flags = tile_flags;
203 if (flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)) {
204 ret = nouveau_bo_kalloc(nvbo, NULL);
/* Failure: drop the reference, destroying the half-built BO. */
206 nouveau_bo_ref(NULL, (void *)&nvbo);
/* Convenience wrapper: create an untiled BO (tile_mode/tile_flags = 0). */
216 nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
217 int size, struct nouveau_bo **bo)
219 return nouveau_bo_new_tile(dev, flags, align, size, 0, 0, bo);
/*
 * Create a BO wrapping caller-owned memory @ptr of @size bytes.  Built on
 * nouveau_bo_new() with NOUVEAU_BO_MAP; the lines that attach @ptr as the
 * BO's sysmem backing and mark it user-owned are missing from this
 * fragmentary view — TODO confirm ownership semantics in the full file.
 */
223 nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
224 struct nouveau_bo **bo)
226 struct nouveau_bo_priv *nvbo;
229 ret = nouveau_bo_new(dev, NOUVEAU_BO_MAP, 0, size, bo);
232 nvbo = nouveau_bo(*bo);
/*
 * Wrap an existing kernel GEM object (identified by @handle) in a new BO.
 *
 * A zero-sized placeholder BO is created first, then populated from the
 * kernel's DRM_NOUVEAU_GEM_INFO reply.  On ioctl failure the placeholder
 * is unreferenced (freed) and an error is presumably returned — the
 * return statement is outside this fragmentary view.
 */
240 nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
241 struct nouveau_bo **bo)
243 struct nouveau_device_priv *nvdev = nouveau_device(dev);
244 struct drm_nouveau_gem_info req;
245 struct nouveau_bo_priv *nvbo;
248 ret = nouveau_bo_new(dev, 0, 0, 0, bo);
251 nvbo = nouveau_bo(*bo);
/* NOTE(review): req.handle presumably gets set to @handle on a line
 * missing from this view — confirm in the full file. */
254 ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_INFO,
257 nouveau_bo_ref(NULL, bo);
261 nouveau_bo_info(nvbo, &req);
/* Propagate the real size onto the public struct (it was created as 0). */
262 nvbo->base.size = nvbo->size;
/*
 * Return a global (flink) name for @bo in *@handle, creating it on first
 * use via DRM_IOCTL_GEM_FLINK.  Kernel backing is forced to exist first
 * (nouveau_bo_kalloc), since only a real GEM object can be flinked.
 * On flink failure the freshly created kernel backing is released again.
 * The result is cached in nvbo->global_handle for subsequent calls.
 */
267 nouveau_bo_handle_get(struct nouveau_bo *bo, uint32_t *handle)
269 struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
270 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
276 if (!nvbo->global_handle) {
277 struct drm_gem_flink req;
279 ret = nouveau_bo_kalloc(nvbo, NULL);
283 req.handle = nvbo->handle;
284 ret = drmIoctl(nvdev->fd, DRM_IOCTL_GEM_FLINK, &req);
/* Flink failed: undo the kalloc done just above. */
286 nouveau_bo_kfree(nvbo);
290 nvbo->global_handle = req.name;
293 *handle = nvbo->global_handle;
/*
 * Open a BO from a global (flink) name: DRM_IOCTL_GEM_OPEN turns the name
 * into a per-fd GEM handle, which is then wrapped via nouveau_bo_wrap().
 * Both failure paths drop any reference held in *@bo.
 *
 * NOTE(review): req.name presumably gets set to @handle on a line missing
 * from this fragmentary view — confirm in the full file.
 */
298 nouveau_bo_handle_ref(struct nouveau_device *dev, uint32_t handle,
299 struct nouveau_bo **bo)
301 struct nouveau_device_priv *nvdev = nouveau_device(dev);
302 struct nouveau_bo_priv *nvbo;
303 struct drm_gem_open req;
307 ret = drmIoctl(nvdev->fd, DRM_IOCTL_GEM_OPEN, &req);
309 nouveau_bo_ref(NULL, bo);
313 ret = nouveau_bo_wrap(dev, req.handle, bo);
315 nouveau_bo_ref(NULL, bo);
319 nvbo = nouveau_bo(*bo);
/* Expose the per-fd GEM handle on the public struct. */
320 nvbo->base.handle = nvbo->handle;
/*
 * Drop one reference from *@bo and destroy it when the count hits zero.
 * If the BO is still queued on a pushbuf (nvbo->pending), that pushbuf is
 * flushed first so the GPU reference is resolved before the backing
 * (user-space and kernel) is freed.
 */
325 nouveau_bo_del(struct nouveau_bo **bo)
327 struct nouveau_bo_priv *nvbo;
331 nvbo = nouveau_bo(*bo);
/* Still referenced elsewhere: nothing more to do. */
334 if (--nvbo->refcount)
/* Clear pending BEFORE flushing so the flush doesn't re-enter us. */
338 nvbo->pending = NULL;
339 nouveau_pushbuf_flush(nvbo->pending_channel, 0);
342 nouveau_bo_ufree(nvbo);
343 nouveau_bo_kfree(nvbo);
/*
 * Standard ref/unref helper: make *@pbo reference @ref.  Visible here is
 * the refcount bump on @ref; the unref of the previous *@pbo value
 * (presumably via nouveau_bo_del) falls outside this fragmentary view.
 */
348 nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pbo)
354 nouveau_bo(ref)->refcount++;
/*
 * Synchronise CPU access with the GPU via DRM_NOUVEAU_GEM_CPU_PREP.
 *
 * @cpu_write: CPU intends to write (forces a wait even without a
 *             write marker, and flushes a pending pushbuf).
 * @no_wait:   ask the kernel not to wait (busy-query, see nouveau_bo_busy).
 * @no_block:  ask the kernel not to block.
 *
 * Fast path: a BO that was never shared (no global handle), has no
 * outstanding GPU write, and is only being read needs no sync at all.
 * If the BO sits on an unflushed pushbuf with writes pending (or the CPU
 * wants to write), that pushbuf is flushed before waiting.  The ioctl is
 * retried while the kernel reports -EAGAIN.
 */
364 nouveau_bo_wait(struct nouveau_bo *bo, int cpu_write, int no_wait, int no_block)
366 struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
367 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
368 struct drm_nouveau_gem_cpu_prep req;
371 if (!nvbo->global_handle && !nvbo->write_marker && !cpu_write)
375 (nvbo->pending->write_domains || cpu_write)) {
376 nvbo->pending = NULL;
377 nouveau_pushbuf_flush(nvbo->pending_channel, 0);
380 req.handle = nvbo->handle;
383 req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
385 req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;
387 req.flags |= NOUVEAU_GEM_CPU_PREP_NOBLOCK;
390 ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_PREP,
392 } while (ret == -EAGAIN);
/* Wait completed: no GPU write is outstanding any more. */
397 nvbo->write_marker = 0;
/*
 * Map @size bytes of @bo at offset @delta into CPU address space, setting
 * bo->map.  Fails if the BO is already mapped.
 *
 * Backing is created lazily: an explicit VRAM/GART placement gets kernel
 * backing (kalloc); anything still unbacked after that falls back to
 * user-space memory (ualloc).  User-backed BOs map trivially through
 * sysmem; kernel-backed BOs go through nouveau_bo_kmap() and — unless
 * NOUVEAU_BO_NOSYNC is set — wait for the GPU per the NOUVEAU_BO_WR /
 * NOUVEAU_BO_NOWAIT bits in @flags before returning the pointer.
 *
 * NOTE(review): @size is not visibly range-checked against the BO size in
 * this fragmentary view — confirm in the full file.
 */
402 nouveau_bo_map_range(struct nouveau_bo *bo, uint32_t delta, uint32_t size,
405 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
408 if (!nvbo || bo->map)
411 if (!nouveau_bo_allocated(nvbo)) {
412 if (nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)) {
413 ret = nouveau_bo_kalloc(nvbo, NULL);
/* Still no backing: fall back to user-space memory. */
418 if (!nouveau_bo_allocated(nvbo)) {
419 ret = nouveau_bo_ualloc(nvbo);
/* User-backed: the "mapping" is just a pointer into sysmem. */
426 bo->map = (char *)nvbo->sysmem + delta;
428 ret = nouveau_bo_kmap(nvbo);
432 if (!(flags & NOUVEAU_BO_NOSYNC)) {
433 ret = nouveau_bo_wait(bo, (flags & NOUVEAU_BO_WR),
434 (flags & NOUVEAU_BO_NOWAIT), 0);
441 bo->map = (char *)nvbo->map + delta;
/* Flush a sub-range of a mapped BO; body not visible in this view —
 * TODO confirm against the full file. */
448 nouveau_bo_map_flush(struct nouveau_bo *bo, uint32_t delta, uint32_t size)
/* Convenience wrapper: map the entire BO (offset 0, full size). */
453 nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
455 return nouveau_bo_map_range(bo, 0, bo->size, flags);
/*
 * End a CPU mapping started by nouveau_bo_map{,_range}().  For
 * kernel-backed mappings (not sysmem) the kernel is told CPU access is
 * finished via DRM_NOUVEAU_GEM_CPU_FINI — but only while map_refcnt is
 * non-zero, i.e. a matching CPU_PREP was issued.  The clearing of bo->map
 * itself is outside this fragmentary view.
 */
459 nouveau_bo_unmap(struct nouveau_bo *bo)
461 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
463 if (bo->map && !nvbo->sysmem && nvbo->map_refcnt) {
464 struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
465 struct drm_nouveau_gem_cpu_fini req;
467 req.handle = nvbo->handle;
/* Best-effort notification; return value intentionally ignored. */
468 drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_FINI,
/* Non-blocking busy query: probe GPU access via nouveau_bo_wait() with
 * both no_wait and no_block set; @access selects read vs write intent. */
477 nouveau_bo_busy(struct nouveau_bo *bo, uint32_t access)
479 return nouveau_bo_wait(bo, (access & NOUVEAU_BO_WR), 1, 1);
/*
 * Report how an unflushed pushbuf will access this BO, translating the
 * pending entry's GEM read/write domains into NOUVEAU_BO_RD/WR flags.
 * The NULL check on nvbo->pending and the return of @flags fall outside
 * this fragmentary view.
 */
483 nouveau_bo_pending(struct nouveau_bo *bo)
485 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
492 if (nvbo->pending->read_domains)
493 flags |= NOUVEAU_BO_RD;
494 if (nvbo->pending->write_domains)
495 flags |= NOUVEAU_BO_WR;
/*
 * Queue @bo on @chan's pushbuf and return its pushbuf relocation entry.
 * If the BO is already pending on this pushbuf, the existing entry is
 * returned (the guard condition for that early return is partly outside
 * this fragmentary view).
 *
 * Kernel backing is created on demand; if the BO previously lived in
 * user-space memory, its contents are migrated: the sysmem pointer is
 * temporarily stashed so nouveau_bo_map() maps the NEW kernel backing,
 * the old contents are memcpy'd across, and the user backing is freed.
 *
 * The new pushbuf entry holds its own BO reference (stored in user_priv)
 * so the BO survives until the pushbuf is flushed.  read/write domains
 * start at 0 and are presumably OR'd in by the caller.  The function's
 * tail (returning pbbo) runs past the end of this view.
 */
500 struct drm_nouveau_gem_pushbuf_bo *
501 nouveau_bo_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo)
503 struct nouveau_pushbuf_priv *nvpb = &nouveau_channel(chan)->pb;
504 struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
505 struct drm_nouveau_gem_pushbuf_bo *pbbo;
506 struct nouveau_bo *ref = NULL;
/* Already on this pushbuf: reuse the existing entry. */
510 return nvbo->pending;
513 ret = nouveau_bo_kalloc(nvbo, chan);
/* Migrate user-space contents into the new kernel backing. */
518 void *sysmem_tmp = nvbo->sysmem;
/* sysmem is hidden during the map call so nouveau_bo_map() takes the
 * kernel-mapping path instead of returning the old sysmem pointer. */
521 ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
524 nvbo->sysmem = sysmem_tmp;
526 memcpy(bo->map, nvbo->sysmem, nvbo->base.size);
527 nouveau_bo_ufree(nvbo);
528 nouveau_bo_unmap(bo);
/* Pushbuf entry table is fixed-size; fail when it is full. */
532 if (nvpb->nr_buffers >= NOUVEAU_GEM_MAX_BUFFERS)
534 pbbo = nvpb->buffers + nvpb->nr_buffers++;
535 nvbo->pending = pbbo;
536 nvbo->pending_channel = chan;
537 nvbo->pending_refcnt = 0;
/* The entry owns a reference; released when the pushbuf is flushed. */
539 nouveau_bo_ref(bo, &ref);
540 pbbo->user_priv = (uint64_t)(unsigned long)ref;
541 pbbo->handle = nvbo->handle;
542 pbbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
543 pbbo->read_domains = 0;
544 pbbo->write_domains = 0;
/* Presumed placement lets the kernel skip relocations when unchanged. */
545 pbbo->presumed.domain = nvbo->domain;
546 pbbo->presumed.offset = nvbo->offset;
547 pbbo->presumed.valid = 1;