/*
 * Copyright 2007 Nouveau Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <assert.h>

#include <sys/mman.h>
#include <sys/ioctl.h>

#include "nouveau_private.h"
int
nouveau_bo_init(struct nouveau_device *dev)
{
	return 0;
}
void
nouveau_bo_takedown(struct nouveau_device *dev)
{
}
static int
nouveau_bo_allocated(struct nouveau_bo_priv *nvbo)
{
	/* A BO counts as allocated if it has system memory backing, a
	 * kernel object, or is a pinned/fake BO with a fixed offset.
	 */
	if (nvbo->sysmem || nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
		return 1;
	return 0;
}
static int
nouveau_bo_ualloc(struct nouveau_bo_priv *nvbo)
{
	/* User BOs are always backed already; nothing to do. */
	if (nvbo->user || nvbo->sysmem) {
		assert(nvbo->sysmem);
		return 0;
	}

	nvbo->sysmem = malloc(nvbo->size);
	if (!nvbo->sysmem)
		return -ENOMEM;

	return 0;
}
static void
nouveau_bo_ufree(struct nouveau_bo_priv *nvbo)
{
	if (nvbo->sysmem) {
		/* Never free memory the user handed us in nouveau_bo_user(). */
		if (!nvbo->user)
			free(nvbo->sysmem);
		nvbo->sysmem = NULL;
	}
}
static void
nouveau_bo_kfree_nomm(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_nouveau_mem_free req;

	if (nvbo->map) {
		drmUnmap(nvbo->map, nvbo->size);
		nvbo->map = NULL;
	}

	req.offset = nvbo->offset;
	if (nvbo->domain & NOUVEAU_BO_GART)
		req.flags = NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI;
	else
	if (nvbo->domain & NOUVEAU_BO_VRAM)
		req.flags = NOUVEAU_MEM_FB;
	drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_FREE, &req, sizeof(req));

	nvbo->handle = 0;
}
static void
nouveau_bo_kfree(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_gem_close req;

	if (!nvbo->handle)
		return;

	if (!nvdev->mm_enabled) {
		nouveau_bo_kfree_nomm(nvbo);
		return;
	}

	if (nvbo->map) {
		munmap(nvbo->map, nvbo->size);
		nvbo->map = NULL;
	}

	req.handle = nvbo->handle;
	nvbo->handle = 0;
	ioctl(nvdev->fd, DRM_IOCTL_GEM_CLOSE, &req);
}
static int
nouveau_bo_kalloc_nomm(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_nouveau_mem_alloc req;
	int ret;

	if (nvbo->handle)
		return 0;

	/* No placement requested: let the kernel pick either aperture. */
	if (!(nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
		nvbo->flags |= (NOUVEAU_BO_GART | NOUVEAU_BO_VRAM);

	req.size = nvbo->size;
	req.alignment = nvbo->align;
	req.flags = 0;
	if (nvbo->flags & NOUVEAU_BO_VRAM)
		req.flags |= NOUVEAU_MEM_FB;
	if (nvbo->flags & NOUVEAU_BO_GART)
		req.flags |= (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI);
	if (nvbo->flags & NOUVEAU_BO_TILED) {
		req.flags |= NOUVEAU_MEM_TILE;
		if (nvbo->flags & NOUVEAU_BO_ZTILE)
			req.flags |= NOUVEAU_MEM_TILE_ZETA;
	}
	req.flags |= NOUVEAU_MEM_MAPPED;

	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_MEM_ALLOC,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo->handle = req.map_handle;
	nvbo->size = req.size;
	nvbo->offset = req.offset;
	if (req.flags & (NOUVEAU_MEM_AGP | NOUVEAU_MEM_PCI))
		nvbo->domain = NOUVEAU_BO_GART;
	else
	if (req.flags & NOUVEAU_MEM_FB)
		nvbo->domain = NOUVEAU_BO_VRAM;

	return 0;
}
static int
nouveau_bo_kalloc(struct nouveau_bo_priv *nvbo, struct nouveau_channel *chan)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_nouveau_gem_new req;
	int ret;

	if (nvbo->handle || (nvbo->flags & NOUVEAU_BO_PIN))
		return 0;

	if (!nvdev->mm_enabled)
		return nouveau_bo_kalloc_nomm(nvbo);

	req.channel_hint = chan ? chan->id : 0;
	req.size = nvbo->size;
	req.align = nvbo->align;

	req.domain = 0;
	if (nvbo->flags & NOUVEAU_BO_VRAM)
		req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (nvbo->flags & NOUVEAU_BO_GART)
		req.domain |= NOUVEAU_GEM_DOMAIN_GART;
	if (nvbo->flags & NOUVEAU_BO_TILED) {
		req.domain |= NOUVEAU_GEM_DOMAIN_TILE;
		if (nvbo->flags & NOUVEAU_BO_ZTILE)
			req.domain |= NOUVEAU_GEM_DOMAIN_TILE_ZETA;
	}

	/* No placement requested: allow the kernel to choose. */
	if (!req.domain) {
		req.domain |= (NOUVEAU_GEM_DOMAIN_VRAM |
			       NOUVEAU_GEM_DOMAIN_GART);
	}

	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_NEW,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo->handle = nvbo->base.handle = req.handle;
	nvbo->size = req.size;
	nvbo->domain = req.domain;
	nvbo->offset = req.offset;

	return 0;
}
static int
nouveau_bo_kmap_nomm(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	int ret;

	ret = drmMap(nvdev->fd, nvbo->handle, nvbo->size, &nvbo->map);
	if (ret) {
		nvbo->map = NULL;
		return ret;
	}

	return 0;
}
static int
nouveau_bo_kmap(struct nouveau_bo_priv *nvbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
	struct drm_nouveau_gem_mmap req;
	int ret;

	if (nvbo->map)
		return 0;

	if (!nvbo->handle)
		return -EINVAL;

	if (!nvdev->mm_enabled)
		return nouveau_bo_kmap_nomm(nvbo);

	req.handle = nvbo->handle;
	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_MMAP,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo->map = (void *)(unsigned long)req.vaddr;
	return 0;
}
int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, int align,
	       int size, struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;
	int ret;

	if (!dev || !bo || *bo)
		return -EINVAL;

	nvbo = calloc(1, sizeof(struct nouveau_bo_priv));
	if (!nvbo)
		return -ENOMEM;
	nvbo->base.device = dev;
	nvbo->base.size = size;
	nvbo->size = size;
	nvbo->align = align;

	/* Don't set NOUVEAU_BO_PIN here, or nouveau_bo_allocated() will
	 * decide the buffer's already allocated when it's not.  The
	 * call to nouveau_bo_pin() later will set this flag.
	 */
	nvbo->flags = (flags & ~NOUVEAU_BO_PIN);
	nvbo->refcount = 1;

	/*XXX: murder me violently */
	if (flags & NOUVEAU_BO_TILED) {
		nvbo->base.tiled = 1;
		if (flags & NOUVEAU_BO_ZTILE)
			nvbo->base.tiled |= 2;
	}

	if (flags & NOUVEAU_BO_PIN) {
		ret = nouveau_bo_pin((void *)nvbo, nvbo->flags);
		if (ret) {
			nouveau_bo_ref(NULL, (void *)&nvbo);
			return ret;
		}
	}

	*bo = &nvbo->base;
	return 0;
}
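/*
 * Usage sketch (illustrative only, not part of the original file): a
 * caller allocates a page-sized scratch BO placed in GART and drops its
 * reference when done.
 *
 *	struct nouveau_bo *bo = NULL;
 *
 *	if (nouveau_bo_new(dev, NOUVEAU_BO_GART, 0, 4096, &bo) == 0) {
 *		... use bo ...
 *		nouveau_bo_ref(NULL, &bo);
 *	}
 */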
int
nouveau_bo_user(struct nouveau_device *dev, void *ptr, int size,
		struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, 0, 0, size, bo);
	if (ret)
		return ret;
	nvbo = nouveau_bo(*bo);

	nvbo->sysmem = ptr;
	nvbo->user = 1;
	return 0;
}
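/*
 * Sketch of the intended use (an assumption drawn from the code above,
 * not from original documentation): wrap caller-owned memory so it can
 * be passed around as a BO.  The library never frees "data" because
 * nvbo->user is set.
 *
 *	static uint32_t data[1024];
 *	struct nouveau_bo *bo = NULL;
 *
 *	nouveau_bo_user(dev, data, sizeof(data), &bo);
 */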
int
nouveau_bo_fake(struct nouveau_device *dev, uint64_t offset, uint32_t flags,
		uint32_t size, void *map, struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, flags & ~NOUVEAU_BO_PIN, 0, size, bo);
	if (ret)
		return ret;
	nvbo = nouveau_bo(*bo);

	nvbo->flags = flags | NOUVEAU_BO_PIN;
	nvbo->domain = (flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART));
	nvbo->offset = offset;
	nvbo->size = nvbo->base.size = size;
	nvbo->map = map;
	nvbo->base.flags = nvbo->flags;
	nvbo->base.offset = nvbo->offset;
	return 0;
}
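/*
 * A fake BO describes memory the kernel already placed, e.g. a fixed
 * notifier or scanout range when no memory manager is running.  A
 * hypothetical example, with the offset and size invented for
 * illustration:
 *
 *	struct nouveau_bo *ntfy = NULL;
 *
 *	nouveau_bo_fake(dev, 0x10000, NOUVEAU_BO_GART, 4096, NULL, &ntfy);
 */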
int
nouveau_bo_handle_get(struct nouveau_bo *bo, uint32_t *handle)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	if (!bo || !handle)
		return -EINVAL;

	if (!nvdev->mm_enabled)
		return -ENODEV;

	if (!nvbo->global_handle) {
		struct drm_gem_flink req;

		ret = nouveau_bo_kalloc(nvbo, NULL);
		if (ret)
			return ret;

		req.handle = nvbo->handle;
		ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_FLINK, &req);
		if (ret) {
			nouveau_bo_kfree(nvbo);
			return ret;
		}

		nvbo->global_handle = req.name;
	}

	*handle = nvbo->global_handle;
	return 0;
}
int
nouveau_bo_handle_ref(struct nouveau_device *dev, uint32_t handle,
		      struct nouveau_bo **bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req;
	int ret;

	ret = nouveau_bo_new(dev, 0, 0, 0, bo);
	if (ret)
		return ret;
	nvbo = nouveau_bo(*bo);

	if (!nvdev->mm_enabled) {
		/* Without a kernel memory manager, "handle" is the fixed
		 * VRAM offset of the buffer.
		 */
		nvbo->handle = 0;
		nvbo->offset = handle;
		nvbo->domain = NOUVEAU_BO_VRAM;
		nvbo->flags = NOUVEAU_BO_VRAM | NOUVEAU_BO_PIN;
		nvbo->base.offset = nvbo->offset;
		nvbo->base.flags = nvbo->flags;
	} else {
		req.name = handle;
		ret = ioctl(nvdev->fd, DRM_IOCTL_GEM_OPEN, &req);
		if (ret) {
			nouveau_bo_ref(NULL, bo);
			return ret;
		}

		nvbo->size = req.size;
		nvbo->handle = req.handle;
	}

	nvbo->base.handle = nvbo->handle;
	return 0;
}
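/*
 * The two functions above implement buffer sharing via GEM flink names.
 * A minimal sketch of the round trip between two clients (the names and
 * second device are hypothetical):
 *
 *	uint32_t name;
 *
 *	nouveau_bo_handle_get(bo, &name);            exporting process
 *	... pass "name" over IPC ...
 *	nouveau_bo_handle_ref(dev2, name, &bo2);     importing process
 */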
static void
nouveau_bo_del_cb(void *priv)
{
	struct nouveau_bo_priv *nvbo = priv;

	nouveau_fence_ref(NULL, &nvbo->fence);
	nouveau_fence_ref(NULL, &nvbo->wr_fence);
	nouveau_bo_kfree(nvbo);
	free(nvbo);
}
static void
nouveau_bo_del(struct nouveau_bo **bo)
{
	struct nouveau_bo_priv *nvbo;

	if (!bo || !*bo)
		return;
	nvbo = nouveau_bo(*bo);
	*bo = NULL;

	if (--nvbo->refcount)
		return;

	/* Flush any pushbuf still referencing this BO before freeing. */
	if (nvbo->pending) {
		nvbo->pending = NULL;
		nouveau_pushbuf_flush(nvbo->pending_channel, 0);
	}

	nouveau_bo_ufree(nvbo);

	/* Without the kernel memory manager, defer the actual free until
	 * the GPU is done with the buffer.
	 */
	if (!nouveau_device(nvbo->base.device)->mm_enabled && nvbo->fence)
		nouveau_fence_signal_cb(nvbo->fence, nouveau_bo_del_cb, nvbo);
	else
		nouveau_bo_del_cb(nvbo);
}
void
nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pbo)
{
	if (!pbo)
		return;

	if (ref)
		nouveau_bo(ref)->refcount++;

	if (*pbo)
		nouveau_bo_del(pbo);

	*pbo = ref;
}
static int
nouveau_bo_wait_nomm(struct nouveau_bo *bo, int cpu_write)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret = 0;

	/* CPU writes must wait for all GPU access to finish; CPU reads
	 * only need the GPU's last write to finish.
	 */
	if (cpu_write)
		ret = nouveau_fence_wait(&nvbo->fence);
	else
		ret = nouveau_fence_wait(&nvbo->wr_fence);
	if (ret)
		return ret;

	nvbo->write_marker = 0;
	return 0;
}
int
nouveau_bo_wait(struct nouveau_bo *bo, int cpu_write)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	int ret;

	if (!nvbo->global_handle && !nvbo->write_marker && !cpu_write)
		return 0;

	/* Flush any pending pushbuf that touches this BO first. */
	if (nvbo->pending &&
	    (nvbo->pending->write_domains || cpu_write)) {
		nvbo->pending = NULL;
		nouveau_pushbuf_flush(nvbo->pending_channel, 0);
	}

	if (!nvdev->mm_enabled)
		return nouveau_bo_wait_nomm(bo, cpu_write);

	req.handle = nvbo->handle;
	ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret)
		return ret;

	nvbo->write_marker = 0;
	return 0;
}
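/*
 * Note on cpu_write semantics (inferred from the code above): a wait
 * for CPU *read* access only requires the GPU's last write to have
 * finished, while a wait for CPU *write* access requires all GPU use
 * of the buffer to have finished.  A read-only wait on a BO the GPU is
 * also only reading therefore returns without blocking.
 */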
int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	if (!nvbo || bo->map)
		return -EINVAL;

	if (!nouveau_bo_allocated(nvbo)) {
		/* Prefer a kernel object if a placement was requested,
		 * and fall back to plain system memory otherwise.
		 */
		if (nvbo->flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)) {
			ret = nouveau_bo_kalloc(nvbo, NULL);
			if (ret)
				return ret;
		}

		if (!nouveau_bo_allocated(nvbo)) {
			ret = nouveau_bo_ualloc(nvbo);
			if (ret)
				return ret;
		}
	}

	if (nvbo->sysmem) {
		bo->map = nvbo->sysmem;
	} else {
		ret = nouveau_bo_kmap(nvbo);
		if (ret)
			return ret;

		ret = nouveau_bo_wait(bo, (flags & NOUVEAU_BO_WR));
		if (ret)
			return ret;

		bo->map = nvbo->map;
	}

	return 0;
}
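/*
 * Typical CPU access pattern (an illustrative sketch, error handling
 * elided): map for write, fill, then unmap so the kernel sees the CPU
 * access window end.
 *
 *	if (nouveau_bo_map(bo, NOUVEAU_BO_WR) == 0) {
 *		memset(bo->map, 0, bo->size);
 *		nouveau_bo_unmap(bo);
 *	}
 */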
void
nouveau_bo_unmap(struct nouveau_bo *bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (nvdev->mm_enabled && bo->map && !nvbo->sysmem) {
		struct drm_nouveau_gem_cpu_fini req;

		req.handle = nvbo->handle;
		drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_CPU_FINI,
				&req, sizeof(req));
	}

	bo->map = NULL;
}
static int
nouveau_bo_validate_nomm(struct nouveau_bo_priv *nvbo, uint32_t flags)
{
	struct nouveau_bo *new = NULL;
	uint32_t t_handle, t_domain, t_offset, t_size;
	void *t_map;
	int ret;

	/* Already resident in an acceptable domain? */
	if ((flags & NOUVEAU_BO_VRAM) && nvbo->domain == NOUVEAU_BO_VRAM)
		return 0;
	if ((flags & NOUVEAU_BO_GART) && nvbo->domain == NOUVEAU_BO_GART)
		return 0;
	assert(flags & (NOUVEAU_BO_VRAM|NOUVEAU_BO_GART));

	/* Keep tiling info */
	flags |= (nvbo->flags & (NOUVEAU_BO_TILED|NOUVEAU_BO_ZTILE));

	ret = nouveau_bo_new(nvbo->base.device, flags, 0, nvbo->size, &new);
	if (ret)
		return ret;

	ret = nouveau_bo_kalloc(nouveau_bo(new), NULL);
	if (ret) {
		nouveau_bo_ref(NULL, &new);
		return ret;
	}

	/* Copy the old contents into the new object, if there are any. */
	if (nvbo->handle || nvbo->sysmem) {
		nouveau_bo_kmap(nouveau_bo(new));

		if (!nvbo->base.map) {
			nouveau_bo_map(&nvbo->base, NOUVEAU_BO_RD);
			memcpy(nouveau_bo(new)->map, nvbo->base.map,
			       nvbo->base.size);
			nouveau_bo_unmap(&nvbo->base);
		} else {
			memcpy(nouveau_bo(new)->map, nvbo->base.map,
			       nvbo->base.size);
		}
	}

	t_handle = nvbo->handle;
	t_domain = nvbo->domain;
	t_offset = nvbo->offset;
	t_size = nvbo->size;
	t_map = nvbo->map;

	nvbo->handle = nouveau_bo(new)->handle;
	nvbo->domain = nouveau_bo(new)->domain;
	nvbo->offset = nouveau_bo(new)->offset;
	nvbo->size = nouveau_bo(new)->size;
	nvbo->map = nouveau_bo(new)->map;

	nouveau_bo(new)->handle = t_handle;
	nouveau_bo(new)->domain = t_domain;
	nouveau_bo(new)->offset = t_offset;
	nouveau_bo(new)->size = t_size;
	nouveau_bo(new)->map = t_map;

	nouveau_bo_ref(NULL, &new);
	return 0;
}
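/*
 * Note on the swap above: rather than returning the freshly allocated
 * BO, the function exchanges the backing storage
 * (handle/domain/offset/size/map) between the old and new objects, so
 * callers holding nouveau_bo pointers transparently end up backed by
 * memory in the requested domain; the temporary then carries the old
 * storage to its death via nouveau_bo_ref(NULL, &new).
 */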
static int
nouveau_bo_pin_nomm(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	if (!nvbo->handle) {
		if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
			return -EINVAL;

		ret = nouveau_bo_validate_nomm(nvbo, flags & ~NOUVEAU_BO_PIN);
		if (ret)
			return ret;
	}

	nvbo->pinned = 1;

	/* Fill in public nouveau_bo members */
	bo->flags = nvbo->domain;
	bo->offset = nvbo->offset;

	return 0;
}
int
nouveau_bo_pin(struct nouveau_bo *bo, uint32_t flags)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_pin req;
	int ret;

	if (nvbo->pinned)
		return 0;

	if (!nvdev->mm_enabled)
		return nouveau_bo_pin_nomm(bo, flags);

	/* Ensure we have a kernel object... */
	if (!nvbo->handle) {
		if (!(flags & (NOUVEAU_BO_VRAM | NOUVEAU_BO_GART)))
			return -EINVAL;
		nvbo->flags = flags;

		ret = nouveau_bo_kalloc(nvbo, NULL);
		if (ret)
			return ret;
	}

	/* Now force it to stay put :) */
	req.handle = nvbo->handle;
	req.domain = 0;
	if (nvbo->flags & NOUVEAU_BO_VRAM)
		req.domain |= NOUVEAU_GEM_DOMAIN_VRAM;
	if (nvbo->flags & NOUVEAU_BO_GART)
		req.domain |= NOUVEAU_GEM_DOMAIN_GART;

	ret = drmCommandWriteRead(nvdev->fd, DRM_NOUVEAU_GEM_PIN, &req,
				  sizeof(struct drm_nouveau_gem_pin));
	if (ret)
		return ret;

	nvbo->offset = req.offset;
	nvbo->domain = req.domain;
	nvbo->pinned = 1;
	nvbo->flags |= NOUVEAU_BO_PIN;

	/* Fill in public nouveau_bo members */
	if (nvbo->domain & NOUVEAU_GEM_DOMAIN_VRAM)
		bo->flags = NOUVEAU_BO_VRAM;
	if (nvbo->domain & NOUVEAU_GEM_DOMAIN_GART)
		bo->flags = NOUVEAU_BO_GART;
	bo->offset = nvbo->offset;

	return 0;
}
void
nouveau_bo_unpin(struct nouveau_bo *bo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_unpin req;

	if (!nvbo->pinned)
		return;

	if (nvdev->mm_enabled) {
		req.handle = nvbo->handle;
		drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_UNPIN,
				&req, sizeof(req));
	}

	nvbo->pinned = bo->offset = bo->flags = 0;
}
int
nouveau_bo_tile(struct nouveau_bo *bo, uint32_t flags, uint32_t delta,
		uint32_t size)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	uint32_t kern_flags = 0;
	int ret = 0;

	if (flags & NOUVEAU_BO_TILED) {
		kern_flags |= NOUVEAU_MEM_TILE;
		if (flags & NOUVEAU_BO_ZTILE)
			kern_flags |= NOUVEAU_MEM_TILE_ZETA;
	}

	if (nvdev->mm_enabled) {
		struct drm_nouveau_gem_tile req;

		req.handle = nvbo->handle;
		req.delta = delta;
		req.size = size;
		req.flags = kern_flags;
		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_GEM_TILE,
				      &req, sizeof(req));
	} else {
		struct drm_nouveau_mem_tile req;

		req.offset = nvbo->offset;
		req.delta = delta;
		req.size = size;
		req.flags = kern_flags;

		if (flags & NOUVEAU_BO_VRAM)
			req.flags |= NOUVEAU_MEM_FB;
		if (flags & NOUVEAU_BO_GART)
			req.flags |= NOUVEAU_MEM_AGP;

		ret = drmCommandWrite(nvdev->fd, DRM_NOUVEAU_MEM_TILE,
				      &req, sizeof(req));
	}

	return ret;
}
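/*
 * Example (hypothetical values): mark the first 1MiB of a depth buffer
 * as zeta-tiled.
 *
 *	nouveau_bo_tile(bo, NOUVEAU_BO_TILED | NOUVEAU_BO_ZTILE,
 *			0, 1 * 1024 * 1024);
 */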
int
nouveau_bo_busy(struct nouveau_bo *bo, uint32_t access)
{
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	if (!nvdev->mm_enabled) {
		struct nouveau_fence *fence;

		if (nvbo->pending && (nvbo->pending->write_domains ||
				      (access & NOUVEAU_BO_WR)))
			return 1;

		/* Writing needs all GPU access finished; reading only
		 * needs the last GPU write finished.
		 */
		if (access & NOUVEAU_BO_WR)
			fence = nvbo->fence;
		else
			fence = nvbo->wr_fence;
		return !nouveau_fence(fence)->signalled;
	}

	/* With the kernel memory manager there's no cheap busy query,
	 * so conservatively report busy.
	 */
	return 1;
}
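/*
 * Sketch of a non-blocking caller (illustrative; get_fresh_buffer() is
 * a hypothetical helper): fall back to a different buffer instead of
 * stalling when the GPU is still writing this one.
 *
 *	if (nouveau_bo_busy(bo, NOUVEAU_BO_WR))
 *		bo = get_fresh_buffer();
 */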
struct drm_nouveau_gem_pushbuf_bo *
nouveau_bo_emit_buffer(struct nouveau_channel *chan, struct nouveau_bo *bo)
{
	struct nouveau_pushbuf_priv *nvpb = nouveau_pushbuf(chan->pushbuf);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_pushbuf_bo *pbbo;
	struct nouveau_bo *ref = NULL;
	int ret;

	if (nvbo->pending)
		return nvbo->pending;

	if (!nvbo->handle) {
		ret = nouveau_bo_kalloc(nvbo, chan);
		if (ret)
			return NULL;

		/* If the BO was living in system memory, migrate its
		 * contents into the new kernel object.  sysmem is hidden
		 * temporarily so nouveau_bo_map() takes the kernel
		 * mapping path instead of returning the sysmem pointer.
		 */
		if (nvbo->sysmem) {
			void *sysmem_tmp = nvbo->sysmem;

			nvbo->sysmem = NULL;
			ret = nouveau_bo_map(bo, NOUVEAU_BO_WR);
			if (ret)
				return NULL;
			nvbo->sysmem = sysmem_tmp;

			memcpy(bo->map, nvbo->sysmem, nvbo->base.size);
			nouveau_bo_unmap(bo);
			nouveau_bo_ufree(nvbo);
		}
	}

	if (nvpb->nr_buffers >= NOUVEAU_PUSHBUF_MAX_BUFFERS)
		return NULL;
	pbbo = nvpb->buffers + nvpb->nr_buffers++;
	nvbo->pending = pbbo;
	nvbo->pending_channel = chan;

	/* The pushbuf holds a reference on the BO until it's flushed. */
	nouveau_bo_ref(bo, &ref);
	pbbo->user_priv = (uint64_t)(unsigned long)ref;
	pbbo->handle = nvbo->handle;
	pbbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART;
	pbbo->read_domains = 0;
	pbbo->write_domains = 0;
	pbbo->presumed_domain = nvbo->domain;
	pbbo->presumed_offset = nvbo->offset;
	pbbo->presumed_ok = 1;
	return pbbo;
}
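/*
 * How this is used (inferred from the pushbuf code's perspective): when
 * a relocation is emitted, the pushbuf asks for the BO's validation
 * entry and then ORs the access domains into it, e.g.:
 *
 *	struct drm_nouveau_gem_pushbuf_bo *pbbo;
 *
 *	pbbo = nouveau_bo_emit_buffer(chan, bo);
 *	pbbo->read_domains |= NOUVEAU_GEM_DOMAIN_VRAM;
 */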