/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <strings.h>
#include <errno.h>

#include <sys/mman.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

#include "nvif/class.h"
#include "nvif/cl0080.h"
#include "nvif/ioctl.h"
#include "nvif/unpack.h"

drm_private uint32_t nouveau_debug = 0;
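/* Parse the NOUVEAU_LIBDRM_DEBUG environment value into nouveau_debug. */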
debug_init(char *args)
        int n = strtol(args, NULL, 0);
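/*
 * Submit an NVIF ioctl to the kernel via DRM_NOUVEAU_NVIF, filling in the
 * object, owner/route and token fields of the request header first.
 */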
nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
        struct nouveau_drm *drm = nouveau_drm(obj);
        struct nvif_ioctl_v0 v0;

        if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
                if (obj != &drm->client)
                        args->v0.object = (unsigned long)(void *)obj;
                args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
                args->v0.route = 0x00;
                args->v0.route = 0xff;
                args->v0.token = obj->handle;

        return drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NVIF, args, argc);
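/*
 * Issue an NVIF method call (NVIF_IOCTL_V0_MTHD) against an object, copying
 * the caller's parameter buffer into the request and back out afterwards.
 */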
nouveau_object_mthd(struct nouveau_object *obj,
                    uint32_t mthd, void *data, uint32_t size)
        struct nouveau_drm *drm = nouveau_drm(obj);
        struct nvif_ioctl_v0 ioctl;
        struct nvif_ioctl_mthd_v0 mthd;
        uint32_t argc = sizeof(*args) + size;

        if (argc > sizeof(stack)) {
                if (!(args = malloc(argc)))
        args = (void *)stack;

        args->ioctl.version = 0;
        args->ioctl.type = NVIF_IOCTL_V0_MTHD;
        args->mthd.version = 0;
        args->mthd.method = mthd;

        memcpy(args->mthd.data, data, size);
        ret = nouveau_object_ioctl(obj, args, argc);
        memcpy(data, args->mthd.data, size);
        if (args != (void *)stack)

nouveau_object_sclass_put(struct nouveau_sclass **psclass)
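/*
 * Query the classes supported under an object, either through
 * NVIF_IOCTL_V0_SCLASS or the abi16 fallback; allocates *psclass (to be
 * released with nouveau_object_sclass_put()) and returns the entry count.
 */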
nouveau_object_sclass_get(struct nouveau_object *obj,
                          struct nouveau_sclass **psclass)
        struct nouveau_drm *drm = nouveau_drm(obj);
        struct nvif_ioctl_v0 ioctl;
        struct nvif_ioctl_sclass_v0 sclass;
        struct nouveau_sclass *sclass;

        return abi16_sclass(obj, psclass);

        size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
        if (!(args = malloc(size)))
        args->ioctl.version = 0;
        args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
        args->sclass.version = 0;
        args->sclass.count = cnt;

        ret = nouveau_object_ioctl(obj, args, size);
        if (ret == 0 && args->sclass.count <= cnt)
        cnt = args->sclass.count;

        if ((sclass = calloc(args->sclass.count, sizeof(*sclass)))) {
                for (i = 0; i < args->sclass.count; i++) {
                        sclass[i].oclass = args->sclass.oclass[i].oclass;
                        sclass[i].minver = args->sclass.oclass[i].minver;
                        sclass[i].maxver = args->sclass.oclass[i].maxver;
                ret = args->sclass.count;
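/*
 * Pick the first entry of a caller-supplied mclass table whose class and
 * version range match something the object supports.
 */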
nouveau_object_mclass(struct nouveau_object *obj,
                      const struct nouveau_mclass *mclass)
        struct nouveau_sclass *sclass;

        cnt = nouveau_object_sclass_get(obj, &sclass);

        for (i = 0; ret < 0 && mclass[i].oclass; i++) {
                for (j = 0; j < cnt; j++) {
                        if (mclass[i].oclass == sclass[j].oclass &&
                            mclass[i].version >= sclass[j].minver &&
                            mclass[i].version <= sclass[j].maxver) {

        nouveau_object_sclass_put(&sclass);
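/* Destroy the kernel object backing "obj" with an NVIF_IOCTL_V0_DEL request. */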
nouveau_object_fini(struct nouveau_object *obj)
        struct nvif_ioctl_v0 ioctl;
        struct nvif_ioctl_del del;
                .ioctl.type = NVIF_IOCTL_V0_DEL,

        nouveau_object_ioctl(obj, &args, sizeof(args));
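/*
 * Initialize an object: route it through the abi16 emulation when required,
 * otherwise create it in the kernel with NVIF_IOCTL_V0_NEW.
 */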
nouveau_object_init(struct nouveau_object *parent, uint32_t handle,
                    int32_t oclass, void *data, uint32_t size,
                    struct nouveau_object *obj)
        struct nouveau_drm *drm = nouveau_drm(parent);
        struct nvif_ioctl_v0 ioctl;
        struct nvif_ioctl_new_v0 new;
        uint32_t argc = sizeof(*args) + size;
        int (*func)(struct nouveau_object *);

        obj->parent = parent;
        obj->handle = handle;
        obj->oclass = oclass;

        if (!abi16_object(obj, &func) && drm->nvif) {
                if (!(args = malloc(argc)))
                args->ioctl.version = 0;
                args->ioctl.type = NVIF_IOCTL_V0_NEW;
                args->new.version = 0;
                args->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
                args->new.token = (unsigned long)(void *)obj;
                args->new.object = (unsigned long)(void *)obj;
                args->new.handle = handle;
                args->new.oclass = oclass;
                memcpy(args->new.data, data, size);
                ret = nouveau_object_ioctl(parent, args, argc);
                memcpy(data, args->new.data, size);

        obj->length = size ? size : sizeof(struct nouveau_object *);
        if (!(obj->data = malloc(obj->length)))
        memcpy(obj->data, data, obj->length);
        *(struct nouveau_object **)obj->data = obj;

        nouveau_object_fini(obj);
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
                   uint32_t oclass, void *data, uint32_t length,
                   struct nouveau_object **pobj)
        struct nouveau_object *obj;

        if (!(obj = malloc(sizeof(*obj))))
        ret = nouveau_object_init(parent, handle, oclass, data, length, obj);

nouveau_object_del(struct nouveau_object **pobj)
        struct nouveau_object *obj = *pobj;

        nouveau_object_fini(obj);

nouveau_drm_del(struct nouveau_drm **pdrm)
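/*
 * Wrap a DRM file descriptor: query the kernel driver version with
 * drmGetVersion() and enable the NVIF path when the module is new enough
 * (version >= 1.3.1).
 */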
nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
        struct nouveau_drm *drm;

        debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));

        if (!(drm = calloc(1, sizeof(*drm))))

        if (!(ver = drmGetVersion(fd))) {
                nouveau_drm_del(&drm);

        drm->version = (ver->version_major << 24) |
                       (ver->version_minor << 8) |
                        ver->version_patchlevel;
        drm->nvif = (drm->version >= 0x01000301);

/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
 * is kept here to prevent AIGLX from crashing if the DDX is linked against
 * the new libdrm, but the DRI driver against the old
 */
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
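/*
 * Create the device object and fill in chipset, VRAM and GART sizes, either
 * from NV_DEVICE_V0_INFO or, on older kernels, via NOUVEAU_GETPARAM ioctls;
 * VRAM/GART limits default to 80% and can be overridden through the
 * NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT / NOUVEAU_LIBDRM_GART_LIMIT_PERCENT
 * environment variables.
 */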
nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
                   void *data, uint32_t size, struct nouveau_device **pdev)
        struct nv_device_info_v0 info = {};
        struct nv_device_v0 v0;
        uint32_t argc = size;
        struct nouveau_drm *drm = nouveau_drm(parent);
        struct nouveau_device_priv *nvdev;
        struct nouveau_device *dev;

        if (oclass != NV_DEVICE ||
            nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))

        if (!(nvdev = calloc(1, sizeof(*nvdev))))
        dev = *pdev = &nvdev->base;

        ret = nouveau_object_init(parent, 0, oclass, args, argc,

        ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
                                  &info, sizeof(info));

        nvdev->base.chipset = info.chipset;
        nvdev->have_bo_usage = true;

        if (args->v0.device == ~0ULL) {
                nvdev->base.object.parent = &drm->client;
                nvdev->base.object.handle = ~0ULL;
                nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
                nvdev->base.object.length = ~0;
                ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v);
                nvdev->base.chipset = v;

                ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &v);
                nvdev->have_bo_usage = (v != 0);

        ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &v);
        nvdev->base.vram_size = v;

        ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &v);
        nvdev->base.gart_size = v;

        tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
        nvdev->vram_limit_percent = atoi(tmp);
        nvdev->vram_limit_percent = 80;
        nvdev->base.vram_limit =
                (nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;

        tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
        nvdev->gart_limit_percent = atoi(tmp);
        nvdev->gart_limit_percent = 80;
        nvdev->base.gart_limit =
                (nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

        ret = pthread_mutex_init(&nvdev->lock, NULL);
        DRMINITLISTHEAD(&nvdev->bo_list);

        nouveau_device_del(pdev);
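/*
 * Convenience wrapper: create a nouveau_drm for the fd and then a device
 * object on top of it; "close" controls whether the fd is closed again by
 * nouveau_device_del().
 */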
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
        struct nouveau_drm *drm;
        struct nouveau_device_priv *nvdev;

        ret = nouveau_drm_new(fd, &drm);

        ret = nouveau_device_new(&drm->client, NV_DEVICE,
                                 &(struct nv_device_v0) {
                                 }, sizeof(struct nv_device_v0), pdev);
                nouveau_drm_del(&drm);

        nvdev = nouveau_device(*pdev);
        nvdev->base.fd = drm->fd;
        nvdev->base.drm_version = drm->version;
        nvdev->close = close;
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
        int ret = -ENODEV, fd = drmOpen("nouveau", busid);

        ret = nouveau_device_wrap(fd, 1, pdev);

nouveau_device_del(struct nouveau_device **pdev)
        struct nouveau_device_priv *nvdev = nouveau_device(*pdev);

        pthread_mutex_destroy(&nvdev->lock);
        if (nvdev->base.fd >= 0) {
                struct nouveau_drm *drm =
                        nouveau_drm(&nvdev->base.object);
                nouveau_drm_del(&drm);
                drmClose(nvdev->base.fd);

nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
        struct nouveau_drm *drm = nouveau_drm(&dev->object);
        struct drm_nouveau_getparam r = { .param = param };
        int fd = drm->fd, ret =
                drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));

nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
        struct nouveau_drm *drm = nouveau_drm(&dev->object);
        struct drm_nouveau_setparam r = { .param = param, .value = value };
        return drmCommandWrite(drm->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
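/*
 * Allocate a client id: each uint32_t in nvdev->client is a bitmask of ids
 * in use, so the resulting id is (slot * 32) + first free bit.
 */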
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        struct nouveau_client_priv *pcli;
        int id = 0, i, ret = -ENOMEM;

        pthread_mutex_lock(&nvdev->lock);

        for (i = 0; i < nvdev->nr_client; i++) {
                id = ffs(nvdev->client[i]) - 1;

        clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
        nvdev->client = clients;
        nvdev->client[i] = 0;

        pcli = calloc(1, sizeof(*pcli));
                nvdev->client[i] |= (1 << id);
                pcli->base.device = dev;
                pcli->base.id = (i * 32) + id;
                *pclient = &pcli->base;

        pthread_mutex_unlock(&nvdev->lock);

nouveau_client_del(struct nouveau_client **pclient)
        struct nouveau_client_priv *pcli = nouveau_client(*pclient);
        struct nouveau_device_priv *nvdev;

                int id = pcli->base.id;
                nvdev = nouveau_device(pcli->base.device);
                pthread_mutex_lock(&nvdev->lock);
                nvdev->client[id / 32] &= ~(1 << (id % 32));
                pthread_mutex_unlock(&nvdev->lock);
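/*
 * Drop the kernel GEM handle for a buffer. Named/shared buffers sit on the
 * device bo_list and must be closed with the device lock held (see the
 * comment below); anonymous buffers can be closed directly.
 */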
nouveau_bo_del(struct nouveau_bo *bo)
        struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
        struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_gem_close req = { .handle = bo->handle };

        if (nvbo->head.next) {
                pthread_mutex_lock(&nvdev->lock);
                if (atomic_read(&nvbo->refcnt) == 0) {
                        DRMLISTDEL(&nvbo->head);
                        /*
                         * This bo has to be closed with the lock held because
                         * gem handles are not refcounted. If a shared bo is
                         * closed and re-opened in another thread a race
                         * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
                         * might cause the bo to be closed accidentally while
                         * re-importing.
                         */
                        drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
                pthread_mutex_unlock(&nvdev->lock);
                drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);

        drm_munmap(bo->map, bo->size);

nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
               uint64_t size, union nouveau_bo_config *config,
               struct nouveau_bo **pbo)
        struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
        struct nouveau_bo *bo = &nvbo->base;

        atomic_set(&nvbo->refcnt, 1);

        ret = abi16_bo_init(bo, align, config);
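/*
 * Look up an existing wrapper for a GEM handle on the device bo_list, taking
 * a new reference, or create one from DRM_NOUVEAU_GEM_INFO; the caller must
 * hold nvdev->lock.
 */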
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
                       struct nouveau_bo **pbo, int name)
        struct nouveau_drm *drm = nouveau_drm(&dev->object);
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        struct drm_nouveau_gem_info req = { .handle = handle };
        struct nouveau_bo_priv *nvbo;

        DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
                if (nvbo->base.handle == handle) {
                        if (atomic_inc_return(&nvbo->refcnt) == 1) {
                                /*
                                 * Uh oh, this bo is dead and someone else
                                 * will free it, but because refcnt is
                                 * now non-zero fortunately they won't
                                 * call the ioctl to close the bo.
                                 *
                                 * Remove this bo from the list so other
                                 * calls to nouveau_bo_wrap_locked will
                                 * see our replacement nvbo.
                                 */
                                DRMLISTDEL(&nvbo->head);

        ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_INFO,

        nvbo = calloc(1, sizeof(*nvbo));
                atomic_set(&nvbo->refcnt, 1);
                nvbo->base.device = dev;
                abi16_bo_info(&nvbo->base, &req);
                DRMLISTADD(&nvbo->head, &nvdev->bo_list);
nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
        if (!nvbo->head.next) {
                struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
                pthread_mutex_lock(&nvdev->lock);
                if (!nvbo->head.next)
                        DRMLISTADD(&nvbo->head, &nvdev->bo_list);
                pthread_mutex_unlock(&nvdev->lock);

nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
                struct nouveau_bo **pbo)
        struct nouveau_device_priv *nvdev = nouveau_device(dev);

        pthread_mutex_lock(&nvdev->lock);
        ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
        pthread_mutex_unlock(&nvdev->lock);

nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
                    struct nouveau_bo **pbo)
        struct nouveau_drm *drm = nouveau_drm(&dev->object);
        struct nouveau_device_priv *nvdev = nouveau_device(dev);
        struct nouveau_bo_priv *nvbo;
        struct drm_gem_open req = { .name = name };

        pthread_mutex_lock(&nvdev->lock);
        DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
                if (nvbo->name == name) {
                        ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
                        pthread_mutex_unlock(&nvdev->lock);

        ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
        ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);

        pthread_mutex_unlock(&nvdev->lock);
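/*
 * Flink the buffer to obtain a global name, caching it in nvbo->name and
 * publishing the bo on the device list so later name lookups find it.
 */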
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
        struct drm_gem_flink req = { .handle = bo->handle };
        struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

                int ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_FLINK, &req);

                nvbo->name = *name = req.name;

                nouveau_bo_make_global(nvbo);

nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
        struct nouveau_bo *ref = *pref;

        atomic_inc(&nouveau_bo(bo)->refcnt);

        if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))

nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
                            struct nouveau_bo **bo)
        struct nouveau_drm *drm = nouveau_drm(&dev->object);
        struct nouveau_device_priv *nvdev = nouveau_device(dev);

        nouveau_bo_ref(NULL, bo);

        pthread_mutex_lock(&nvdev->lock);
        ret = drmPrimeFDToHandle(drm->fd, prime_fd, &handle);
        ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
        pthread_mutex_unlock(&nvdev->lock);

nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
        struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

        ret = drmPrimeHandleToFD(drm->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);

        nouveau_bo_make_global(nvbo);
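/*
 * Wait for the GPU to finish with a buffer: flush any pushbuf that still
 * references it, then issue DRM_NOUVEAU_GEM_CPU_PREP with write/no-block
 * flags derived from "access".
 */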
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
                struct nouveau_client *client)
        struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
        struct drm_nouveau_gem_cpu_prep req;
        struct nouveau_pushbuf *push;

        if (!(access & NOUVEAU_BO_RDWR))

        push = cli_push_get(client, bo);
        if (push && push->channel)
                nouveau_pushbuf_kick(push, push->channel);

        if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
            !(access & NOUVEAU_BO_WR))

        req.handle = bo->handle;
        if (access & NOUVEAU_BO_WR)
                req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
        if (access & NOUVEAU_BO_NOBLOCK)
                req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

        ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GEM_CPU_PREP,
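/*
 * Map the buffer into the CPU address space (once, cached in bo->map) and
 * then wait for it to become idle for the requested access.
 */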
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
               struct nouveau_client *client)
        struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
        struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

        if (bo->map == NULL) {
                bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
                                   MAP_SHARED, drm->fd, nvbo->map_handle);
                if (bo->map == MAP_FAILED) {

        return nouveau_bo_wait(bo, access, client);