/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <strings.h>
#include <errno.h>
#include <pthread.h>

#include <sys/mman.h>

#include <xf86drm.h>
#include <xf86atomic.h>
#include "libdrm_macros.h"
#include "libdrm_lists.h"
#include "nouveau_drm.h"

#include "nouveau.h"
#include "private.h"

#include "nvif/class.h"
#include "nvif/cl0080.h"
#include "nvif/ioctl.h"
#include "nvif/unpack.h"

drm_private uint32_t nouveau_debug = 0;

static void
debug_init(char *args)
{
	if (args) {
		int n = strtol(args, NULL, 0);
		if (n >= 0)
			nouveau_debug = n;
	}
}
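
/* Illustrative only: the debug level comes from the environment rather than
 * an API call, e.g. (shell, hypothetical value):
 *
 *	NOUVEAU_LIBDRM_DEBUG=1 ./myapp
 *
 * Any value strtol() can parse (decimal, 0x-prefixed hex or 0-prefixed
 * octal) is accepted.
 */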

/* Marshal an nvif request through DRM_NOUVEAU_NVIF.  Objects created
 * through the nvif path are routed by pointer (route 0x00); abi16-style
 * objects are routed by handle token (route 0xff).
 */
static int
nouveau_object_ioctl(struct nouveau_object *obj, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	uint32_t argc = size;
	int ret = -ENOSYS;

	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		if (!obj->length) {
			if (obj != &drm->client)
				args->v0.object = (unsigned long)(void *)obj;
			else
				args->v0.object = 0;
			args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
			args->v0.route = 0x00;
		} else {
			args->v0.route = 0xff;
			args->v0.token = obj->handle;
		}
	} else
		return ret;

	return drmCommandWriteRead(drm->fd, DRM_NOUVEAU_NVIF, args, argc);
}

int
nouveau_object_mthd(struct nouveau_object *obj,
		    uint32_t mthd, void *data, uint32_t size)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_mthd_v0 mthd;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	uint8_t stack[128];
	int ret;

	if (!drm->nvif)
		return -ENOSYS;

	/* use the on-stack buffer for small requests, the heap otherwise */
	if (argc > sizeof(stack)) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_MTHD;
	args->mthd.version = 0;
	args->mthd.method = mthd;

	memcpy(args->mthd.data, data, size);
	ret = nouveau_object_ioctl(obj, args, argc);
	memcpy(data, args->mthd.data, size);
	if (args != (void *)stack)
		free(args);
	return ret;
}
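
/* Usage sketch (mirrors nouveau_device_new() below): query device info
 * through the method interface; struct and method id come from
 * nvif/cl0080.h.
 *
 *	struct nv_device_info_v0 info = {};
 *	int ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
 *				      &info, sizeof(info));
 */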

void
nouveau_object_sclass_put(struct nouveau_sclass **psclass)
{
	free(*psclass);
	*psclass = NULL;
}

int
nouveau_object_sclass_get(struct nouveau_object *obj,
			  struct nouveau_sclass **psclass)
{
	struct nouveau_drm *drm = nouveau_drm(obj);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_sclass_v0 sclass;
	} *args = NULL;
	struct nouveau_sclass *sclass;
	int ret, cnt = 0, i;
	uint32_t size;

	if (!drm->nvif)
		return abi16_sclass(obj, psclass);

	/* retry with a larger buffer until the kernel's list fits */
	while (1) {
		size = sizeof(*args) + cnt * sizeof(args->sclass.oclass[0]);
		if (!(args = malloc(size)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
		args->sclass.version = 0;
		args->sclass.count = cnt;

		ret = nouveau_object_ioctl(obj, args, size);
		if (ret == 0 && args->sclass.count <= cnt)
			break;
		cnt = args->sclass.count;
		free(args);
		if (ret != 0)
			return ret;
	}

	if ((sclass = calloc(args->sclass.count, sizeof(*sclass)))) {
		for (i = 0; i < args->sclass.count; i++) {
			sclass[i].oclass = args->sclass.oclass[i].oclass;
			sclass[i].minver = args->sclass.oclass[i].minver;
			sclass[i].maxver = args->sclass.oclass[i].maxver;
		}
		*psclass = sclass;
		ret = args->sclass.count;
	} else {
		ret = -ENOMEM;
	}

	free(args);
	return ret;
}
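
/* Usage sketch: enumerate the classes an object supports, then release the
 * list with nouveau_object_sclass_put(); a negative return is an errno.
 *
 *	struct nouveau_sclass *sclass;
 *	int i, cnt = nouveau_object_sclass_get(obj, &sclass);
 *	if (cnt >= 0) {
 *		for (i = 0; i < cnt; i++)
 *			printf("class %08x\n", sclass[i].oclass);
 *		nouveau_object_sclass_put(&sclass);
 *	}
 */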

int
nouveau_object_mclass(struct nouveau_object *obj,
		      const struct nouveau_mclass *mclass)
{
	struct nouveau_sclass *sclass;
	int ret = -ENODEV;
	int cnt, i, j;

	cnt = nouveau_object_sclass_get(obj, &sclass);
	if (cnt < 0)
		return cnt;

	for (i = 0; ret < 0 && mclass[i].oclass; i++) {
		for (j = 0; j < cnt; j++) {
			if (mclass[i].oclass  == sclass[j].oclass &&
			    mclass[i].version >= sclass[j].minver &&
			    mclass[i].version <= sclass[j].maxver) {
				ret = i;
				break;
			}
		}
	}

	nouveau_object_sclass_put(&sclass);
	return ret;
}
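
/* Usage sketch (class ids hypothetical): pick the first entry of the
 * caller's preference-ordered table that the object supports; the return
 * value indexes that table, or -ENODEV if nothing matched.
 *
 *	static const struct nouveau_mclass mclass[] = {
 *		{ 0x906f, 0 },
 *		{ 0x826f, 0 },
 *		{}
 *	};
 *	int idx = nouveau_object_mclass(chan, mclass);
 */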

static void
nouveau_object_fini(struct nouveau_object *obj)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_del del;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_DEL,
	};

	/* abi16-style objects carry private data instead of a kernel one */
	if (obj->data) {
		abi16_delete(obj);
		free(obj->data);
		obj->data = NULL;
		return;
	}

	nouveau_object_ioctl(obj, &args, sizeof(args));
}

static int
nouveau_object_init(struct nouveau_object *parent, uint32_t handle,
		    int32_t oclass, void *data, uint32_t size,
		    struct nouveau_object *obj)
{
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_new_v0 new;
	} *args;
	uint32_t argc = sizeof(*args) + size;
	int (*func)(struct nouveau_object *);
	int ret = -ENOSYS;

	obj->parent = parent;
	obj->handle = handle;
	obj->oclass = oclass;
	obj->length = 0;
	obj->data = NULL;

	if (!abi16_object(obj, &func) && drm->nvif) {
		if (!(args = malloc(argc)))
			return -ENOMEM;
		args->ioctl.version = 0;
		args->ioctl.type = NVIF_IOCTL_V0_NEW;
		args->new.version = 0;
		args->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
		args->new.token = (unsigned long)(void *)obj;
		args->new.object = (unsigned long)(void *)obj;
		args->new.handle = handle;
		args->new.oclass = oclass;
		memcpy(args->new.data, data, size);
		ret = nouveau_object_ioctl(parent, args, argc);
		memcpy(data, args->new.data, size);
		free(args);
	} else
	if (func) {
		obj->length = size ? size : sizeof(struct nouveau_object *);
		if (!(obj->data = malloc(obj->length)))
			return -ENOMEM;
		if (data)
			memcpy(obj->data, data, obj->length);
		else
			*(struct nouveau_object **)obj->data = obj;

		ret = func(obj);
	}

	if (ret) {
		nouveau_object_fini(obj);
		return ret;
	}

	return 0;
}

int
nouveau_object_new(struct nouveau_object *parent, uint64_t handle,
		   uint32_t oclass, void *data, uint32_t length,
		   struct nouveau_object **pobj)
{
	struct nouveau_object *obj;
	int ret;

	if (!(obj = malloc(sizeof(*obj))))
		return -ENOMEM;

	ret = nouveau_object_init(parent, handle, oclass, data, length, obj);
	if (ret) {
		free(obj);
		return ret;
	}

	*pobj = obj;
	return 0;
}

void
nouveau_object_del(struct nouveau_object **pobj)
{
	struct nouveau_object *obj = *pobj;
	if (obj) {
		nouveau_object_fini(obj);
		free(obj);
		*pobj = NULL;
	}
}

void
nouveau_drm_del(struct nouveau_drm **pdrm)
{
	free(*pdrm);
	*pdrm = NULL;
}

int
nouveau_drm_new(int fd, struct nouveau_drm **pdrm)
{
	struct nouveau_drm *drm;
	drmVersionPtr ver;

	debug_init(getenv("NOUVEAU_LIBDRM_DEBUG"));

	if (!(drm = calloc(1, sizeof(*drm))))
		return -ENOMEM;
	drm->fd = fd;

	if (!(ver = drmGetVersion(fd))) {
		nouveau_drm_del(&drm);
		return -EINVAL;
	}
	*pdrm = drm;

	drm->version = (ver->version_major << 24) |
		       (ver->version_minor << 8) |
			ver->version_patchlevel;
	drm->nvif = (drm->version >= 0x01000301);
	drmFreeVersion(ver);
	return 0;
}
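
/* The packed version is (major << 24) | (minor << 8) | patchlevel, so e.g.
 * kernel module version 1.3.1 yields 0x01000301 -- the first version to
 * expose the NVIF interface, hence the drm->nvif check above.
 */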

/* this is the old libdrm's version of nouveau_device_wrap(), the symbol
 * is kept here to prevent AIGLX from crashing if the DDX is linked against
 * the new libdrm, but the DRI driver against the old one
 */
int
nouveau_device_open_existing(struct nouveau_device **pdev, int close, int fd,
			     drm_context_t ctx)
{
	return -EACCES;
}

int
nouveau_device_new(struct nouveau_object *parent, int32_t oclass,
		   void *data, uint32_t size, struct nouveau_device **pdev)
{
	struct nv_device_info_v0 info = {};
	union {
		struct nv_device_v0 v0;
	} *args = data;
	uint32_t argc = size;
	struct nouveau_drm *drm = nouveau_drm(parent);
	struct nouveau_device_priv *nvdev;
	struct nouveau_device *dev;
	uint64_t v;
	char *tmp;
	int ret = -ENOSYS;

	if (oclass != NV_DEVICE ||
	    nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))
		return -EINVAL;

	if (!(nvdev = calloc(1, sizeof(*nvdev))))
		return -ENOMEM;
	dev = *pdev = &nvdev->base;
	dev->fd = -1;

	if (drm->nvif) {
		ret = nouveau_object_init(parent, 0, oclass, args, argc,
					  &dev->object);
		if (ret)
			goto done;

		ret = nouveau_object_mthd(&dev->object, NV_DEVICE_V0_INFO,
					  &info, sizeof(info));
		if (ret)
			goto done;

		nvdev->base.chipset = info.chipset;
		nvdev->have_bo_usage = true;
	} else
	if (args->v0.device == ~0ULL) {
		/* abi16 fallback: fake a device object, probe via getparam */
		nvdev->base.object.parent = &drm->client;
		nvdev->base.object.handle = ~0ULL;
		nvdev->base.object.oclass = NOUVEAU_DEVICE_CLASS;
		nvdev->base.object.length = ~0;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v);
		if (ret)
			goto done;
		nvdev->base.chipset = v;

		ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_HAS_BO_USAGE, &v);
		if (ret == 0)
			nvdev->have_bo_usage = (v != 0);
	} else {
		ret = -ENOSYS;
		goto done;
	}

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_FB_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.vram_size = v;

	ret = nouveau_getparam(dev, NOUVEAU_GETPARAM_AGP_SIZE, &v);
	if (ret)
		goto done;
	nvdev->base.gart_size = v;

	tmp = getenv("NOUVEAU_LIBDRM_VRAM_LIMIT_PERCENT");
	if (tmp)
		nvdev->vram_limit_percent = atoi(tmp);
	else
		nvdev->vram_limit_percent = 80;

	nvdev->base.vram_limit =
		(nvdev->base.vram_size * nvdev->vram_limit_percent) / 100;

	tmp = getenv("NOUVEAU_LIBDRM_GART_LIMIT_PERCENT");
	if (tmp)
		nvdev->gart_limit_percent = atoi(tmp);
	else
		nvdev->gart_limit_percent = 80;

	nvdev->base.gart_limit =
		(nvdev->base.gart_size * nvdev->gart_limit_percent) / 100;

	ret = pthread_mutex_init(&nvdev->lock, NULL);
	DRMINITLISTHEAD(&nvdev->bo_list);
done:
	if (ret)
		nouveau_device_del(pdev);
	return ret;
}

int
nouveau_device_wrap(int fd, int close, struct nouveau_device **pdev)
{
	struct nouveau_drm *drm;
	struct nouveau_device_priv *nvdev;
	int ret;

	ret = nouveau_drm_new(fd, &drm);
	if (ret)
		return ret;
	drm->nvif = false;

	ret = nouveau_device_new(&drm->client, NV_DEVICE,
				 &(struct nv_device_v0) {
					.device = ~0ULL,
				 }, sizeof(struct nv_device_v0), pdev);
	if (ret) {
		nouveau_drm_del(&drm);
		return ret;
	}

	nvdev = nouveau_device(*pdev);
	nvdev->base.fd = drm->fd;
	nvdev->base.drm_version = drm->version;
	nvdev->close = close;
	return 0;
}
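
/* Usage sketch (fd assumed to be an open nouveau DRM device node): wrap an
 * existing descriptor without transferring ownership (close = 0):
 *
 *	struct nouveau_device *dev;
 *	int ret = nouveau_device_wrap(fd, 0, &dev);
 *	...
 *	nouveau_device_del(&dev);
 */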

int
nouveau_device_open(const char *busid, struct nouveau_device **pdev)
{
	int ret = -ENODEV, fd = drmOpen("nouveau", busid);
	if (fd >= 0) {
		ret = nouveau_device_wrap(fd, 1, pdev);
		if (ret)
			drmClose(fd);
	}
	return ret;
}

void
nouveau_device_del(struct nouveau_device **pdev)
{
	struct nouveau_device_priv *nvdev = nouveau_device(*pdev);

	if (nvdev) {
		free(nvdev->client);
		pthread_mutex_destroy(&nvdev->lock);
		if (nvdev->base.fd >= 0) {
			struct nouveau_drm *drm =
				nouveau_drm(&nvdev->base.object);
			nouveau_drm_del(&drm);
			if (nvdev->close)
				drmClose(nvdev->base.fd);
		}
		free(nvdev);
		*pdev = NULL;
	}
}

int
nouveau_getparam(struct nouveau_device *dev, uint64_t param, uint64_t *value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_getparam r = { .param = param };
	int fd = drm->fd, ret =
		drmCommandWriteRead(fd, DRM_NOUVEAU_GETPARAM, &r, sizeof(r));
	*value = r.value;
	return ret;
}
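
/* Usage sketch: parameters are 64-bit in and out, e.g. reading the chipset
 * id (also done in nouveau_device_new() above):
 *
 *	uint64_t v;
 *	if (!nouveau_getparam(dev, NOUVEAU_GETPARAM_CHIPSET_ID, &v))
 *		printf("chipset: nv%02x\n", (unsigned)v);
 */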

int
nouveau_setparam(struct nouveau_device *dev, uint64_t param, uint64_t value)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct drm_nouveau_setparam r = { .param = param, .value = value };
	return drmCommandWrite(drm->fd, DRM_NOUVEAU_SETPARAM, &r, sizeof(r));
}

int
nouveau_client_new(struct nouveau_device *dev, struct nouveau_client **pclient)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_client_priv *pcli;
	int id = 0, i, ret = -ENOMEM;
	uint32_t *clients;

	pthread_mutex_lock(&nvdev->lock);
	/* find a free id in the per-device bitmask (set bit == in use) */
	for (i = 0; i < nvdev->nr_client; i++) {
		id = ffs(~nvdev->client[i]) - 1;
		if (id >= 0)
			goto out;
	}
	/* no free slot, grow the bitmask by one 32-bit word */
	clients = realloc(nvdev->client, sizeof(uint32_t) * (i + 1));
	if (!clients)
		goto unlock;
	nvdev->client = clients;
	nvdev->client[i] = 0;
	nvdev->nr_client++;
	id = 0;
out:
	pcli = calloc(1, sizeof(*pcli));
	if (pcli) {
		nvdev->client[i] |= (1 << id);
		pcli->base.device = dev;
		pcli->base.id = (i * 32) + id;
		*pclient = &pcli->base;
		ret = 0;
	}
unlock:
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}
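
/* The client id encodes its bitmask position: id = (word * 32) + bit, so
 * e.g. bit 3 of word 1 gives id 35, and nouveau_client_del() below recovers
 * word and bit as id / 32 and id % 32.
 */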

void
nouveau_client_del(struct nouveau_client **pclient)
{
	struct nouveau_client_priv *pcli = nouveau_client(*pclient);
	struct nouveau_device_priv *nvdev;
	if (pcli) {
		int id = pcli->base.id;
		nvdev = nouveau_device(pcli->base.device);
		pthread_mutex_lock(&nvdev->lock);
		/* release this client's bit in the id bitmask */
		nvdev->client[id / 32] &= ~(1 << (id % 32));
		pthread_mutex_unlock(&nvdev->lock);
		free(pcli->kref);
		free(pcli);
		*pclient = NULL;
	}
}

static void
nouveau_bo_del(struct nouveau_bo *bo)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_device_priv *nvdev = nouveau_device(bo->device);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_gem_close req = { .handle = bo->handle };

	if (nvbo->head.next) {
		pthread_mutex_lock(&nvdev->lock);
		if (atomic_read(&nvbo->refcnt) == 0) {
			DRMLISTDEL(&nvbo->head);
			/*
			 * This bo has to be closed with the lock held because
			 * gem handles are not refcounted. If a shared bo is
			 * closed and re-opened in another thread a race
			 * against DRM_IOCTL_GEM_OPEN or drmPrimeFDToHandle
			 * might cause the bo to be closed accidentally while
			 * re-importing.
			 */
			drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
		}
		pthread_mutex_unlock(&nvdev->lock);
	} else {
		drmIoctl(drm->fd, DRM_IOCTL_GEM_CLOSE, &req);
	}
	if (bo->map)
		drm_munmap(bo->map, bo->size);
	free(nvbo);
}

int
nouveau_bo_new(struct nouveau_device *dev, uint32_t flags, uint32_t align,
	       uint64_t size, union nouveau_bo_config *config,
	       struct nouveau_bo **pbo)
{
	struct nouveau_bo_priv *nvbo = calloc(1, sizeof(*nvbo));
	struct nouveau_bo *bo = &nvbo->base;
	int ret;

	if (!nvbo)
		return -ENOMEM;
	atomic_set(&nvbo->refcnt, 1);
	bo->device = dev;
	bo->flags = flags;
	bo->size = size;

	ret = abi16_bo_init(bo, align, config);
	if (ret) {
		free(nvbo);
		return ret;
	}

	*pbo = bo;
	return 0;
}

static int
nouveau_bo_wrap_locked(struct nouveau_device *dev, uint32_t handle,
		       struct nouveau_bo **pbo, int name)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct drm_nouveau_gem_info req = { .handle = handle };
	struct nouveau_bo_priv *nvbo;
	int ret;

	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->base.handle == handle) {
			if (atomic_inc_return(&nvbo->refcnt) == 1) {
				/*
				 * Uh oh, this bo is dead and someone else
				 * will free it, but because refcnt is
				 * now non-zero fortunately they won't
				 * call the ioctl to close the bo.
				 *
				 * Remove this bo from the list so other
				 * calls to nouveau_bo_wrap_locked will
				 * see our replacement nvbo.
				 */
				DRMLISTDEL(&nvbo->head);
				if (!name)
					name = nvbo->name;
				break;
			}
			*pbo = &nvbo->base;
			return 0;
		}
	}

	ret = drmCommandWriteRead(drm->fd, DRM_NOUVEAU_GEM_INFO,
				  &req, sizeof(req));
	if (ret)
		return ret;

	nvbo = calloc(1, sizeof(*nvbo));
	if (nvbo) {
		atomic_set(&nvbo->refcnt, 1);
		nvbo->base.device = dev;
		abi16_bo_info(&nvbo->base, &req);
		nvbo->name = name;
		DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		*pbo = &nvbo->base;
		return 0;
	}

	return -ENOMEM;
}

/* Publish a bo on the device-wide list, double-checking under the lock so
 * concurrent callers only add it once.
 */
static void
nouveau_bo_make_global(struct nouveau_bo_priv *nvbo)
{
	if (!nvbo->head.next) {
		struct nouveau_device_priv *nvdev = nouveau_device(nvbo->base.device);
		pthread_mutex_lock(&nvdev->lock);
		if (!nvbo->head.next)
			DRMLISTADD(&nvbo->head, &nvdev->bo_list);
		pthread_mutex_unlock(&nvdev->lock);
	}
}

int
nouveau_bo_wrap(struct nouveau_device *dev, uint32_t handle,
		struct nouveau_bo **pbo)
{
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	int ret;

	pthread_mutex_lock(&nvdev->lock);
	ret = nouveau_bo_wrap_locked(dev, handle, pbo, 0);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_name_ref(struct nouveau_device *dev, uint32_t name,
		    struct nouveau_bo **pbo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	struct nouveau_bo_priv *nvbo;
	struct drm_gem_open req = { .name = name };
	int ret;

	pthread_mutex_lock(&nvdev->lock);
	DRMLISTFOREACHENTRY(nvbo, &nvdev->bo_list, head) {
		if (nvbo->name == name) {
			ret = nouveau_bo_wrap_locked(dev, nvbo->base.handle,
						     pbo, name);
			pthread_mutex_unlock(&nvdev->lock);
			return ret;
		}
	}

	ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_OPEN, &req);
	if (ret == 0)
		ret = nouveau_bo_wrap_locked(dev, req.handle, pbo, name);

	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_name_get(struct nouveau_bo *bo, uint32_t *name)
{
	struct drm_gem_flink req = { .handle = bo->handle };
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);

	*name = nvbo->name;
	if (!*name) {
		int ret = drmIoctl(drm->fd, DRM_IOCTL_GEM_FLINK, &req);

		if (ret) {
			*name = 0;
			return ret;
		}
		nvbo->name = *name = req.name;

		nouveau_bo_make_global(nvbo);
	}
	return 0;
}

void
nouveau_bo_ref(struct nouveau_bo *bo, struct nouveau_bo **pref)
{
	struct nouveau_bo *ref = *pref;
	if (bo)
		atomic_inc(&nouveau_bo(bo)->refcnt);
	if (ref) {
		if (atomic_dec_and_test(&nouveau_bo(ref)->refcnt))
			nouveau_bo_del(ref);
	}
	*pref = bo;
}
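
/* Usage sketch: nouveau_bo_ref() both takes and releases references; pass
 * NULL as bo to drop the reference held in *pref:
 *
 *	struct nouveau_bo *ref = NULL;
 *	nouveau_bo_ref(bo, &ref);	// ref now holds a reference
 *	nouveau_bo_ref(NULL, &ref);	// dropped, ref is NULL again
 */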

int
nouveau_bo_prime_handle_ref(struct nouveau_device *dev, int prime_fd,
			    struct nouveau_bo **bo)
{
	struct nouveau_drm *drm = nouveau_drm(&dev->object);
	struct nouveau_device_priv *nvdev = nouveau_device(dev);
	uint32_t handle;
	int ret;

	nouveau_bo_ref(NULL, bo);

	pthread_mutex_lock(&nvdev->lock);
	ret = drmPrimeFDToHandle(drm->fd, prime_fd, &handle);
	if (ret == 0)
		ret = nouveau_bo_wrap_locked(dev, handle, bo, 0);
	pthread_mutex_unlock(&nvdev->lock);
	return ret;
}

int
nouveau_bo_set_prime(struct nouveau_bo *bo, int *prime_fd)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	int ret;

	ret = drmPrimeHandleToFD(drm->fd, nvbo->base.handle, DRM_CLOEXEC, prime_fd);
	if (ret)
		return ret;

	nouveau_bo_make_global(nvbo);
	return 0;
}
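
/* Usage sketch: exporting makes the bo global (shareable); the fd can then
 * be imported on another device or process with
 * nouveau_bo_prime_handle_ref():
 *
 *	int fd;
 *	if (!nouveau_bo_set_prime(bo, &fd)) {
 *		struct nouveau_bo *imported = NULL;
 *		nouveau_bo_prime_handle_ref(dev2, fd, &imported);
 *		close(fd);
 *	}
 */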

int
nouveau_bo_wait(struct nouveau_bo *bo, uint32_t access,
		struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	struct drm_nouveau_gem_cpu_prep req;
	struct nouveau_pushbuf *push;
	int ret = 0;

	if (!(access & NOUVEAU_BO_RDWR))
		return 0;

	/* flush any pending pushbuf that references this bo first */
	push = cli_push_get(client, bo);
	if (push && push->channel)
		nouveau_pushbuf_kick(push, push->channel);

	if (!nvbo->head.next && !(nvbo->access & NOUVEAU_BO_WR) &&
	    !(access & NOUVEAU_BO_WR))
		return 0;

	req.handle = bo->handle;
	req.flags = 0;
	if (access & NOUVEAU_BO_WR)
		req.flags |= NOUVEAU_GEM_CPU_PREP_WRITE;
	if (access & NOUVEAU_BO_NOBLOCK)
		req.flags |= NOUVEAU_GEM_CPU_PREP_NOWAIT;

	ret = drmCommandWrite(drm->fd, DRM_NOUVEAU_GEM_CPU_PREP,
			      &req, sizeof(req));
	if (ret == 0)
		nvbo->access = 0;
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *bo, uint32_t access,
	       struct nouveau_client *client)
{
	struct nouveau_drm *drm = nouveau_drm(&bo->device->object);
	struct nouveau_bo_priv *nvbo = nouveau_bo(bo);
	if (bo->map == NULL) {
		bo->map = drm_mmap(0, bo->size, PROT_READ | PROT_WRITE,
				   MAP_SHARED, drm->fd, nvbo->map_handle);
		if (bo->map == MAP_FAILED) {
			bo->map = NULL;
			return -errno;
		}
	}
	return nouveau_bo_wait(bo, access, client);
}
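
/* Usage sketch: map for CPU writes; nouveau_bo_wait() (called by
 * nouveau_bo_map() itself) fences against the GPU before the data is
 * touched:
 *
 *	if (!nouveau_bo_map(bo, NOUVEAU_BO_WR, client))
 *		memset(bo->map, 0, bo->size);
 */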