2 * Copyright 2014, 2015 Red Hat.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * on the rights to use, copy, modify, merge, publish, distribute, sub
8 * license, and/or sell copies of the Software, and to permit persons to whom
9 * the Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
27 #include <sys/ioctl.h>
30 #include "os/os_mman.h"
31 #include "os/os_time.h"
32 #include "util/u_memory.h"
33 #include "util/u_format.h"
34 #include "util/u_hash_table.h"
35 #include "util/u_inlines.h"
36 #include "state_tracker/drm_driver.h"
37 #include "virgl/virgl_screen.h"
38 #include "virgl/virgl_public.h"
41 #include "virtgpu_drm.h"
43 #include "virgl_drm_winsys.h"
44 #include "virgl_drm_public.h"
/* Returns TRUE when this resource was flagged cacheable at creation time
 * and may therefore be recycled through the delayed-free cache instead of
 * being destroyed on its final unreference. */
46 static inline boolean can_cache_resource(struct virgl_hw_res *res)
48 return res->cacheable == TRUE;
/* Final teardown of a hardware resource: drop its entries from the
 * flink-name and bo-handle hash tables, unmap any CPU mapping, and close
 * the underlying GEM handle.  Called once the last reference is gone (or
 * when a cached entry expires). */
51 static void virgl_hw_res_destroy(struct virgl_drm_winsys *qdws,
52 struct virgl_hw_res *res)
54 struct drm_gem_close args;
/* Remove the global flink-name mapping so the name can no longer resolve
 * to this soon-to-be-freed resource. */
57 pipe_mutex_lock(qdws->bo_handles_mutex);
58 util_hash_table_remove(qdws->bo_names,
59 (void *)(uintptr_t)res->flink);
60 pipe_mutex_unlock(qdws->bo_handles_mutex);
/* Likewise drop the bo_handle -> resource mapping. */
64 pipe_mutex_lock(qdws->bo_handles_mutex);
65 util_hash_table_remove(qdws->bo_handles,
66 (void *)(uintptr_t)res->bo_handle);
67 pipe_mutex_unlock(qdws->bo_handles_mutex);
/* Tear down the CPU mapping before closing the GEM object.
 * NOTE(review): presumably guarded by a res->ptr/flink/bo_handle check on
 * lines not visible in this view — confirm against the full file. */
71 os_munmap(res->ptr, res->size);
73 memset(&args, 0, sizeof(args));
74 args.handle = res->bo_handle;
75 drmIoctl(qdws->fd, DRM_IOCTL_GEM_CLOSE, &args);
/* Non-blocking busy query: issue the virtgpu WAIT ioctl with the NOWAIT
 * flag; an EBUSY error indicates the host/GPU still references the
 * resource.  Returns TRUE when busy (return statements fall outside this
 * truncated view). */
79 static boolean virgl_drm_resource_is_busy(struct virgl_drm_winsys *qdws,
80 struct virgl_hw_res *res)
82 struct drm_virtgpu_3d_wait waitcmd;
85 memset(&waitcmd, 0, sizeof(waitcmd));
86 waitcmd.handle = res->bo_handle;
87 waitcmd.flags = VIRTGPU_WAIT_NOWAIT;
89 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
90 if (ret && errno == EBUSY)
/* Unconditionally destroy every resource parked on the delayed-free
 * cache list.  Takes qdws->mutex for the duration of the walk. */
96 virgl_cache_flush(struct virgl_drm_winsys *qdws)
98 struct list_head *curr, *next;
99 struct virgl_hw_res *res;
101 pipe_mutex_lock(qdws->mutex);
102 curr = qdws->delayed.next;
105 while (curr != &qdws->delayed) {
106 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
107 LIST_DEL(&res->head);
108 virgl_hw_res_destroy(qdws, res);
112 pipe_mutex_unlock(qdws->mutex);
/* Winsys destructor: flush the delayed-free cache, then tear down the
 * name/handle hash tables and their locks. */
115 virgl_drm_winsys_destroy(struct virgl_winsys *qws)
117 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
119 virgl_cache_flush(qdws);
121 util_hash_table_destroy(qdws->bo_handles);
122 util_hash_table_destroy(qdws->bo_names);
123 pipe_mutex_destroy(qdws->bo_handles_mutex);
124 pipe_mutex_destroy(qdws->mutex);
/* Walk the delayed-free list and destroy entries whose grace period
 * (res->start .. res->end, compared against "now") has expired.
 * NOTE(review): callers visible in this file take qdws->mutex before
 * calling — assumed to be a required precondition; confirm. */
130 virgl_cache_list_check_free(struct virgl_drm_winsys *qdws)
132 struct list_head *curr, *next;
133 struct virgl_hw_res *res;
137 curr = qdws->delayed.next;
139 while (curr != &qdws->delayed) {
140 res = LIST_ENTRY(struct virgl_hw_res, curr, head);
/* List is in insertion (timestamp) order: first unexpired entry means the
 * rest are unexpired too. */
141 if (!os_time_timeout(res->start, res->end, now))
144 LIST_DEL(&res->head);
145 virgl_hw_res_destroy(qdws, res);
/* Reference-counting helper: make *dres point at sres.  When the old
 * value drops its last reference it is either destroyed immediately, or,
 * if cacheable, timestamped and parked on the delayed-free list for
 * qdws->usecs microseconds so a future allocation can reuse it. */
151 static void virgl_drm_resource_reference(struct virgl_drm_winsys *qdws,
152 struct virgl_hw_res **dres,
153 struct virgl_hw_res *sres)
155 struct virgl_hw_res *old = *dres;
156 if (pipe_reference(&(*dres)->reference, &sres->reference)) {
158 if (!can_cache_resource(old)) {
159 virgl_hw_res_destroy(qdws, old);
161 pipe_mutex_lock(qdws->mutex);
/* Opportunistically reap expired cache entries while we hold the lock. */
162 virgl_cache_list_check_free(qdws);
164 old->start = os_time_get();
165 old->end = old->start + qdws->usecs;
166 LIST_ADDTAIL(&old->head, &qdws->delayed);
168 pipe_mutex_unlock(qdws->mutex);
/* Allocate a fresh host resource via DRM_IOCTL_VIRTGPU_RESOURCE_CREATE
 * and wrap it in a virgl_hw_res with a single reference.  Returns NULL
 * paths are on lines outside this truncated view. */
174 static struct virgl_hw_res *
175 virgl_drm_winsys_resource_create(struct virgl_winsys *qws,
176 enum pipe_texture_target target,
187 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
188 struct drm_virtgpu_resource_create createcmd;
190 struct virgl_hw_res *res;
/* Tightly-packed stride derived from the format block size; the kernel
 * is told the same value below. */
191 uint32_t stride = width * util_format_get_blocksize(format);
193 res = CALLOC_STRUCT(virgl_hw_res);
197 memset(&createcmd, 0, sizeof(createcmd));
198 createcmd.target = target;
199 createcmd.format = format;
200 createcmd.bind = bind;
201 createcmd.width = width;
202 createcmd.height = height;
203 createcmd.depth = depth;
204 createcmd.array_size = array_size;
205 createcmd.last_level = last_level;
206 createcmd.nr_samples = nr_samples;
207 createcmd.stride = stride;
208 createcmd.size = size;
210 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_CREATE, &createcmd);
/* Record the kernel-assigned identifiers: res_handle is the host-side
 * resource id, bo_handle the local GEM handle. */
217 res->format = format;
219 res->res_handle = createcmd.res_handle;
220 res->bo_handle = createcmd.bo_handle;
222 res->stride = stride;
223 pipe_reference_init(&res->reference, 1);
224 res->num_cs_references = 0;
/* Decide whether a cached resource can back a new allocation request:
 * bind and format must match exactly and the resource size must be at
 * least the requested size but not wastefully large (more than 2x).
 * A busy resource is also rejected.  Exact return-code semantics (the
 * returns sit on lines hidden from this view) are interpreted by
 * virgl_drm_winsys_resource_cache_create: >0 means compatible. */
228 static inline int virgl_is_res_compat(struct virgl_drm_winsys *qdws,
229 struct virgl_hw_res *res,
230 uint32_t size, uint32_t bind,
233 if (res->bind != bind)
235 if (res->format != format)
237 if (res->size < size)
239 if (res->size > size * 2)
242 if (virgl_drm_resource_is_busy(qdws, res)) {
/* Upload: ask the host to read the guest backing store of `res` for the
 * given box/level via DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST.  Returns the
 * ioctl result (0 on success, -1 with errno on failure). */
250 virgl_bo_transfer_put(struct virgl_winsys *vws,
251 struct virgl_hw_res *res,
252 const struct pipe_box *box,
253 uint32_t stride, uint32_t layer_stride,
254 uint32_t buf_offset, uint32_t level)
256 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
257 struct drm_virtgpu_3d_transfer_to_host tohostcmd;
259 memset(&tohostcmd, 0, sizeof(tohostcmd));
260 tohostcmd.bo_handle = res->bo_handle;
/* pipe_box and drm_virtgpu_3d_box are assumed layout-compatible here —
 * NOTE(review): a strict-aliasing-unfriendly cast; confirm the structs
 * stay in sync with the kernel UAPI header. */
261 tohostcmd.box = *(struct drm_virtgpu_3d_box *)box;
262 tohostcmd.offset = buf_offset;
263 tohostcmd.level = level;
/* stride/layer_stride deliberately not forwarded (left zero for the
 * kernel to infer); kept here as a reminder of the unused parameters. */
264 // tohostcmd.stride = stride;
265 // tohostcmd.layer_stride = stride;
266 return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_TO_HOST, &tohostcmd);
/* Download: ask the host to write back into the guest backing store of
 * `res` for the given box/level via DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST.
 * Returns the ioctl result (0 on success, -1 with errno on failure). */
270 virgl_bo_transfer_get(struct virgl_winsys *vws,
271 struct virgl_hw_res *res,
272 const struct pipe_box *box,
273 uint32_t stride, uint32_t layer_stride,
274 uint32_t buf_offset, uint32_t level)
276 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
277 struct drm_virtgpu_3d_transfer_from_host fromhostcmd;
279 memset(&fromhostcmd, 0, sizeof(fromhostcmd));
280 fromhostcmd.bo_handle = res->bo_handle;
281 fromhostcmd.level = level;
282 fromhostcmd.offset = buf_offset;
/* stride/layer_stride deliberately not forwarded — mirrors
 * virgl_bo_transfer_put above. */
283 // fromhostcmd.stride = stride;
284 // fromhostcmd.layer_stride = layer_stride;
/* Same layout-compatibility assumption as in virgl_bo_transfer_put. */
285 fromhostcmd.box = *(struct drm_virtgpu_3d_box *)box;
286 return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_TRANSFER_FROM_HOST, &fromhostcmd);
/* Cache-aware resource allocation.  For plain buffer binds, first scan
 * the delayed-free list for a compatible idle resource (expired entries
 * are destroyed along the way); on a hit, revive it with a fresh
 * reference.  Otherwise fall through to a real allocation, marking
 * buffer-type results cacheable so they return to the cache later. */
289 static struct virgl_hw_res *
290 virgl_drm_winsys_resource_cache_create(struct virgl_winsys *qws,
291 enum pipe_texture_target target,
302 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
303 struct virgl_hw_res *res, *curr_res;
304 struct list_head *curr, *next;
308 /* only store binds for vertex/index/const buffers */
309 if (bind != VIRGL_BIND_CONSTANT_BUFFER && bind != VIRGL_BIND_INDEX_BUFFER &&
310 bind != VIRGL_BIND_VERTEX_BUFFER && bind != VIRGL_BIND_CUSTOM)
313 pipe_mutex_lock(qdws->mutex);
316 curr = qdws->delayed.next;
/* First pass: take the first compatible entry, reaping expired ones. */
320 while (curr != &qdws->delayed) {
321 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
323 if (!res && ((ret = virgl_is_res_compat(qdws, curr_res, size, bind, format)) > 0))
325 else if (os_time_timeout(curr_res->start, curr_res->end, now)) {
326 LIST_DEL(&curr_res->head);
327 virgl_hw_res_destroy(qdws, curr_res);
/* Second pass (only if the first found nothing and compatibility checks
 * were not conclusively negative): keep scanning the remainder. */
338 if (!res && ret != -1) {
339 while (curr != &qdws->delayed) {
340 curr_res = LIST_ENTRY(struct virgl_hw_res, curr, head);
341 ret = virgl_is_res_compat(qdws, curr_res, size, bind, format);
/* Cache hit: pull the entry off the list and give it one reference. */
354 LIST_DEL(&res->head);
356 pipe_mutex_unlock(qdws->mutex);
357 pipe_reference_init(&res->reference, 1);
361 pipe_mutex_unlock(qdws->mutex);
/* Cache miss: allocate for real. */
364 res = virgl_drm_winsys_resource_create(qws, target, format, bind,
365 width, height, depth, array_size,
366 last_level, nr_samples, size);
/* NOTE(review): VIRGL_BIND_CUSTOM is accepted for cache lookup above but
 * not marked cacheable here — confirm whether that asymmetry is intended. */
367 if (bind == VIRGL_BIND_CONSTANT_BUFFER || bind == VIRGL_BIND_INDEX_BUFFER ||
368 bind == VIRGL_BIND_VERTEX_BUFFER)
369 res->cacheable = TRUE;
/* Import a resource shared from another process via a winsys handle:
 * either a global flink name (SHARED), a dma-buf fd (FD), or a raw GEM
 * handle.  Existing imports are deduplicated through the bo_names /
 * bo_handles hash tables (returning a new reference to the same
 * virgl_hw_res); otherwise a new wrapper is created and its host
 * resource info queried from the kernel. */
373 static struct virgl_hw_res *
374 virgl_drm_winsys_resource_create_handle(struct virgl_winsys *qws,
375 struct winsys_handle *whandle)
377 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
378 struct drm_gem_open open_arg = {};
379 struct drm_virtgpu_resource_info info_arg = {};
380 struct virgl_hw_res *res;
381 uint32_t handle = whandle->handle;
/* Offsets into a shared buffer are not supported by this winsys. */
383 if (whandle->offset != 0) {
384 fprintf(stderr, "attempt to import unsupported winsys offset %u\n",
389 pipe_mutex_lock(qdws->bo_handles_mutex);
/* Flink-name dedup: if we've already opened this name, hand back a new
 * reference to the existing resource. */
391 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
392 res = util_hash_table_get(qdws->bo_names, (void*)(uintptr_t)handle);
394 struct virgl_hw_res *r = NULL;
395 virgl_drm_resource_reference(qdws, &r, res);
/* dma-buf import: convert the fd into a GEM handle first... */
400 if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
402 r = drmPrimeFDToHandle(qdws->fd, whandle->handle, &handle);
/* ...then dedup on the resulting GEM handle. */
409 res = util_hash_table_get(qdws->bo_handles, (void*)(uintptr_t)handle);
410 fprintf(stderr, "resource %p for handle %d, pfd=%d\n", res, handle, whandle->handle);
412 struct virgl_hw_res *r = NULL;
413 virgl_drm_resource_reference(qdws, &r, res);
/* No existing wrapper: build a fresh one. */
417 res = CALLOC_STRUCT(virgl_hw_res);
421 if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
422 res->bo_handle = handle;
424 fprintf(stderr, "gem open handle %d\n", handle);
425 memset(&open_arg, 0, sizeof(open_arg));
426 open_arg.name = whandle->handle;
427 if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_OPEN, &open_arg)) {
432 res->bo_handle = open_arg.handle;
/* Ask the kernel for the host-side resource id, size and stride. */
436 memset(&info_arg, 0, sizeof(info_arg));
437 info_arg.bo_handle = res->bo_handle;
439 if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_RESOURCE_INFO, &info_arg)) {
446 res->res_handle = info_arg.res_handle;
448 res->size = info_arg.size;
449 res->stride = info_arg.stride;
450 pipe_reference_init(&res->reference, 1);
451 res->num_cs_references = 0;
/* Register so future imports of the same handle dedup to this wrapper. */
453 util_hash_table_set(qdws->bo_handles, (void *)(uintptr_t)handle, res);
456 pipe_mutex_unlock(qdws->bo_handles_mutex);
/* Export a resource as a winsys handle for sharing: a global flink name
 * (SHARED, registered in bo_names for later dedup on import), the raw
 * GEM handle (KMS), or a dma-buf fd (FD).  Also reports the stride. */
460 static boolean virgl_drm_winsys_resource_get_handle(struct virgl_winsys *qws,
461 struct virgl_hw_res *res,
463 struct winsys_handle *whandle)
465 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
466 struct drm_gem_flink flink;
471 if (whandle->type == DRM_API_HANDLE_TYPE_SHARED) {
/* NOTE(review): flink is presumably only requested once per resource
 * (guard lines hidden from this view) — confirm. */
473 memset(&flink, 0, sizeof(flink));
474 flink.handle = res->bo_handle;
476 if (drmIoctl(qdws->fd, DRM_IOCTL_GEM_FLINK, &flink)) {
480 res->flink = flink.name;
482 pipe_mutex_lock(qdws->bo_handles_mutex);
483 util_hash_table_set(qdws->bo_names, (void *)(uintptr_t)res->flink, res);
484 pipe_mutex_unlock(qdws->bo_handles_mutex);
486 whandle->handle = res->flink;
487 } else if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
488 whandle->handle = res->bo_handle;
489 } else if (whandle->type == DRM_API_HANDLE_TYPE_FD) {
490 if (drmPrimeHandleToFD(qdws->fd, res->bo_handle, DRM_CLOEXEC, (int*)&whandle->handle))
493 whandle->stride = stride;
/* Public unreference entry point: drop one reference by "referencing"
 * NULL over a local copy of the pointer. */
497 static void virgl_drm_winsys_resource_unref(struct virgl_winsys *qws,
498 struct virgl_hw_res *hres)
500 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
502 virgl_drm_resource_reference(qdws, &hres, NULL);
/* CPU-map a resource: obtain the fake mmap offset from the kernel via
 * DRM_IOCTL_VIRTGPU_MAP, then mmap the whole object read/write.  The
 * mapping is assumed to be cached on the resource (res->ptr) by lines
 * outside this truncated view. */
505 static void *virgl_drm_resource_map(struct virgl_winsys *qws,
506 struct virgl_hw_res *res)
508 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
509 struct drm_virtgpu_map mmap_arg;
515 memset(&mmap_arg, 0, sizeof(mmap_arg));
516 mmap_arg.handle = res->bo_handle;
517 if (drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_MAP, &mmap_arg))
520 ptr = os_mmap(0, res->size, PROT_READ|PROT_WRITE, MAP_SHARED,
521 qdws->fd, mmap_arg.offset);
522 if (ptr == MAP_FAILED)
/* Block until the host/GPU is done with the resource (WAIT ioctl with no
 * NOWAIT flag, unlike virgl_drm_resource_is_busy). */
530 static void virgl_drm_resource_wait(struct virgl_winsys *qws,
531 struct virgl_hw_res *res)
533 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
534 struct drm_virtgpu_3d_wait waitcmd;
537 memset(&waitcmd, 0, sizeof(waitcmd));
538 waitcmd.handle = res->bo_handle;
540 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_WAIT, &waitcmd);
/* Allocate a command buffer plus its parallel relocation arrays:
 * res_bo (resource pointers, zeroed) and res_hlist (raw bo handles
 * handed to the execbuffer ioctl), both sized cbuf->nres. */
545 static struct virgl_cmd_buf *virgl_drm_cmd_buf_create(struct virgl_winsys *qws)
547 struct virgl_drm_cmd_buf *cbuf;
549 cbuf = CALLOC_STRUCT(virgl_drm_cmd_buf);
556 cbuf->res_bo = CALLOC(cbuf->nres, sizeof(struct virgl_hw_buf*));
561 cbuf->res_hlist = MALLOC(cbuf->nres * sizeof(uint32_t));
562 if (!cbuf->res_hlist) {
/* Expose the dword buffer through the base struct for callers. */
568 cbuf->base.buf = cbuf->buf;
/* Free a command buffer and its relocation bookkeeping (release of
 * referenced resources happens on lines outside this truncated view). */
572 static void virgl_drm_cmd_buf_destroy(struct virgl_cmd_buf *_cbuf)
574 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
576 FREE(cbuf->res_hlist);
/* Find the relocation-list index of `res` in this command buffer.
 * A small direct-mapped cache (reloc_indices_hashlist, keyed on the low
 * bits of res_handle) short-circuits the linear scan on repeat lookups.
 * NOTE(review): `return false` for a NULL res yields 0, which is also a
 * valid index — callers appear to treat -1 as "not found"; confirm this
 * early-out is intended. */
582 static int virgl_drm_lookup_res(struct virgl_drm_cmd_buf *cbuf,
583 struct virgl_hw_res *res)
585 if (!res) return false;
/* Mask is derived from sizeof the is_handle_added array — assumes it is
 * a power-of-two-sized byte array. */
586 unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
589 if (cbuf->is_handle_added[hash]) {
590 i = cbuf->reloc_indices_hashlist[hash];
591 if (cbuf->res_bo[i] == res)
/* Cache miss: linear scan, refreshing the cache slot on a hit. */
594 for (i = 0; i < cbuf->cres; i++) {
595 if (cbuf->res_bo[i] == res) {
596 cbuf->reloc_indices_hashlist[hash] = i;
/* Append a resource to the command buffer's relocation list: take a
 * reference, record its usage flags and bo handle, update the lookup
 * cache, and bump the resource's cs reference count. */
604 static void virgl_drm_add_res(struct virgl_drm_winsys *qdws,
605 struct virgl_drm_cmd_buf *cbuf,
606 struct virgl_hw_res *res,
607 enum virgl_bo_usage usage)
610 unsigned hash = res->res_handle & (sizeof(cbuf->is_handle_added)-1);
/* NOTE(review): `>` allows cres == nres through before the write below —
 * looks like an off-by-one (should probably be >=); confirm against the
 * hidden resize/bail-out lines. */
612 if (cbuf->cres > cbuf->nres) {
613 fprintf(stderr,"failure to add relocation\n");
/* Reference through the standard helper (slot must start NULL so the
 * helper doesn't unref garbage). */
617 cbuf->res_bo[cbuf->cres] = NULL;
618 virgl_drm_resource_reference(qdws, &cbuf->res_bo[cbuf->cres], res);
619 cbuf->bo_usage[cbuf->cres] = usage;
620 cbuf->res_hlist[cbuf->cres] = res->bo_handle;
621 cbuf->is_handle_added[hash] = TRUE;
623 cbuf->reloc_indices_hashlist[hash] = cbuf->cres;
624 p_atomic_inc(&res->num_cs_references);
/* Drop the cs reference count and the winsys reference on every resource
 * in the command buffer's relocation list (used after submission). */
628 static void virgl_drm_release_all_res(struct virgl_drm_winsys *qdws,
629 struct virgl_drm_cmd_buf *cbuf)
633 for (i = 0; i < cbuf->cres; i++) {
634 p_atomic_dec(&cbuf->res_bo[i]->num_cs_references);
635 virgl_drm_resource_reference(qdws, &cbuf->res_bo[i], NULL);
/* Emit a resource handle into the command stream and make sure the
 * resource is tracked in the relocation list: new resources are added,
 * already-tracked ones just accumulate the extra usage flags. */
640 static void virgl_drm_emit_res(struct virgl_winsys *qws,
641 struct virgl_cmd_buf *_cbuf,
642 struct virgl_hw_res *res,
644 enum virgl_bo_usage usage)
646 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
647 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
648 int index_in_list = virgl_drm_lookup_res(cbuf, res);
/* The host-side resource id goes into the command dword stream. */
651 cbuf->base.buf[cbuf->base.cdw++] = res->res_handle;
653 if (index_in_list == -1)
654 virgl_drm_add_res(qdws, cbuf, res, usage);
656 cbuf->bo_usage[index_in_list] |= usage;
/* Query whether the pending command buffer references `res` with any of
 * the given usage bits.  Cheap early-outs: an effectively-empty cbuf, or
 * a resource with no cs references at all. */
659 static boolean virgl_drm_res_is_ref(struct virgl_winsys *qws,
660 struct virgl_cmd_buf *_cbuf,
661 struct virgl_hw_res *res,
662 enum virgl_bo_usage usage)
664 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
667 /* no commands in cbuf yet */
668 if (cbuf->base.cdw <= 2)
670 if (!res->num_cs_references)
673 index = virgl_drm_lookup_res(cbuf, res);
677 if (cbuf->bo_usage[index] & usage)
/* Submit the accumulated command dwords plus the bo-handle relocation
 * list to the kernel via DRM_IOCTL_VIRTGPU_EXECBUFFER, then release all
 * tracked resources and reset the relocation bookkeeping for reuse.
 * An empty buffer is a no-op. */
682 static int virgl_drm_winsys_submit_cmd(struct virgl_winsys *qws,
683 struct virgl_cmd_buf *_cbuf)
685 struct virgl_drm_winsys *qdws = virgl_drm_winsys(qws);
686 struct virgl_drm_cmd_buf *cbuf = virgl_drm_cmd_buf(_cbuf);
687 struct drm_virtgpu_execbuffer eb;
690 if (cbuf->base.cdw == 0)
693 memset(&eb, 0, sizeof(struct drm_virtgpu_execbuffer));
/* Pointers are passed to the kernel as integers per the virtgpu UAPI. */
694 eb.command = (unsigned long)(void*)cbuf->buf;
695 eb.size = cbuf->base.cdw * 4;
696 eb.num_bo_handles = cbuf->cres;
697 eb.bo_handles = (unsigned long)(void *)cbuf->res_hlist;
699 ret = drmIoctl(qdws->fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
/* Submission failure is reported but not fatal: rendering may be wrong. */
701 fprintf(stderr,"got error from kernel - expect bad rendering %d\n", errno);
704 virgl_drm_release_all_res(qdws, cbuf);
706 memset(cbuf->is_handle_added, 0, sizeof(cbuf->is_handle_added));
707 memset(cbuf->bo_usage, 0, sizeof(cbuf->bo_usage));
/* Fill caps->caps with the host renderer capabilities by pointing the
 * GET_CAPS ioctl at the caller's union.  Returns the ioctl result. */
711 static int virgl_drm_get_caps(struct virgl_winsys *vws,
712 struct virgl_drm_caps *caps)
714 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
715 struct drm_virtgpu_get_caps args;
717 memset(&args, 0, sizeof(args));
720 args.addr = (unsigned long)&caps->caps;
721 args.size = sizeof(union virgl_caps);
722 return drmIoctl(vdws->fd, DRM_IOCTL_VIRTGPU_GET_CAPS, &args);
725 #define PTR_TO_UINT(x) ((unsigned)((intptr_t)(x)))
/* Hash callback for the bo_handles/bo_names tables: handles are small
 * unique integers, so the identity is a fine hash. */
727 static unsigned handle_hash(void *key)
729 return PTR_TO_UINT(key);
/* Comparison callback for the handle tables: 0 means equal, matching the
 * util_hash_table contract. */
732 static int handle_compare(void *key1, void *key2)
734 return PTR_TO_UINT(key1) != PTR_TO_UINT(key2);
/* Fences are modeled as tiny dummy buffer resources (8 bytes, R8_UNORM):
 * once the GPU has consumed the commands referencing it, the resource
 * goes idle, which virgl_fence_wait detects via the busy query. */
737 static struct pipe_fence_handle *
738 virgl_cs_create_fence(struct virgl_winsys *vws)
740 struct virgl_hw_res *res;
742 res = virgl_drm_winsys_resource_cache_create(vws,
744 PIPE_FORMAT_R8_UNORM,
746 8, 1, 1, 0, 0, 0, 8);
748 return (struct pipe_fence_handle *)res;
/* Wait on a fence (a dummy resource — see virgl_cs_create_fence).
 * Zero-timeout path just polls the busy state; finite timeouts poll in a
 * loop against os_time_get(); otherwise block in the kernel wait ioctl.
 * NOTE(review): the finite-timeout loop busy-spins — assumed acceptable
 * for the expected short waits; confirm. */
751 static bool virgl_fence_wait(struct virgl_winsys *vws,
752 struct pipe_fence_handle *fence,
755 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
756 struct virgl_hw_res *res = virgl_hw_res(fence);
759 return !virgl_drm_resource_is_busy(vdws, res);
761 if (timeout != PIPE_TIMEOUT_INFINITE) {
762 int64_t start_time = os_time_get();
764 while (virgl_drm_resource_is_busy(vdws, res)) {
765 if (os_time_get() - start_time >= timeout)
771 virgl_drm_resource_wait(vws, res);
/* Fence reference counting simply delegates to the resource reference
 * helper, since fences are plain resources underneath. */
775 static void virgl_fence_reference(struct virgl_winsys *vws,
776 struct pipe_fence_handle **dst,
777 struct pipe_fence_handle *src)
779 struct virgl_drm_winsys *vdws = virgl_drm_winsys(vws);
780 virgl_drm_resource_reference(vdws, (struct virgl_hw_res **)dst,
/* Construct a winsys around an (owned) DRM fd: initialize the delayed-
 * free cache (1-second grace period), the handle/name hash tables and
 * locks, and wire up every vtable entry on the base struct. */
785 static struct virgl_winsys *
786 virgl_drm_winsys_create(int drmFD)
788 struct virgl_drm_winsys *qdws;
790 qdws = CALLOC_STRUCT(virgl_drm_winsys);
795 qdws->num_delayed = 0;
/* Cached resources linger for one second before being reaped. */
796 qdws->usecs = 1000000;
797 LIST_INITHEAD(&qdws->delayed);
798 pipe_mutex_init(qdws->mutex);
799 pipe_mutex_init(qdws->bo_handles_mutex);
800 qdws->bo_handles = util_hash_table_create(handle_hash, handle_compare);
801 qdws->bo_names = util_hash_table_create(handle_hash, handle_compare);
802 qdws->base.destroy = virgl_drm_winsys_destroy;
804 qdws->base.transfer_put = virgl_bo_transfer_put;
805 qdws->base.transfer_get = virgl_bo_transfer_get;
806 qdws->base.resource_create = virgl_drm_winsys_resource_cache_create;
807 qdws->base.resource_unref = virgl_drm_winsys_resource_unref;
808 qdws->base.resource_create_from_handle = virgl_drm_winsys_resource_create_handle;
809 qdws->base.resource_get_handle = virgl_drm_winsys_resource_get_handle;
810 qdws->base.resource_map = virgl_drm_resource_map;
811 qdws->base.resource_wait = virgl_drm_resource_wait;
812 qdws->base.cmd_buf_create = virgl_drm_cmd_buf_create;
813 qdws->base.cmd_buf_destroy = virgl_drm_cmd_buf_destroy;
814 qdws->base.submit_cmd = virgl_drm_winsys_submit_cmd;
815 qdws->base.emit_res = virgl_drm_emit_res;
816 qdws->base.res_is_referenced = virgl_drm_res_is_ref;
818 qdws->base.cs_create_fence = virgl_cs_create_fence;
819 qdws->base.fence_wait = virgl_fence_wait;
820 qdws->base.fence_reference = virgl_fence_reference;
822 qdws->base.get_caps = virgl_drm_get_caps;
827 static struct util_hash_table *fd_tab = NULL;
828 pipe_static_mutex(virgl_screen_mutex);
/* Screen destructor installed over the pipe driver's own destroy (see
 * the hack in virgl_drm_screen_create): drop a refcount under the global
 * screen mutex; on the last reference, remove the fd from the dedup
 * table and chain to the saved original destroy stored in winsys_priv. */
831 virgl_drm_screen_destroy(struct pipe_screen *pscreen)
833 struct virgl_screen *screen = virgl_screen(pscreen);
836 pipe_mutex_lock(virgl_screen_mutex);
837 destroy = --screen->refcnt == 0;
839 int fd = virgl_drm_winsys(screen->vws)->fd;
840 util_hash_table_remove(fd_tab, intptr_to_pointer(fd));
842 pipe_mutex_unlock(virgl_screen_mutex);
/* Restore and invoke the pipe driver's original destroy callback. */
845 pscreen->destroy = screen->winsys_priv;
846 pscreen->destroy(pscreen);
/* Hash an fd by the identity of the underlying device file (dev/inode/
 * rdev from fstat), so dup'ed fds referring to the same device collide. */
850 static unsigned hash_fd(void *key)
852 int fd = pointer_to_intptr(key);
856 return stat.st_dev ^ stat.st_ino ^ stat.st_rdev;
/* Two fds compare equal (returns 0) when they reference the same device
 * file — mirrors hash_fd's dev/inode/rdev identity. */
859 static int compare_fd(void *key1, void *key2)
861 int fd1 = pointer_to_intptr(key1);
862 int fd2 = pointer_to_intptr(key2);
863 struct stat stat1, stat2;
867 return stat1.st_dev != stat2.st_dev ||
868 stat1.st_ino != stat2.st_ino ||
869 stat1.st_rdev != stat2.st_rdev;
/* Public entry point: create (or reuse) a virgl screen for a DRM fd.
 * Screens are deduplicated per device via fd_tab (keyed on device
 * identity, see hash_fd/compare_fd) under the global screen mutex; a
 * cache hit just bumps the refcount.  The fd is dup'ed so the caller
 * keeps ownership of its own descriptor. */
873 virgl_drm_screen_create(int fd)
875 struct pipe_screen *pscreen = NULL;
877 pipe_mutex_lock(virgl_screen_mutex);
/* Lazily create the global fd -> screen table. */
879 fd_tab = util_hash_table_create(hash_fd, compare_fd);
884 pscreen = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
886 virgl_screen(pscreen)->refcnt++;
888 struct virgl_winsys *vws;
889 int dup_fd = dup(fd);
891 vws = virgl_drm_winsys_create(dup_fd);
893 pscreen = virgl_create_screen(vws);
895 util_hash_table_set(fd_tab, intptr_to_pointer(dup_fd), pscreen);
897 /* Bit of a hack, to avoid circular linkage dependency,
898 * ie. pipe driver having to call in to winsys, we
899 * override the pipe drivers screen->destroy():
901 virgl_screen(pscreen)->winsys_priv = pscreen->destroy;
902 pscreen->destroy = virgl_drm_screen_destroy;
907 pipe_mutex_unlock(virgl_screen_mutex);