
radeonsi: add support for hainan chips
[android-x86/external-mesa.git] / src/gallium/winsys/radeon/drm/radeon_drm_winsys.c
/*
 * Copyright © 2009 Corbin Simpson
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */
/*
 * Authors:
 *      Corbin Simpson <MostAwesomeDude@gmail.com>
 *      Joakim Sindholt <opensource@zhasha.com>
 *      Marek Olšák <maraeo@gmail.com>
 */

#include "radeon_drm_bo.h"
#include "radeon_drm_cs.h"
#include "radeon_drm_public.h"

#include "pipebuffer/pb_bufmgr.h"
#include "util/u_memory.h"
#include "util/u_hash_table.h"

#include <xf86drm.h>
#include <stdio.h>

/*
 * These are copied from radeon_drm.h; once an updated libdrm is released
 * we should bump the configure.ac requirement for it and remove the
 * following defines.
 */
#ifndef RADEON_INFO_TILING_CONFIG
#define RADEON_INFO_TILING_CONFIG 6
#endif

#ifndef RADEON_INFO_WANT_HYPERZ
#define RADEON_INFO_WANT_HYPERZ 7
#endif

#ifndef RADEON_INFO_WANT_CMASK
#define RADEON_INFO_WANT_CMASK 8
#endif

#ifndef RADEON_INFO_CLOCK_CRYSTAL_FREQ
#define RADEON_INFO_CLOCK_CRYSTAL_FREQ 9
#endif

#ifndef RADEON_INFO_NUM_BACKENDS
#define RADEON_INFO_NUM_BACKENDS 0xa
#endif

#ifndef RADEON_INFO_NUM_TILE_PIPES
#define RADEON_INFO_NUM_TILE_PIPES 0xb
#endif

#ifndef RADEON_INFO_BACKEND_MAP
#define RADEON_INFO_BACKEND_MAP 0xd
#endif

#ifndef RADEON_INFO_VA_START
/* virtual address start, va < start are reserved by the kernel */
#define RADEON_INFO_VA_START        0x0e
/* maximum size of ib using the virtual memory cs */
#define RADEON_INFO_IB_VM_MAX_SIZE  0x0f
#endif

#ifndef RADEON_INFO_MAX_PIPES
#define RADEON_INFO_MAX_PIPES 0x10
#endif

#ifndef RADEON_INFO_TIMESTAMP
#define RADEON_INFO_TIMESTAMP 0x11
#endif

#ifndef RADEON_INFO_RING_WORKING
#define RADEON_INFO_RING_WORKING 0x15
#endif

#ifndef RADEON_CS_RING_UVD
#define RADEON_CS_RING_UVD      3
#endif

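/* Table mapping DRM file descriptors to winsys instances, so that opening
 * the same fd twice returns the existing (reference-counted) winsys instead
 * of creating a second one. */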
static struct util_hash_table *fd_tab = NULL;

/* Enable/disable feature access for one command stream.
 * If enable == TRUE, return TRUE on success.
 * Otherwise, return FALSE.
 *
 * We basically do the same thing the kernel does, because we have to deal
 * with multiple contexts (here command streams) backed by one winsys. */
static boolean radeon_set_fd_access(struct radeon_drm_cs *applier,
                                    struct radeon_drm_cs **owner,
                                    pipe_mutex *mutex,
                                    unsigned request, const char *request_name,
                                    boolean enable)
{
    struct drm_radeon_info info;
    unsigned value = enable ? 1 : 0;

    memset(&info, 0, sizeof(info));

    pipe_mutex_lock(*mutex);

    /* Early exit if we are sure the request will fail. */
    if (enable) {
        if (*owner) {
            pipe_mutex_unlock(*mutex);
            return FALSE;
        }
    } else {
        if (*owner != applier) {
            pipe_mutex_unlock(*mutex);
            return FALSE;
        }
    }

    /* Pass through the request to the kernel. */
    info.value = (unsigned long)&value;
    info.request = request;
    if (drmCommandWriteRead(applier->ws->fd, DRM_RADEON_INFO,
                            &info, sizeof(info)) != 0) {
        pipe_mutex_unlock(*mutex);
        return FALSE;
    }

    /* Update the rights in the winsys. */
    if (enable) {
        if (value) {
            *owner = applier;
            printf("radeon: Acquired access to %s.\n", request_name);
            pipe_mutex_unlock(*mutex);
            return TRUE;
        }
    } else {
        *owner = NULL;
        printf("radeon: Released access to %s.\n", request_name);
    }

    pipe_mutex_unlock(*mutex);
    return FALSE;
}

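/* Query a single 32-bit value through the DRM_RADEON_INFO ioctl.
 * Returns FALSE on failure and, if errname is non-NULL, logs the error. */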
static boolean radeon_get_drm_value(int fd, unsigned request,
                                    const char *errname, uint32_t *out)
{
    struct drm_radeon_info info;
    int retval;

    memset(&info, 0, sizeof(info));

    info.value = (unsigned long)out;
    info.request = request;

    retval = drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info));
    if (retval) {
        if (errname) {
            fprintf(stderr, "radeon: Failed to get %s, error number %d\n",
                    errname, retval);
        }
        return FALSE;
    }
    return TRUE;
}

/* Helper function to do the ioctls needed for setup and init. */
static boolean do_winsys_init(struct radeon_drm_winsys *ws)
{
    struct drm_radeon_gem_info gem_info;
    int retval;
    drmVersionPtr version;

    memset(&gem_info, 0, sizeof(gem_info));

    /* We do things in a specific order here.
     *
     * DRM version first. We need to be sure we're running on a KMS chipset.
     * The version also determines which optional features are available.
     *
     * Then, the PCI ID. This is essential and should return usable numbers
     * for all Radeons. If this fails, we probably got handed an FD for some
     * non-Radeon card.
     *
     * The GEM info is actually bogus on the kernel side, as well as our side
     * (see radeon_gem_info_ioctl in radeon_gem.c), but that's alright because
     * we don't actually use the info for anything yet.
     *
     * The GB and Z pipe requests should always succeed, but they might not
     * return sensible values for all chipsets; that's alright because
     * the pipe drivers already know that.
     */

    /* Get DRM version. */
    version = drmGetVersion(ws->fd);
    if (version->version_major != 2 ||
        version->version_minor < 3) {
        fprintf(stderr, "%s: DRM version is %d.%d.%d but this driver is "
                "only compatible with 2.3.x (kernel 2.6.34) or later.\n",
                __FUNCTION__,
                version->version_major,
                version->version_minor,
                version->version_patchlevel);
        drmFreeVersion(version);
        return FALSE;
    }

    ws->info.drm_major = version->version_major;
    ws->info.drm_minor = version->version_minor;
    ws->info.drm_patchlevel = version->version_patchlevel;
    drmFreeVersion(version);

    /* Get PCI ID. */
    if (!radeon_get_drm_value(ws->fd, RADEON_INFO_DEVICE_ID, "PCI ID",
                              &ws->info.pci_id))
        return FALSE;

    /* Check PCI ID. */
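    /* The pci_ids headers below are expanded three times with different
     * CHIPSET() definitions; each expansion maps the known PCI IDs of one
     * hardware generation to its chip family and driver generation
     * (r300g, r600g, radeonsi). */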
    switch (ws->info.pci_id) {
#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R300; break;
#include "pci_ids/r300_pci_ids.h"
#undef CHIPSET

#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_R600; break;
#include "pci_ids/r600_pci_ids.h"
#undef CHIPSET

#define CHIPSET(pci_id, name, cfamily) case pci_id: ws->info.family = CHIP_##cfamily; ws->gen = DRV_SI; break;
#include "pci_ids/radeonsi_pci_ids.h"
#undef CHIPSET

    default:
        fprintf(stderr, "radeon: Invalid PCI ID.\n");
        return FALSE;
    }

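    /* Map the individual chip to its chip class, i.e. the major GPU
     * generation that the pipe drivers key most of their behavior on. */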
    switch (ws->info.family) {
    default:
    case CHIP_UNKNOWN:
        fprintf(stderr, "radeon: Unknown family.\n");
        return FALSE;
    case CHIP_R300:
    case CHIP_R350:
    case CHIP_RV350:
    case CHIP_RV370:
    case CHIP_RV380:
    case CHIP_RS400:
    case CHIP_RC410:
    case CHIP_RS480:
        ws->info.chip_class = R300;
        break;
    case CHIP_R420:     /* R4xx-based cores. */
    case CHIP_R423:
    case CHIP_R430:
    case CHIP_R480:
    case CHIP_R481:
    case CHIP_RV410:
    case CHIP_RS600:
    case CHIP_RS690:
    case CHIP_RS740:
        ws->info.chip_class = R400;
        break;
    case CHIP_RV515:    /* R5xx-based cores. */
    case CHIP_R520:
    case CHIP_RV530:
    case CHIP_R580:
    case CHIP_RV560:
    case CHIP_RV570:
        ws->info.chip_class = R500;
        break;
    case CHIP_R600:
    case CHIP_RV610:
    case CHIP_RV630:
    case CHIP_RV670:
    case CHIP_RV620:
    case CHIP_RV635:
    case CHIP_RS780:
    case CHIP_RS880:
        ws->info.chip_class = R600;
        break;
    case CHIP_RV770:
    case CHIP_RV730:
    case CHIP_RV710:
    case CHIP_RV740:
        ws->info.chip_class = R700;
        break;
    case CHIP_CEDAR:
    case CHIP_REDWOOD:
    case CHIP_JUNIPER:
    case CHIP_CYPRESS:
    case CHIP_HEMLOCK:
    case CHIP_PALM:
    case CHIP_SUMO:
    case CHIP_SUMO2:
    case CHIP_BARTS:
    case CHIP_TURKS:
    case CHIP_CAICOS:
        ws->info.chip_class = EVERGREEN;
        break;
    case CHIP_CAYMAN:
    case CHIP_ARUBA:
        ws->info.chip_class = CAYMAN;
        break;
    case CHIP_TAHITI:
    case CHIP_PITCAIRN:
    case CHIP_VERDE:
    case CHIP_OLAND:
    case CHIP_HAINAN:
        ws->info.chip_class = TAHITI;
        break;
    }

    /* Check for dma */
    ws->info.r600_has_dma = FALSE;
    if (ws->info.chip_class >= R700 && ws->info.drm_minor >= 27) {
        ws->info.r600_has_dma = TRUE;
    }

    /* Check for UVD */
    ws->info.has_uvd = FALSE;
    if (ws->info.drm_minor >= 32) {
        uint32_t value = RADEON_CS_RING_UVD;
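        /* RADEON_INFO_RING_WORKING appears to use the value as both input
         * and output: we pass in the ring to test (UVD) and the kernel
         * overwrites it with whether that ring is usable. */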
        if (radeon_get_drm_value(ws->fd, RADEON_INFO_RING_WORKING,
                                 "UVD Ring working", &value))
            ws->info.has_uvd = value;
    }

    /* Get GEM info. */
    retval = drmCommandWriteRead(ws->fd, DRM_RADEON_GEM_INFO,
            &gem_info, sizeof(gem_info));
    if (retval) {
        fprintf(stderr, "radeon: Failed to get MM info, error number %d\n",
                retval);
        return FALSE;
    }
    ws->info.gart_size = gem_info.gart_size;
    ws->info.vram_size = gem_info.vram_size;

    ws->num_cpus = sysconf(_SC_NPROCESSORS_ONLN);

    /* Generation-specific queries. */
    if (ws->gen == DRV_R300) {
        if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_GB_PIPES,
                                  "GB pipe count",
                                  &ws->info.r300_num_gb_pipes))
            return FALSE;

        if (!radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_Z_PIPES,
                                  "Z pipe count",
                                  &ws->info.r300_num_z_pipes))
            return FALSE;
    }
    else if (ws->gen >= DRV_R600) {
        if (ws->info.drm_minor >= 9 &&
            !radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_BACKENDS,
                                  "num backends",
                                  &ws->info.r600_num_backends))
            return FALSE;

        /* get the GPU counter frequency, failure is not fatal */
        radeon_get_drm_value(ws->fd, RADEON_INFO_CLOCK_CRYSTAL_FREQ, NULL,
                             &ws->info.r600_clock_crystal_freq);

        radeon_get_drm_value(ws->fd, RADEON_INFO_TILING_CONFIG, NULL,
                             &ws->info.r600_tiling_config);

        if (ws->info.drm_minor >= 11) {
            radeon_get_drm_value(ws->fd, RADEON_INFO_NUM_TILE_PIPES, NULL,
                                 &ws->info.r600_num_tile_pipes);

            if (radeon_get_drm_value(ws->fd, RADEON_INFO_BACKEND_MAP, NULL,
                                      &ws->info.r600_backend_map))
                ws->info.r600_backend_map_valid = TRUE;
        }

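        /* Virtual memory (a per-process GPU address space) requires DRM
         * 2.13+. Both the VA start offset and the maximum IB size must be
         * queryable, otherwise VM stays disabled. */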
        ws->info.r600_virtual_address = FALSE;
        if (ws->info.drm_minor >= 13) {
            ws->info.r600_virtual_address = TRUE;
            if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
                                      &ws->info.r600_va_start))
                ws->info.r600_virtual_address = FALSE;
            if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
                                      &ws->info.r600_ib_vm_max_size))
                ws->info.r600_virtual_address = FALSE;
        }
    }

    /* Get max pipes; this is only needed for compute shaders. All evergreen+
     * chips have at least 2 pipes, so we use 2 as a default. */
    ws->info.r600_max_pipes = 2;
    radeon_get_drm_value(ws->fd, RADEON_INFO_MAX_PIPES, NULL,
                         &ws->info.r600_max_pipes);

    return TRUE;
}

static void radeon_winsys_destroy(struct radeon_winsys *rws)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    if (ws->thread) {
        ws->kill_thread = 1;
        pipe_semaphore_signal(&ws->cs_queued);
        pipe_thread_wait(ws->thread);
    }
    pipe_semaphore_destroy(&ws->cs_queued);
    pipe_condvar_destroy(ws->cs_queue_empty);

    if (!pipe_reference(&ws->base.reference, NULL)) {
        return;
    }

    pipe_mutex_destroy(ws->hyperz_owner_mutex);
    pipe_mutex_destroy(ws->cmask_owner_mutex);
    pipe_mutex_destroy(ws->cs_stack_lock);

    ws->cman->destroy(ws->cman);
    ws->kman->destroy(ws->kman);
    if (ws->gen >= DRV_R600) {
        radeon_surface_manager_free(ws->surf_man);
    }
    if (fd_tab) {
        util_hash_table_remove(fd_tab, intptr_to_pointer(ws->fd));
    }
    FREE(rws);
}

static void radeon_query_info(struct radeon_winsys *rws,
                              struct radeon_info *info)
{
    *info = ((struct radeon_drm_winsys *)rws)->info;
}

static boolean radeon_cs_request_feature(struct radeon_winsys_cs *rcs,
                                         enum radeon_feature_id fid,
                                         boolean enable)
{
    struct radeon_drm_cs *cs = radeon_drm_cs(rcs);

    switch (fid) {
    case RADEON_FID_R300_HYPERZ_ACCESS:
        return radeon_set_fd_access(cs, &cs->ws->hyperz_owner,
                                    &cs->ws->hyperz_owner_mutex,
                                    RADEON_INFO_WANT_HYPERZ, "Hyper-Z",
                                    enable);

    case RADEON_FID_R300_CMASK_ACCESS:
        return radeon_set_fd_access(cs, &cs->ws->cmask_owner,
                                    &cs->ws->cmask_owner_mutex,
                                    RADEON_INFO_WANT_CMASK, "AA optimizations",
                                    enable);
    }
    return FALSE;
}

static int radeon_drm_winsys_surface_init(struct radeon_winsys *rws,
                                          struct radeon_surface *surf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    return radeon_surface_init(ws->surf_man, surf);
}

static int radeon_drm_winsys_surface_best(struct radeon_winsys *rws,
                                          struct radeon_surface *surf)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;

    return radeon_surface_best(ws->surf_man, surf);
}

static uint64_t radeon_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
    uint64_t ts = 0;

    switch (value) {
    case RADEON_REQUESTED_VRAM_MEMORY:
        return ws->allocated_vram;
    case RADEON_REQUESTED_GTT_MEMORY:
        return ws->allocated_gtt;
    case RADEON_BUFFER_WAIT_TIME_NS:
        return ws->buffer_wait_time;
    case RADEON_TIMESTAMP:
        if (ws->info.drm_minor < 20 || ws->gen < DRV_R600) {
            assert(0);
            return 0;
        }

        radeon_get_drm_value(ws->fd, RADEON_INFO_TIMESTAMP, "timestamp",
                             (uint32_t*)&ts);
        return ts;
    }
    return 0;
}

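/* Hash table callbacks for fd_tab: the fd itself serves as both key and
 * hash value. */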
static unsigned hash_fd(void *key)
{
    return pointer_to_intptr(key);
}

static int compare_fd(void *key1, void *key2)
{
    return pointer_to_intptr(key1) != pointer_to_intptr(key2);
}

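/* Push a CS onto the stack of pending flushes for the submission thread.
 * If all RING_LAST slots are taken, spin until the thread frees one. */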
void radeon_drm_ws_queue_cs(struct radeon_drm_winsys *ws, struct radeon_drm_cs *cs)
{
retry:
    pipe_mutex_lock(ws->cs_stack_lock);
    if (p_atomic_read(&ws->ncs) >= RING_LAST) {
        /* no room left for a flush */
        pipe_mutex_unlock(ws->cs_stack_lock);
        goto retry;
    }
    ws->cs_stack[p_atomic_read(&ws->ncs)] = cs;
    p_atomic_inc(&ws->ncs);
    pipe_mutex_unlock(ws->cs_stack_lock);
    pipe_semaphore_signal(&ws->cs_queued);
}

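/* Worker thread: waits on cs_queued, takes the oldest CS from cs_stack,
 * submits it via radeon_drm_cs_emit_ioctl_oneshot and signals the waiting
 * context through flush_completed. cs_queue_empty is signalled whenever the
 * stack drains; on shutdown, remaining entries are released unsubmitted. */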
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param)
{
    struct radeon_drm_winsys *ws = (struct radeon_drm_winsys *)param;
    struct radeon_drm_cs *cs;
    unsigned i, empty_stack;

    while (1) {
        pipe_semaphore_wait(&ws->cs_queued);
        if (ws->kill_thread)
            break;
next:
        pipe_mutex_lock(ws->cs_stack_lock);
        cs = ws->cs_stack[0];
        pipe_mutex_unlock(ws->cs_stack_lock);

        if (cs) {
            radeon_drm_cs_emit_ioctl_oneshot(cs, cs->cst);

            pipe_mutex_lock(ws->cs_stack_lock);
            for (i = 1; i < p_atomic_read(&ws->ncs); i++) {
                ws->cs_stack[i - 1] = ws->cs_stack[i];
            }
            ws->cs_stack[p_atomic_read(&ws->ncs) - 1] = NULL;
            empty_stack = p_atomic_dec_zero(&ws->ncs);
            if (empty_stack) {
                pipe_condvar_signal(ws->cs_queue_empty);
            }
            pipe_mutex_unlock(ws->cs_stack_lock);

            pipe_semaphore_signal(&cs->flush_completed);

            if (!empty_stack) {
                goto next;
            }
        }
    }
    pipe_mutex_lock(ws->cs_stack_lock);
    for (i = 0; i < p_atomic_read(&ws->ncs); i++) {
        pipe_semaphore_signal(&ws->cs_stack[i]->flush_completed);
        ws->cs_stack[i] = NULL;
    }
    p_atomic_set(&ws->ncs, 0);
    pipe_condvar_signal(ws->cs_queue_empty);
    pipe_mutex_unlock(ws->cs_stack_lock);
    return NULL;
}

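/* RADEON_THREAD (boolean, default true) controls whether CS submission is
 * offloaded to the thread above. */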
DEBUG_GET_ONCE_BOOL_OPTION(thread, "RADEON_THREAD", TRUE)
static PIPE_THREAD_ROUTINE(radeon_drm_cs_emit_ioctl, param);

struct radeon_winsys *radeon_drm_winsys_create(int fd)
{
    struct radeon_drm_winsys *ws;

    if (!fd_tab) {
        fd_tab = util_hash_table_create(hash_fd, compare_fd);
    }

    ws = util_hash_table_get(fd_tab, intptr_to_pointer(fd));
    if (ws) {
        pipe_reference(NULL, &ws->base.reference);
        return &ws->base;
    }

    ws = CALLOC_STRUCT(radeon_drm_winsys);
    if (!ws) {
        return NULL;
    }
    ws->fd = fd;
    util_hash_table_set(fd_tab, intptr_to_pointer(fd), ws);

    if (!do_winsys_init(ws))
        goto fail;

    /* Create managers. */
    ws->kman = radeon_bomgr_create(ws);
    if (!ws->kman)
        goto fail;
    ws->cman = pb_cache_manager_create(ws->kman, 1000000);
    if (!ws->cman)
        goto fail;

    if (ws->gen >= DRV_R600) {
        ws->surf_man = radeon_surface_manager_new(fd);
        if (!ws->surf_man)
            goto fail;
    }

    /* init reference */
    pipe_reference_init(&ws->base.reference, 1);

    /* Set functions. */
    ws->base.destroy = radeon_winsys_destroy;
    ws->base.query_info = radeon_query_info;
    ws->base.cs_request_feature = radeon_cs_request_feature;
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_value = radeon_query_value;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);

    pipe_mutex_init(ws->hyperz_owner_mutex);
    pipe_mutex_init(ws->cmask_owner_mutex);
    pipe_mutex_init(ws->cs_stack_lock);

    p_atomic_set(&ws->ncs, 0);
    pipe_semaphore_init(&ws->cs_queued, 0);
    pipe_condvar_init(ws->cs_queue_empty);
    if (ws->num_cpus > 1 && debug_get_option_thread())
        ws->thread = pipe_thread_create(radeon_drm_cs_emit_ioctl, ws);

    return &ws->base;

fail:
    if (ws->cman)
        ws->cman->destroy(ws->cman);
    if (ws->kman)
        ws->kman->destroy(ws->kman);
    if (ws->surf_man)
        radeon_surface_manager_free(ws->surf_man);
    FREE(ws);
    return NULL;
}