src/gallium/winsys/amdgpu/drm/amdgpu_winsys.c
/*
 * Copyright © 2009 Corbin Simpson <MostAwesomeDude@gmail.com>
 * Copyright © 2009 Joakim Sindholt <opensource@zhasha.com>
 * Copyright © 2011 Marek Olšák <maraeo@gmail.com>
 * Copyright © 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 */

#include "amdgpu_cs.h"
#include "amdgpu_public.h"

#include "util/u_cpu_detect.h"
#include "util/u_hash_table.h"
#include "util/hash_table.h"
#include "util/xmlconfig.h"
#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
#include <sys/stat.h>
#include <fcntl.h>
#include "ac_llvm_util.h"
#include "sid.h"

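/* Local fallback so this file still builds against older amdgpu_drm.h
 * headers that predate the query; 0x1E matches the kernel UAPI value. */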
#ifndef AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS
#define AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS    0x1E
#endif

static struct util_hash_table *dev_tab = NULL;
static simple_mtx_t dev_tab_mutex = _SIMPLE_MTX_INITIALIZER_NP;

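/* dev_tab maps amdgpu device handles to their amdgpu_winsys, so that screens
 * created from different fds of the same GPU share one winsys; libdrm returns
 * the same device handle for all fds that refer to the same device. */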
DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)

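/* SI_FORCE_FAMILY=<llvm-processor-name> makes the driver report a different
 * GPU family (e.g. SI_FORCE_FAMILY=gfx900 pretends to be Vega10), which is
 * useful for shader-compiler testing. RADEON_NOOP=1 is set so that no IBs
 * reach the real hardware; an unknown name aborts the process. */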
static void handle_env_var_force_family(struct amdgpu_winsys *ws)
{
   const char *family = debug_get_option("SI_FORCE_FAMILY", NULL);
   unsigned i;

   if (!family)
      return;

   for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
      if (!strcmp(family, ac_get_llvm_processor_name(i))) {
         /* Override family and chip_class. */
         ws->info.family = i;
         ws->info.name = "GCN-NOOP";

         if (i >= CHIP_NAVI10)
            ws->info.chip_class = GFX10;
         else if (i >= CHIP_VEGA10)
            ws->info.chip_class = GFX9;
         else if (i >= CHIP_TONGA)
            ws->info.chip_class = GFX8;
         else if (i >= CHIP_BONAIRE)
            ws->info.chip_class = GFX7;
         else
            ws->info.chip_class = GFX6;

         /* Don't submit any IBs. */
         setenv("RADEON_NOOP", "1", 1);
         return;
      }
   }

   fprintf(stderr, "radeonsi: Unknown family: %s\n", family);
   exit(1);
}

/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws,
                           const struct pipe_screen_config *config,
                           int fd)
{
   if (!ac_query_gpu_info(fd, ws->dev, &ws->info, &ws->amdinfo))
      goto fail;

   /* TODO: Enable this once the kernel handles it efficiently. */
   if (ws->info.has_dedicated_vram)
      ws->info.has_local_buffers = false;

   handle_env_var_force_family(ws);

   ws->addrlib = amdgpu_addr_create(&ws->info, &ws->amdinfo, &ws->info.max_alignment);
   if (!ws->addrlib) {
      fprintf(stderr, "amdgpu: Cannot create addrlib.\n");
      goto fail;
   }

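   /* Cache the debug options that are checked on hot paths. R600_DEBUG is
    * accepted alongside AMD_DEBUG for backward compatibility. */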
   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL ||
                  strstr(debug_get_option("AMD_DEBUG", ""), "check_vm") != NULL;
   ws->debug_all_bos = debug_get_option_all_bos();
   ws->reserve_vmid = strstr(debug_get_option("R600_DEBUG", ""), "reserve_vmid") != NULL ||
                      strstr(debug_get_option("AMD_DEBUG", ""), "reserve_vmid") != NULL;
   ws->zero_all_vram_allocs = strstr(debug_get_option("R600_DEBUG", ""), "zerovram") != NULL ||
                              strstr(debug_get_option("AMD_DEBUG", ""), "zerovram") != NULL ||
                              driQueryOptionb(config->options, "radeonsi_zerovram");

   return true;

fail:
   amdgpu_device_deinitialize(ws->dev);
   ws->dev = NULL;
   return false;
}

static void do_winsys_deinit(struct amdgpu_winsys *ws)
{
   if (ws->reserve_vmid)
      amdgpu_vm_unreserve_vmid(ws->dev, 0);

   if (util_queue_is_initialized(&ws->cs_queue))
      util_queue_destroy(&ws->cs_queue);

   simple_mtx_destroy(&ws->bo_fence_lock);
   for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
      if (ws->bo_slabs[i].groups)
         pb_slabs_deinit(&ws->bo_slabs[i]);
   }
   pb_cache_deinit(&ws->bo_cache);
   util_hash_table_destroy(ws->bo_export_table);
   simple_mtx_destroy(&ws->sws_list_lock);
   simple_mtx_destroy(&ws->global_bo_list_lock);
   simple_mtx_destroy(&ws->bo_export_table_lock);

   AddrDestroy(ws->addrlib);
   amdgpu_device_deinitialize(ws->dev);
   FREE(ws);
}

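/* Destroy the screen winsys and drop one reference on the device winsys.
 * The "locked" parameter exists because the error paths of
 * amdgpu_winsys_create call this while already holding dev_tab_mutex;
 * simple_mtx is not recursive, so taking the mutex again would deadlock. */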
static void amdgpu_winsys_destroy_locked(struct radeon_winsys *rws, bool locked)
{
   struct amdgpu_screen_winsys *sws = amdgpu_screen_winsys(rws);
   struct amdgpu_winsys *ws = sws->aws;
   bool destroy;

   /* When the reference counter drops to zero, remove the device pointer
    * from the table.
    * This must happen while the mutex is locked, so that
    * amdgpu_winsys_create in another thread doesn't get the winsys
    * from the table when the counter drops to 0.
    */
   if (!locked)
      simple_mtx_lock(&dev_tab_mutex);

   destroy = pipe_reference(&ws->reference, NULL);
   if (destroy && dev_tab) {
      util_hash_table_remove(dev_tab, ws->dev);
      if (util_hash_table_count(dev_tab) == 0) {
         util_hash_table_destroy(dev_tab);
         dev_tab = NULL;
      }
   }

   if (!locked)
      simple_mtx_unlock(&dev_tab_mutex);

   if (destroy) {
      do_winsys_deinit(ws);
   } else {
      struct amdgpu_screen_winsys **sws_iter;

      /* Remove this amdgpu_screen_winsys from amdgpu_winsys' list */
      simple_mtx_lock(&ws->sws_list_lock);
      for (sws_iter = &ws->sws_list; *sws_iter; sws_iter = &(*sws_iter)->next) {
         if (*sws_iter == sws) {
            *sws_iter = sws->next;
            break;
         }
      }
      simple_mtx_unlock(&ws->sws_list_lock);
   }

   _mesa_hash_table_destroy(sws->kms_handles, NULL);
   close(sws->fd);
   FREE(rws);
}

static void amdgpu_winsys_destroy(struct radeon_winsys *rws)
{
   amdgpu_winsys_destroy_locked(rws, false);
}

static void amdgpu_winsys_query_info(struct radeon_winsys *rws,
                                     struct radeon_info *info)
{
   *info = amdgpu_winsys(rws)->info;
}

static bool amdgpu_cs_request_feature(struct radeon_cmdbuf *rcs,
                                      enum radeon_feature_id fid,
                                      bool enable)
{
   return false;
}

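/* Single-value queries. The kernel copies "size" bytes into the return
 * buffer: 8 for the 64-bit counters, 4 for the 32-bit sensor values. */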
static uint64_t amdgpu_query_value(struct radeon_winsys *rws,
                                   enum radeon_value_id value)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);
   struct amdgpu_heap_info heap;
   uint64_t retval = 0;

   switch (value) {
   case RADEON_REQUESTED_VRAM_MEMORY:
      return ws->allocated_vram;
   case RADEON_REQUESTED_GTT_MEMORY:
      return ws->allocated_gtt;
   case RADEON_MAPPED_VRAM:
      return ws->mapped_vram;
   case RADEON_MAPPED_GTT:
      return ws->mapped_gtt;
   case RADEON_BUFFER_WAIT_TIME_NS:
      return ws->buffer_wait_time;
   case RADEON_NUM_MAPPED_BUFFERS:
      return ws->num_mapped_buffers;
   case RADEON_TIMESTAMP:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_TIMESTAMP, 8, &retval);
      return retval;
   case RADEON_NUM_GFX_IBS:
      return ws->num_gfx_IBs;
   case RADEON_NUM_SDMA_IBS:
      return ws->num_sdma_IBs;
   case RADEON_GFX_BO_LIST_COUNTER:
      return ws->gfx_bo_list_counter;
   case RADEON_GFX_IB_SIZE_COUNTER:
      return ws->gfx_ib_size_counter;
   case RADEON_NUM_BYTES_MOVED:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_BYTES_MOVED, 8, &retval);
      return retval;
   case RADEON_NUM_EVICTIONS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_EVICTIONS, 8, &retval);
      return retval;
   case RADEON_NUM_VRAM_CPU_PAGE_FAULTS:
      amdgpu_query_info(ws->dev, AMDGPU_INFO_NUM_VRAM_CPU_PAGE_FAULTS, 8, &retval);
      return retval;
   case RADEON_VRAM_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM, 0, &heap);
      return heap.heap_usage;
   case RADEON_VRAM_VIS_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_VRAM,
                             AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED, &heap);
      return heap.heap_usage;
   case RADEON_GTT_USAGE:
      amdgpu_query_heap_info(ws->dev, AMDGPU_GEM_DOMAIN_GTT, 0, &heap);
      return heap.heap_usage;
   case RADEON_GPU_TEMPERATURE:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GPU_TEMP, 4, &retval);
      return retval;
   case RADEON_CURRENT_SCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_SCLK, 4, &retval);
      return retval;
   case RADEON_CURRENT_MCLK:
      amdgpu_query_sensor_info(ws->dev, AMDGPU_INFO_SENSOR_GFX_MCLK, 4, &retval);
      return retval;
   case RADEON_CS_THREAD_TIME:
      return util_queue_get_thread_time_nano(&ws->cs_queue, 0);
   }
   return 0;
}

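/* The MMIO register interface is dword-indexed, hence reg_offset / 4;
 * 0xffffffff appears to request the default (broadcast) GRBM instance. */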
static bool amdgpu_read_registers(struct radeon_winsys *rws,
                                  unsigned reg_offset,
                                  unsigned num_registers, uint32_t *out)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   return amdgpu_read_mm_registers(ws->dev, reg_offset / 4, num_registers,
                                   0xffffffff, 0, out) == 0;
}

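/* Callbacks for the gallium util_hash_table: the compare callback follows
 * memcmp semantics, i.e. it returns 0 when the keys are equal. */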
static unsigned hash_pointer(void *key)
{
   return _mesa_hash_pointer(key);
}

static int compare_pointers(void *key1, void *key2)
{
   return key1 != key2;
}

static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
   /* radeon_winsys corresponds to amdgpu_screen_winsys, which is never
    * referenced multiple times, so amdgpu_winsys_destroy always needs to be
    * called. It handles reference counting for amdgpu_winsys.
    */
   return true;
}

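/* Only thread 0 exists: the CS queue is created with a single thread in
 * amdgpu_winsys_create (util_queue_init(..., 1, ...)). */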
static void amdgpu_pin_threads_to_L3_cache(struct radeon_winsys *rws,
                                           unsigned cache)
{
   struct amdgpu_winsys *ws = amdgpu_winsys(rws);

   util_pin_thread_to_L3(ws->cs_queue.threads[0], cache,
                         util_cpu_caps.cores_per_L3);
}

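/* kms_handles is keyed by amdgpu_winsys_bo pointers: equality is pointer
 * identity, while the hash is the bo's KMS handle, so the table can only be
 * searched with a bo pointer, never with a raw handle. */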
static uint32_t kms_handle_hash(const void *key)
{
   const struct amdgpu_winsys_bo *bo = key;

   return bo->u.real.kms_handle;
}

static bool kms_handle_equals(const void *a, const void *b)
{
   return a == b;
}

PUBLIC struct radeon_winsys *
amdgpu_winsys_create(int fd, const struct pipe_screen_config *config,
                     radeon_screen_create_t screen_create)
{
   struct amdgpu_screen_winsys *ws;
   struct amdgpu_winsys *aws;
   amdgpu_device_handle dev;
   uint32_t drm_major, drm_minor, r;

   ws = CALLOC_STRUCT(amdgpu_screen_winsys);
   if (!ws)
      return NULL;

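   /* Duplicate the fd: the screen can outlive the caller's fd, and the KMS
    * handles stored in kms_handles are only valid for this specific fd. */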
   ws->fd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
   if (ws->fd < 0)
      goto fail_unlocked;

   ws->kms_handles = _mesa_hash_table_create(NULL, kms_handle_hash,
                                             kms_handle_equals);
   if (!ws->kms_handles)
      goto fail_unlocked;

   /* Look up the winsys from the dev table. */
   simple_mtx_lock(&dev_tab_mutex);
   if (!dev_tab)
      dev_tab = util_hash_table_create(hash_pointer, compare_pointers);

   /* Initialize the amdgpu device. This should always return the same pointer
    * for the same fd. */
   r = amdgpu_device_initialize(fd, &drm_major, &drm_minor, &dev);
   if (r) {
      fprintf(stderr, "amdgpu: amdgpu_device_initialize failed.\n");
      goto fail;
   }

   /* Lookup a winsys if we have already created one for this device. */
   aws = util_hash_table_get(dev_tab, dev);
   if (aws) {
      pipe_reference(NULL, &aws->reference);

      /* Release the device handle, because we don't need it anymore.
       * This function is returning an existing winsys instance, which
       * has its own device handle.
       */
      amdgpu_device_deinitialize(dev);
   } else {
      /* Create a new winsys. */
      aws = CALLOC_STRUCT(amdgpu_winsys);
      if (!aws)
         goto fail;

      aws->dev = dev;
      aws->info.drm_major = drm_major;
      aws->info.drm_minor = drm_minor;

      if (!do_winsys_init(aws, config, fd))
         goto fail_alloc;

      /* Initialize the reference count and the bookkeeping structures before
       * the buffer managers, so that amdgpu_winsys_destroy_locked is safe to
       * call if creating one of the managers below fails.
       */
      pipe_reference_init(&aws->reference, 1);
      list_inithead(&aws->global_bo_list);
      aws->bo_export_table = util_hash_table_create(hash_pointer, compare_pointers);

      (void) simple_mtx_init(&aws->sws_list_lock, mtx_plain);
      (void) simple_mtx_init(&aws->global_bo_list_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_fence_lock, mtx_plain);
      (void) simple_mtx_init(&aws->bo_export_table_lock, mtx_plain);

      /* Create managers. */
      pb_cache_init(&aws->bo_cache, RADEON_MAX_CACHED_HEAPS,
                    500000, aws->check_vm ? 1.0f : 2.0f, 0,
                    (aws->info.vram_size + aws->info.gart_size) / 8,
                    amdgpu_bo_destroy, amdgpu_bo_can_reclaim);

      unsigned min_slab_order = 9;  /* 512 bytes */
      unsigned max_slab_order = 18; /* 256 KB - higher numbers increase memory usage */
      unsigned num_slab_orders_per_allocator = (max_slab_order - min_slab_order) /
                                               NUM_SLAB_ALLOCATORS;

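      /* Worked example, assuming NUM_SLAB_ALLOCATORS == 3: the loop below
       * assigns the order ranges [9, 12], [13, 16] and [17, 18], i.e.
       * allocators for 512 B - 4 KB, 8 KB - 64 KB and 128 KB - 256 KB. */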
      /* Divide the size order range among slab managers. */
      for (unsigned i = 0; i < NUM_SLAB_ALLOCATORS; i++) {
         unsigned min_order = min_slab_order;
         unsigned max_order = MIN2(min_order + num_slab_orders_per_allocator,
                                   max_slab_order);

         if (!pb_slabs_init(&aws->bo_slabs[i],
                            min_order, max_order,
                            RADEON_MAX_SLAB_HEAPS,
                            aws,
                            amdgpu_bo_can_reclaim_slab,
                            amdgpu_bo_slab_alloc,
                            amdgpu_bo_slab_free)) {
            /* dev_tab_mutex is already held here. */
            amdgpu_winsys_destroy_locked(&ws->base, true);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }

         min_slab_order = max_order + 1;
      }

      aws->info.min_alloc_size = 1 << aws->bo_slabs[0].min_order;

      if (!util_queue_init(&aws->cs_queue, "cs", 8, 1,
                           UTIL_QUEUE_INIT_RESIZE_IF_FULL)) {
         amdgpu_winsys_destroy_locked(&ws->base, true);
         simple_mtx_unlock(&dev_tab_mutex);
         return NULL;
      }

      util_hash_table_set(dev_tab, dev, aws);

      if (aws->reserve_vmid) {
         r = amdgpu_vm_reserve_vmid(dev, 0);
         if (r) {
            amdgpu_winsys_destroy_locked(&ws->base, true);
            simple_mtx_unlock(&dev_tab_mutex);
            return NULL;
         }
      }
   }

   ws->aws = aws;

   /* Set functions. */
   ws->base.unref = amdgpu_winsys_unref;
   ws->base.destroy = amdgpu_winsys_destroy;
   ws->base.query_info = amdgpu_winsys_query_info;
   ws->base.cs_request_feature = amdgpu_cs_request_feature;
   ws->base.query_value = amdgpu_query_value;
   ws->base.read_registers = amdgpu_read_registers;
   ws->base.pin_threads_to_L3_cache = amdgpu_pin_threads_to_L3_cache;

   amdgpu_bo_init_functions(ws);
   amdgpu_cs_init_functions(ws);
   amdgpu_surface_init_functions(ws);

   /* Create the screen at the end. The winsys must be initialized
    * completely.
    *
    * Alternatively, we could create the screen based on "ws->gen"
    * and link all drivers into one binary blob. */
   ws->base.screen = screen_create(&ws->base, config);
   if (!ws->base.screen) {
      amdgpu_winsys_destroy_locked(&ws->base, true);
      simple_mtx_unlock(&dev_tab_mutex);
      return NULL;
   }


   simple_mtx_lock(&aws->sws_list_lock);
   ws->next = aws->sws_list;
   aws->sws_list = ws;
   simple_mtx_unlock(&aws->sws_list_lock);

   /* We must unlock the mutex once the winsys is fully initialized, so that
    * other threads attempting to create the winsys from the same fd will
    * get a fully initialized winsys and not just half-way initialized. */
   simple_mtx_unlock(&dev_tab_mutex);

   return &ws->base;

fail_alloc:
   FREE(aws);
fail:
   simple_mtx_unlock(&dev_tab_mutex);
fail_unlocked:
   if (ws->kms_handles)
      _mesa_hash_table_destroy(ws->kms_handles, NULL);
   if (ws->fd >= 0)
      close(ws->fd);
   FREE(ws);
   return NULL;
}
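
/* Usage sketch (hypothetical caller; a real driver passes its own
 * screen_create callback from the gallium drm helpers):
 *
 *    struct radeon_winsys *rws =
 *       amdgpu_winsys_create(fd, config, radeonsi_screen_create);
 *    struct pipe_screen *screen = rws ? rws->screen : NULL;
 *
 * Calling amdgpu_winsys_create again with another fd for the same GPU
 * returns a new amdgpu_screen_winsys that shares the same amdgpu_winsys. */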