2 * Copyright © 2016 Red Hat.
3 * Copyright © 2016 Bas Nieuwenhuizen
5 * based in part on anv driver which is:
6 * Copyright © 2015 Intel Corporation
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice (including the next
16 * paragraph) shall be included in all copies or substantial portions of the Software.
19 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
20 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
21 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
22 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
23 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
24 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
32 #include "radv_debug.h"
33 #include "radv_private.h"
34 #include "radv_shader.h"
36 #include "util/disk_cache.h"
37 #include "util/strtod.h"
41 #include <amdgpu_drm.h>
42 #include "winsys/amdgpu/radv_amdgpu_winsys_public.h"
43 #include "ac_llvm_util.h"
44 #include "vk_format.h"
48 #include "addrlib/gfx9/chip/gfx9_enum.h"
49 #include "util/build_id.h"
50 #include "util/debug.h"
51 #include "util/mesa-sha1.h"
54 radv_device_get_cache_uuid(enum radeon_family family, void *uuid)
57 unsigned char sha1[20];
58 unsigned ptr_size = sizeof(void*);
60 memset(uuid, 0, VK_UUID_SIZE);
61 _mesa_sha1_init(&ctx);
63 if (!disk_cache_get_function_identifier(radv_device_get_cache_uuid, &ctx) ||
64 !disk_cache_get_function_identifier(LLVMInitializeAMDGPUTargetInfo, &ctx))
67 _mesa_sha1_update(&ctx, &family, sizeof(family));
68 _mesa_sha1_update(&ctx, &ptr_size, sizeof(ptr_size));
69 _mesa_sha1_final(&ctx, sha1);
71 memcpy(uuid, sha1, VK_UUID_SIZE);
76 radv_get_driver_uuid(void *uuid)
78 ac_compute_driver_uuid(uuid, VK_UUID_SIZE);
82 radv_get_device_uuid(struct radeon_info *info, void *uuid)
84 ac_compute_device_uuid(info, uuid, VK_UUID_SIZE);
88 radv_get_device_name(enum radeon_family family, char *name, size_t name_len)
90 const char *chip_string;
91 char llvm_string[32] = {};
94 case CHIP_TAHITI: chip_string = "AMD RADV TAHITI"; break;
95 case CHIP_PITCAIRN: chip_string = "AMD RADV PITCAIRN"; break;
96 case CHIP_VERDE: chip_string = "AMD RADV CAPE VERDE"; break;
97 case CHIP_OLAND: chip_string = "AMD RADV OLAND"; break;
98 case CHIP_HAINAN: chip_string = "AMD RADV HAINAN"; break;
99 case CHIP_BONAIRE: chip_string = "AMD RADV BONAIRE"; break;
100 case CHIP_KAVERI: chip_string = "AMD RADV KAVERI"; break;
101 case CHIP_KABINI: chip_string = "AMD RADV KABINI"; break;
102 case CHIP_HAWAII: chip_string = "AMD RADV HAWAII"; break;
103 case CHIP_MULLINS: chip_string = "AMD RADV MULLINS"; break;
104 case CHIP_TONGA: chip_string = "AMD RADV TONGA"; break;
105 case CHIP_ICELAND: chip_string = "AMD RADV ICELAND"; break;
106 case CHIP_CARRIZO: chip_string = "AMD RADV CARRIZO"; break;
107 case CHIP_FIJI: chip_string = "AMD RADV FIJI"; break;
108 case CHIP_POLARIS10: chip_string = "AMD RADV POLARIS10"; break;
109 case CHIP_POLARIS11: chip_string = "AMD RADV POLARIS11"; break;
110 case CHIP_POLARIS12: chip_string = "AMD RADV POLARIS12"; break;
111 case CHIP_STONEY: chip_string = "AMD RADV STONEY"; break;
112 case CHIP_VEGAM: chip_string = "AMD RADV VEGA M"; break;
113 case CHIP_VEGA10: chip_string = "AMD RADV VEGA10"; break;
114 case CHIP_VEGA12: chip_string = "AMD RADV VEGA12"; break;
115 case CHIP_RAVEN: chip_string = "AMD RADV RAVEN"; break;
116 case CHIP_RAVEN2: chip_string = "AMD RADV RAVEN2"; break;
117 default: chip_string = "AMD RADV unknown"; break;
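/* Note on the formatting below: HAVE_LLVM packs the LLVM version as
 * (major << 8) | minor, so LLVM 7.0 reports as 0x0700; together with
 * MESA_LLVM_VERSION_PATCH this yields names such as
 * "AMD RADV POLARIS10 (LLVM 7.0.0)".
 */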
120 snprintf(llvm_string, sizeof(llvm_string),
121 " (LLVM %i.%i.%i)", (HAVE_LLVM >> 8) & 0xff,
122 HAVE_LLVM & 0xff, MESA_LLVM_VERSION_PATCH);
123 snprintf(name, name_len, "%s%s", chip_string, llvm_string);
127 radv_physical_device_init_mem_types(struct radv_physical_device *device)
129 STATIC_ASSERT(RADV_MEM_HEAP_COUNT <= VK_MAX_MEMORY_HEAPS);
130 uint64_t visible_vram_size = MIN2(device->rad_info.vram_size,
131 device->rad_info.vram_vis_size);
133 int vram_index = -1, visible_vram_index = -1, gart_index = -1;
134 device->memory_properties.memoryHeapCount = 0;
135 if (device->rad_info.vram_size - visible_vram_size > 0) {
136 vram_index = device->memory_properties.memoryHeapCount++;
137 device->memory_properties.memoryHeaps[vram_index] = (VkMemoryHeap) {
138 .size = device->rad_info.vram_size - visible_vram_size,
139 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
142 if (visible_vram_size) {
143 visible_vram_index = device->memory_properties.memoryHeapCount++;
144 device->memory_properties.memoryHeaps[visible_vram_index] = (VkMemoryHeap) {
145 .size = visible_vram_size,
146 .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
149 if (device->rad_info.gart_size > 0) {
150 gart_index = device->memory_properties.memoryHeapCount++;
151 device->memory_properties.memoryHeaps[gart_index] = (VkMemoryHeap) {
152 .size = device->rad_info.gart_size,
153 .flags = device->rad_info.has_dedicated_vram ? 0 : VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
157 STATIC_ASSERT(RADV_MEM_TYPE_COUNT <= VK_MAX_MEMORY_TYPES);
158 unsigned type_count = 0;
159 if (vram_index >= 0) {
160 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM;
161 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
162 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
163 .heapIndex = vram_index,
166 if (gart_index >= 0) {
167 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_WRITE_COMBINE;
168 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
169 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
170 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
171 (device->rad_info.has_dedicated_vram ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
172 .heapIndex = gart_index,
175 if (visible_vram_index >= 0) {
176 device->mem_type_indices[type_count] = RADV_MEM_TYPE_VRAM_CPU_ACCESS;
177 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
178 .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
179 VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
180 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
181 .heapIndex = visible_vram_index,
184 if (gart_index >= 0) {
185 device->mem_type_indices[type_count] = RADV_MEM_TYPE_GTT_CACHED;
186 device->memory_properties.memoryTypes[type_count++] = (VkMemoryType) {
187 .propertyFlags = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
188 VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
189 VK_MEMORY_PROPERTY_HOST_CACHED_BIT |
190 (device->rad_info.has_dedicated_vram ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT),
191 .heapIndex = gart_index,
194 device->memory_properties.memoryTypeCount = type_count;
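/* Resulting layout, as built above: on a dGPU the heaps are typically
 * { CPU-invisible VRAM, CPU-visible VRAM, GART } with memory types
 * { VRAM, GTT write-combined, visible VRAM, GTT cached }; on APUs
 * (no dedicated VRAM) the GART heap and GTT types additionally carry
 * the DEVICE_LOCAL flag.
 */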
198 radv_handle_env_var_force_family(struct radv_physical_device *device)
200 const char *family = getenv("RADV_FORCE_FAMILY");
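/* The value is compared against ac_get_llvm_processor_name() below, so it
 * must be an LLVM processor name, e.g. RADV_FORCE_FAMILY=polaris10 or
 * RADV_FORCE_FAMILY=gfx900 (example names assumed from LLVM's naming
 * scheme). Only the reported family/chip_class are overridden; the kernel
 * still drives the real hardware.
 */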
206 for (i = CHIP_TAHITI; i < CHIP_LAST; i++) {
207 if (!strcmp(family, ac_get_llvm_processor_name(i))) {
208 /* Override family and chip_class. */
209 device->rad_info.family = i;
211 if (i >= CHIP_VEGA10)
212 device->rad_info.chip_class = GFX9;
213 else if (i >= CHIP_TONGA)
214 device->rad_info.chip_class = VI;
215 else if (i >= CHIP_BONAIRE)
216 device->rad_info.chip_class = CIK;
218 device->rad_info.chip_class = SI;
224 fprintf(stderr, "radv: Unknown family: %s\n", family);
229 radv_physical_device_init(struct radv_physical_device *device,
230 struct radv_instance *instance,
231 drmDevicePtr drm_device)
233 const char *path = drm_device->nodes[DRM_NODE_RENDER];
235 drmVersionPtr version;
239 fd = open(path, O_RDWR | O_CLOEXEC);
241 if (instance->debug_flags & RADV_DEBUG_STARTUP)
242 radv_logi("Could not open device '%s'", path);
244 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
247 version = drmGetVersion(fd);
251 if (instance->debug_flags & RADV_DEBUG_STARTUP)
252 radv_logi("Could not get the kernel driver version for device '%s'", path);
254 return vk_errorf(instance, VK_ERROR_INCOMPATIBLE_DRIVER,
255 "failed to get version %s: %m", path);
258 if (strcmp(version->name, "amdgpu")) {
259 drmFreeVersion(version);
262 if (instance->debug_flags & RADV_DEBUG_STARTUP)
263 radv_logi("Device '%s' is not using the amdgpu kernel driver.", path);
265 return VK_ERROR_INCOMPATIBLE_DRIVER;
267 drmFreeVersion(version);
269 if (instance->debug_flags & RADV_DEBUG_STARTUP)
270 radv_logi("Found compatible device '%s'.", path);
272 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
273 device->instance = instance;
274 assert(strlen(path) < ARRAY_SIZE(device->path));
275 strncpy(device->path, path, ARRAY_SIZE(device->path));
277 device->ws = radv_amdgpu_winsys_create(fd, instance->debug_flags,
278 instance->perftest_flags);
280 result = vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
284 if (instance->enabled_extensions.KHR_display) {
285 master_fd = open(drm_device->nodes[DRM_NODE_PRIMARY], O_RDWR | O_CLOEXEC);
286 if (master_fd >= 0) {
287 uint32_t accel_working = 0;
288 struct drm_amdgpu_info request = {
289 .return_pointer = (uintptr_t)&accel_working,
290 .return_size = sizeof(accel_working),
291 .query = AMDGPU_INFO_ACCEL_WORKING
294 if (drmCommandWrite(master_fd, DRM_AMDGPU_INFO, &request, sizeof (struct drm_amdgpu_info)) < 0 || !accel_working) {
301 device->master_fd = master_fd;
302 device->local_fd = fd;
303 device->ws->query_info(device->ws, &device->rad_info);
305 radv_handle_env_var_force_family(device);
307 radv_get_device_name(device->rad_info.family, device->name, sizeof(device->name));
309 if (radv_device_get_cache_uuid(device->rad_info.family, device->cache_uuid)) {
310 device->ws->destroy(device->ws);
311 result = vk_errorf(instance, VK_ERROR_INITIALIZATION_FAILED,
312 "cannot generate UUID");
316 /* These flags affect shader compilation. */
317 uint64_t shader_env_flags =
318 (device->instance->perftest_flags & RADV_PERFTEST_SISCHED ? 0x1 : 0) |
319 (device->instance->debug_flags & RADV_DEBUG_UNSAFE_MATH ? 0x2 : 0);
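/* These bits are handed to disk_cache_create() below as driver flags, so
 * shaders built with sisched or unsafe-math enabled are keyed separately
 * from shaders built without them.
 */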
321 /* The gpu id is already embedded in the uuid so we just pass "radv"
322 * when creating the cache.
324 char buf[VK_UUID_SIZE * 2 + 1];
325 disk_cache_format_hex_id(buf, device->cache_uuid, VK_UUID_SIZE * 2);
326 device->disk_cache = disk_cache_create(device->name, buf, shader_env_flags);
328 if (device->rad_info.chip_class < VI ||
329 device->rad_info.chip_class > GFX9)
330 fprintf(stderr, "WARNING: radv is not a conformant vulkan implementation, testing use only.\n");
332 radv_get_driver_uuid(&device->driver_uuid);
333 radv_get_device_uuid(&device->rad_info, &device->device_uuid);
335 if (device->rad_info.family == CHIP_STONEY ||
336 device->rad_info.chip_class >= GFX9) {
337 device->has_rbplus = true;
338 device->rbplus_allowed = device->rad_info.family == CHIP_STONEY ||
339 device->rad_info.family == CHIP_VEGA12 ||
340 device->rad_info.family == CHIP_RAVEN ||
341 device->rad_info.family == CHIP_RAVEN2;
344 /* The mere presence of CLEAR_STATE in the IB causes random GPU hangs on SI. */
347 device->has_clear_state = device->rad_info.chip_class >= CIK;
349 device->cpdma_prefetch_writes_memory = device->rad_info.chip_class <= VI;
351 /* Vega10/Raven need a special workaround for a hardware bug. */
352 device->has_scissor_bug = device->rad_info.family == CHIP_VEGA10 ||
353 device->rad_info.family == CHIP_RAVEN;
355 /* Out-of-order primitive rasterization. */
356 device->has_out_of_order_rast = device->rad_info.chip_class >= VI &&
357 device->rad_info.max_se >= 2;
358 device->out_of_order_rast_allowed = device->has_out_of_order_rast &&
359 !(device->instance->debug_flags & RADV_DEBUG_NO_OUT_OF_ORDER);
361 device->dcc_msaa_allowed =
362 (device->instance->perftest_flags & RADV_PERFTEST_DCC_MSAA);
364 radv_physical_device_init_mem_types(device);
365 radv_fill_device_extension_table(device, &device->supported_extensions);
367 device->bus_info = *drm_device->businfo.pci;
369 if ((device->instance->debug_flags & RADV_DEBUG_INFO))
370 ac_print_gpu_info(&device->rad_info);
372 /* The WSI is structured as a layer on top of the driver, so this has
373 * to be the last part of initialization (at least until we get other
376 result = radv_init_wsi(device);
377 if (result != VK_SUCCESS) {
378 device->ws->destroy(device->ws);
379 vk_error(instance, result);
393 radv_physical_device_finish(struct radv_physical_device *device)
395 radv_finish_wsi(device);
396 device->ws->destroy(device->ws);
397 disk_cache_destroy(device->disk_cache);
398 close(device->local_fd);
399 if (device->master_fd != -1)
400 close(device->master_fd);
404 default_alloc_func(void *pUserData, size_t size, size_t align,
405 VkSystemAllocationScope allocationScope)
411 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
412 size_t align, VkSystemAllocationScope allocationScope)
414 return realloc(pOriginal, size);
418 default_free_func(void *pUserData, void *pMemory)
423 static const VkAllocationCallbacks default_alloc = {
425 .pfnAllocation = default_alloc_func,
426 .pfnReallocation = default_realloc_func,
427 .pfnFree = default_free_func,
430 static const struct debug_control radv_debug_options[] = {
431 {"nofastclears", RADV_DEBUG_NO_FAST_CLEARS},
432 {"nodcc", RADV_DEBUG_NO_DCC},
433 {"shaders", RADV_DEBUG_DUMP_SHADERS},
434 {"nocache", RADV_DEBUG_NO_CACHE},
435 {"shaderstats", RADV_DEBUG_DUMP_SHADER_STATS},
436 {"nohiz", RADV_DEBUG_NO_HIZ},
437 {"nocompute", RADV_DEBUG_NO_COMPUTE_QUEUE},
438 {"unsafemath", RADV_DEBUG_UNSAFE_MATH},
439 {"allbos", RADV_DEBUG_ALL_BOS},
440 {"noibs", RADV_DEBUG_NO_IBS},
441 {"spirv", RADV_DEBUG_DUMP_SPIRV},
442 {"vmfaults", RADV_DEBUG_VM_FAULTS},
443 {"zerovram", RADV_DEBUG_ZERO_VRAM},
444 {"syncshaders", RADV_DEBUG_SYNC_SHADERS},
445 {"nosisched", RADV_DEBUG_NO_SISCHED},
446 {"preoptir", RADV_DEBUG_PREOPTIR},
447 {"nodynamicbounds", RADV_DEBUG_NO_DYNAMIC_BOUNDS},
448 {"nooutoforder", RADV_DEBUG_NO_OUT_OF_ORDER},
449 {"info", RADV_DEBUG_INFO},
450 {"errors", RADV_DEBUG_ERRORS},
451 {"startup", RADV_DEBUG_STARTUP},
452 {"checkir", RADV_DEBUG_CHECKIR},
453 {"nothreadllvm", RADV_DEBUG_NOTHREADLLVM},
458 radv_get_debug_option_name(int id)
460 assert(id < ARRAY_SIZE(radv_debug_options) - 1);
461 return radv_debug_options[id].string;
464 static const struct debug_control radv_perftest_options[] = {
465 {"nobatchchain", RADV_PERFTEST_NO_BATCHCHAIN},
466 {"sisched", RADV_PERFTEST_SISCHED},
467 {"localbos", RADV_PERFTEST_LOCAL_BOS},
468 {"binning", RADV_PERFTEST_BINNING},
469 {"dccmsaa", RADV_PERFTEST_DCC_MSAA},
474 radv_get_perftest_option_name(int id)
476 assert(id < ARRAY_SIZE(radv_perftest_options) - 1);
477 return radv_perftest_options[id].string;
481 radv_handle_per_app_options(struct radv_instance *instance,
482 const VkApplicationInfo *info)
484 const char *name = info ? info->pApplicationName : NULL;
489 if (!strcmp(name, "Talos - Linux - 32bit") ||
490 !strcmp(name, "Talos - Linux - 64bit")) {
491 if (!(instance->debug_flags & RADV_DEBUG_NO_SISCHED)) {
492 /* Force enable LLVM sisched for Talos because it looks
493 * safe and it gives a few more FPS.
495 instance->perftest_flags |= RADV_PERFTEST_SISCHED;
497 } else if (!strcmp(name, "DOOM_VFR")) {
498 /* Work around a Doom VFR game bug */
499 instance->debug_flags |= RADV_DEBUG_NO_DYNAMIC_BOUNDS;
503 static int radv_get_instance_extension_index(const char *name)
505 for (unsigned i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; ++i) {
506 if (strcmp(name, radv_instance_extensions[i].extensionName) == 0)
513 VkResult radv_CreateInstance(
514 const VkInstanceCreateInfo* pCreateInfo,
515 const VkAllocationCallbacks* pAllocator,
516 VkInstance* pInstance)
518 struct radv_instance *instance;
521 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
523 uint32_t client_version;
524 if (pCreateInfo->pApplicationInfo &&
525 pCreateInfo->pApplicationInfo->apiVersion != 0) {
526 client_version = pCreateInfo->pApplicationInfo->apiVersion;
528 client_version = VK_API_VERSION_1_0;
531 instance = vk_zalloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
532 VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
534 return vk_error(NULL, VK_ERROR_OUT_OF_HOST_MEMORY);
536 instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
539 instance->alloc = *pAllocator;
541 instance->alloc = default_alloc;
543 instance->apiVersion = client_version;
544 instance->physicalDeviceCount = -1;
546 instance->debug_flags = parse_debug_string(getenv("RADV_DEBUG"),
549 instance->perftest_flags = parse_debug_string(getenv("RADV_PERFTEST"),
550 radv_perftest_options);
553 if (instance->debug_flags & RADV_DEBUG_STARTUP)
554 radv_logi("Created an instance");
556 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
557 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
558 int index = radv_get_instance_extension_index(ext_name);
560 if (index < 0 || !radv_supported_instance_extensions.extensions[index]) {
561 vk_free2(&default_alloc, pAllocator, instance);
562 return vk_error(instance, VK_ERROR_EXTENSION_NOT_PRESENT);
565 instance->enabled_extensions.extensions[index] = true;
568 result = vk_debug_report_instance_init(&instance->debug_report_callbacks);
569 if (result != VK_SUCCESS) {
570 vk_free2(&default_alloc, pAllocator, instance);
571 return vk_error(instance, result);
576 VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
578 radv_handle_per_app_options(instance, pCreateInfo->pApplicationInfo);
580 *pInstance = radv_instance_to_handle(instance);
585 void radv_DestroyInstance(
586 VkInstance _instance,
587 const VkAllocationCallbacks* pAllocator)
589 RADV_FROM_HANDLE(radv_instance, instance, _instance);
594 for (int i = 0; i < instance->physicalDeviceCount; ++i) {
595 radv_physical_device_finish(instance->physicalDevices + i);
598 VG(VALGRIND_DESTROY_MEMPOOL(instance));
602 vk_debug_report_instance_destroy(&instance->debug_report_callbacks);
604 vk_free(&instance->alloc, instance);
608 radv_enumerate_devices(struct radv_instance *instance)
610 /* TODO: Check for more devices ? */
611 drmDevicePtr devices[8];
612 VkResult result = VK_ERROR_INCOMPATIBLE_DRIVER;
615 instance->physicalDeviceCount = 0;
617 max_devices = drmGetDevices2(0, devices, ARRAY_SIZE(devices));
619 if (instance->debug_flags & RADV_DEBUG_STARTUP)
620 radv_logi("Found %d drm nodes", max_devices);
623 return vk_error(instance, VK_ERROR_INCOMPATIBLE_DRIVER);
625 for (unsigned i = 0; i < (unsigned)max_devices; i++) {
626 if (devices[i]->available_nodes & 1 << DRM_NODE_RENDER &&
627 devices[i]->bustype == DRM_BUS_PCI &&
628 devices[i]->deviceinfo.pci->vendor_id == ATI_VENDOR_ID) {
630 result = radv_physical_device_init(instance->physicalDevices +
631 instance->physicalDeviceCount,
634 if (result == VK_SUCCESS)
635 ++instance->physicalDeviceCount;
636 else if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
640 drmFreeDevices(devices, max_devices);
645 VkResult radv_EnumeratePhysicalDevices(
646 VkInstance _instance,
647 uint32_t* pPhysicalDeviceCount,
648 VkPhysicalDevice* pPhysicalDevices)
650 RADV_FROM_HANDLE(radv_instance, instance, _instance);
653 if (instance->physicalDeviceCount < 0) {
654 result = radv_enumerate_devices(instance);
655 if (result != VK_SUCCESS &&
656 result != VK_ERROR_INCOMPATIBLE_DRIVER)
660 if (!pPhysicalDevices) {
661 *pPhysicalDeviceCount = instance->physicalDeviceCount;
663 *pPhysicalDeviceCount = MIN2(*pPhysicalDeviceCount, instance->physicalDeviceCount);
664 for (unsigned i = 0; i < *pPhysicalDeviceCount; ++i)
665 pPhysicalDevices[i] = radv_physical_device_to_handle(instance->physicalDevices + i);
668 return *pPhysicalDeviceCount < instance->physicalDeviceCount ? VK_INCOMPLETE
672 VkResult radv_EnumeratePhysicalDeviceGroups(
673 VkInstance _instance,
674 uint32_t* pPhysicalDeviceGroupCount,
675 VkPhysicalDeviceGroupProperties* pPhysicalDeviceGroupProperties)
677 RADV_FROM_HANDLE(radv_instance, instance, _instance);
680 if (instance->physicalDeviceCount < 0) {
681 result = radv_enumerate_devices(instance);
682 if (result != VK_SUCCESS &&
683 result != VK_ERROR_INCOMPATIBLE_DRIVER)
687 if (!pPhysicalDeviceGroupProperties) {
688 *pPhysicalDeviceGroupCount = instance->physicalDeviceCount;
690 *pPhysicalDeviceGroupCount = MIN2(*pPhysicalDeviceGroupCount, instance->physicalDeviceCount);
691 for (unsigned i = 0; i < *pPhysicalDeviceGroupCount; ++i) {
692 pPhysicalDeviceGroupProperties[i].physicalDeviceCount = 1;
693 pPhysicalDeviceGroupProperties[i].physicalDevices[0] = radv_physical_device_to_handle(instance->physicalDevices + i);
694 pPhysicalDeviceGroupProperties[i].subsetAllocation = false;
697 return *pPhysicalDeviceGroupCount < instance->physicalDeviceCount ? VK_INCOMPLETE
701 void radv_GetPhysicalDeviceFeatures(
702 VkPhysicalDevice physicalDevice,
703 VkPhysicalDeviceFeatures* pFeatures)
705 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
706 memset(pFeatures, 0, sizeof(*pFeatures));
708 *pFeatures = (VkPhysicalDeviceFeatures) {
709 .robustBufferAccess = true,
710 .fullDrawIndexUint32 = true,
711 .imageCubeArray = true,
712 .independentBlend = true,
713 .geometryShader = true,
714 .tessellationShader = true,
715 .sampleRateShading = true,
716 .dualSrcBlend = true,
718 .multiDrawIndirect = true,
719 .drawIndirectFirstInstance = true,
721 .depthBiasClamp = true,
722 .fillModeNonSolid = true,
727 .multiViewport = true,
728 .samplerAnisotropy = true,
729 .textureCompressionETC2 = radv_device_supports_etc(pdevice),
730 .textureCompressionASTC_LDR = false,
731 .textureCompressionBC = true,
732 .occlusionQueryPrecise = true,
733 .pipelineStatisticsQuery = true,
734 .vertexPipelineStoresAndAtomics = true,
735 .fragmentStoresAndAtomics = true,
736 .shaderTessellationAndGeometryPointSize = true,
737 .shaderImageGatherExtended = true,
738 .shaderStorageImageExtendedFormats = true,
739 .shaderStorageImageMultisample = false,
740 .shaderUniformBufferArrayDynamicIndexing = true,
741 .shaderSampledImageArrayDynamicIndexing = true,
742 .shaderStorageBufferArrayDynamicIndexing = true,
743 .shaderStorageImageArrayDynamicIndexing = true,
744 .shaderStorageImageReadWithoutFormat = true,
745 .shaderStorageImageWriteWithoutFormat = true,
746 .shaderClipDistance = true,
747 .shaderCullDistance = true,
748 .shaderFloat64 = true,
750 .shaderInt16 = pdevice->rad_info.chip_class >= GFX9 && HAVE_LLVM >= 0x700,
751 .sparseBinding = true,
752 .variableMultisampleRate = true,
753 .inheritedQueries = true,
757 void radv_GetPhysicalDeviceFeatures2(
758 VkPhysicalDevice physicalDevice,
759 VkPhysicalDeviceFeatures2KHR *pFeatures)
761 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
762 vk_foreach_struct(ext, pFeatures->pNext) {
763 switch (ext->sType) {
764 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VARIABLE_POINTER_FEATURES_KHR: {
765 VkPhysicalDeviceVariablePointerFeaturesKHR *features = (void *)ext;
766 features->variablePointersStorageBuffer = true;
767 features->variablePointers = false;
770 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_FEATURES_KHR: {
771 VkPhysicalDeviceMultiviewFeaturesKHR *features = (VkPhysicalDeviceMultiviewFeaturesKHR*)ext;
772 features->multiview = true;
773 features->multiviewGeometryShader = true;
774 features->multiviewTessellationShader = true;
777 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_DRAW_PARAMETER_FEATURES: {
778 VkPhysicalDeviceShaderDrawParameterFeatures *features =
779 (VkPhysicalDeviceShaderDrawParameterFeatures*)ext;
780 features->shaderDrawParameters = true;
783 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_FEATURES: {
784 VkPhysicalDeviceProtectedMemoryFeatures *features =
785 (VkPhysicalDeviceProtectedMemoryFeatures*)ext;
786 features->protectedMemory = false;
789 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_16BIT_STORAGE_FEATURES: {
790 VkPhysicalDevice16BitStorageFeatures *features =
791 (VkPhysicalDevice16BitStorageFeatures*)ext;
792 bool enabled = HAVE_LLVM >= 0x0700 && pdevice->rad_info.chip_class >= VI;
793 features->storageBuffer16BitAccess = enabled;
794 features->uniformAndStorageBuffer16BitAccess = enabled;
795 features->storagePushConstant16 = enabled;
796 features->storageInputOutput16 = enabled && HAVE_LLVM >= 0x900;
799 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES: {
800 VkPhysicalDeviceSamplerYcbcrConversionFeatures *features =
801 (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)ext;
802 features->samplerYcbcrConversion = false;
805 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_FEATURES_EXT: {
806 VkPhysicalDeviceDescriptorIndexingFeaturesEXT *features =
807 (VkPhysicalDeviceDescriptorIndexingFeaturesEXT*)ext;
808 features->shaderInputAttachmentArrayDynamicIndexing = true;
809 features->shaderUniformTexelBufferArrayDynamicIndexing = true;
810 features->shaderStorageTexelBufferArrayDynamicIndexing = true;
811 features->shaderUniformBufferArrayNonUniformIndexing = false;
812 features->shaderSampledImageArrayNonUniformIndexing = false;
813 features->shaderStorageBufferArrayNonUniformIndexing = false;
814 features->shaderStorageImageArrayNonUniformIndexing = false;
815 features->shaderInputAttachmentArrayNonUniformIndexing = false;
816 features->shaderUniformTexelBufferArrayNonUniformIndexing = false;
817 features->shaderStorageTexelBufferArrayNonUniformIndexing = false;
818 features->descriptorBindingUniformBufferUpdateAfterBind = true;
819 features->descriptorBindingSampledImageUpdateAfterBind = true;
820 features->descriptorBindingStorageImageUpdateAfterBind = true;
821 features->descriptorBindingStorageBufferUpdateAfterBind = true;
822 features->descriptorBindingUniformTexelBufferUpdateAfterBind = true;
823 features->descriptorBindingStorageTexelBufferUpdateAfterBind = true;
824 features->descriptorBindingUpdateUnusedWhilePending = true;
825 features->descriptorBindingPartiallyBound = true;
826 features->descriptorBindingVariableDescriptorCount = true;
827 features->runtimeDescriptorArray = true;
830 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONDITIONAL_RENDERING_FEATURES_EXT: {
831 VkPhysicalDeviceConditionalRenderingFeaturesEXT *features =
832 (VkPhysicalDeviceConditionalRenderingFeaturesEXT*)ext;
833 features->conditionalRendering = true;
834 features->inheritedConditionalRendering = false;
837 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_FEATURES_EXT: {
838 VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *features =
839 (VkPhysicalDeviceVertexAttributeDivisorFeaturesEXT *)ext;
840 features->vertexAttributeInstanceRateDivisor = VK_TRUE;
841 features->vertexAttributeInstanceRateZeroDivisor = VK_TRUE;
844 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_FEATURES_EXT: {
845 VkPhysicalDeviceTransformFeedbackFeaturesEXT *features =
846 (VkPhysicalDeviceTransformFeedbackFeaturesEXT*)ext;
847 features->transformFeedback = true;
848 features->geometryStreams = true;
855 return radv_GetPhysicalDeviceFeatures(physicalDevice, &pFeatures->features);
858 void radv_GetPhysicalDeviceProperties(
859 VkPhysicalDevice physicalDevice,
860 VkPhysicalDeviceProperties* pProperties)
862 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
863 VkSampleCountFlags sample_counts = 0xf;
865 /* Make sure that the entire descriptor set is addressable with a signed
866 * 32-bit int. So the sum of all limits scaled by descriptor size has to
867 * be at most 2 GiB. The combined image & sampler object counts as one of
868 * both. This limit is for the pipeline layout, not for the set layout, but
869 * there is no set limit, so we just set a pipeline limit. I don't think
870 * any app is going to hit this soon. */
871 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
872 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
873 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
874 32 /* sampler, largest when combined with image */ +
875 64 /* sampled image */ +
876 64 /* storage image */);
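/* Rough arithmetic, ignoring the small dynamic-buffer term:
 * 2^31 / (32 + 32 + 32 + 64 + 64) = 2147483648 / 224 ≈ 9.5 million
 * descriptors, which is the limit used for the per-stage/per-set maxima below.
 */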
878 VkPhysicalDeviceLimits limits = {
879 .maxImageDimension1D = (1 << 14),
880 .maxImageDimension2D = (1 << 14),
881 .maxImageDimension3D = (1 << 11),
882 .maxImageDimensionCube = (1 << 14),
883 .maxImageArrayLayers = (1 << 11),
884 .maxTexelBufferElements = 128 * 1024 * 1024,
885 .maxUniformBufferRange = UINT32_MAX,
886 .maxStorageBufferRange = UINT32_MAX,
887 .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
888 .maxMemoryAllocationCount = UINT32_MAX,
889 .maxSamplerAllocationCount = 64 * 1024,
890 .bufferImageGranularity = 64, /* A cache line */
891 .sparseAddressSpaceSize = 0xffffffffu, /* buffer max size */
892 .maxBoundDescriptorSets = MAX_SETS,
893 .maxPerStageDescriptorSamplers = max_descriptor_set_size,
894 .maxPerStageDescriptorUniformBuffers = max_descriptor_set_size,
895 .maxPerStageDescriptorStorageBuffers = max_descriptor_set_size,
896 .maxPerStageDescriptorSampledImages = max_descriptor_set_size,
897 .maxPerStageDescriptorStorageImages = max_descriptor_set_size,
898 .maxPerStageDescriptorInputAttachments = max_descriptor_set_size,
899 .maxPerStageResources = max_descriptor_set_size,
900 .maxDescriptorSetSamplers = max_descriptor_set_size,
901 .maxDescriptorSetUniformBuffers = max_descriptor_set_size,
902 .maxDescriptorSetUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS,
903 .maxDescriptorSetStorageBuffers = max_descriptor_set_size,
904 .maxDescriptorSetStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS,
905 .maxDescriptorSetSampledImages = max_descriptor_set_size,
906 .maxDescriptorSetStorageImages = max_descriptor_set_size,
907 .maxDescriptorSetInputAttachments = max_descriptor_set_size,
908 .maxVertexInputAttributes = 32,
909 .maxVertexInputBindings = 32,
910 .maxVertexInputAttributeOffset = 2047,
911 .maxVertexInputBindingStride = 2048,
912 .maxVertexOutputComponents = 128,
913 .maxTessellationGenerationLevel = 64,
914 .maxTessellationPatchSize = 32,
915 .maxTessellationControlPerVertexInputComponents = 128,
916 .maxTessellationControlPerVertexOutputComponents = 128,
917 .maxTessellationControlPerPatchOutputComponents = 120,
918 .maxTessellationControlTotalOutputComponents = 4096,
919 .maxTessellationEvaluationInputComponents = 128,
920 .maxTessellationEvaluationOutputComponents = 128,
921 .maxGeometryShaderInvocations = 127,
922 .maxGeometryInputComponents = 64,
923 .maxGeometryOutputComponents = 128,
924 .maxGeometryOutputVertices = 256,
925 .maxGeometryTotalOutputComponents = 1024,
926 .maxFragmentInputComponents = 128,
927 .maxFragmentOutputAttachments = 8,
928 .maxFragmentDualSrcAttachments = 1,
929 .maxFragmentCombinedOutputResources = 8,
930 .maxComputeSharedMemorySize = 32768,
931 .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
932 .maxComputeWorkGroupInvocations = 2048,
933 .maxComputeWorkGroupSize = {
938 .subPixelPrecisionBits = 8,
939 .subTexelPrecisionBits = 8,
940 .mipmapPrecisionBits = 8,
941 .maxDrawIndexedIndexValue = UINT32_MAX,
942 .maxDrawIndirectCount = UINT32_MAX,
943 .maxSamplerLodBias = 16,
944 .maxSamplerAnisotropy = 16,
945 .maxViewports = MAX_VIEWPORTS,
946 .maxViewportDimensions = { (1 << 14), (1 << 14) },
947 .viewportBoundsRange = { INT16_MIN, INT16_MAX },
948 .viewportSubPixelBits = 8,
949 .minMemoryMapAlignment = 4096, /* A page */
950 .minTexelBufferOffsetAlignment = 1,
951 .minUniformBufferOffsetAlignment = 4,
952 .minStorageBufferOffsetAlignment = 4,
953 .minTexelOffset = -32,
954 .maxTexelOffset = 31,
955 .minTexelGatherOffset = -32,
956 .maxTexelGatherOffset = 31,
957 .minInterpolationOffset = -2,
958 .maxInterpolationOffset = 2,
959 .subPixelInterpolationOffsetBits = 8,
960 .maxFramebufferWidth = (1 << 14),
961 .maxFramebufferHeight = (1 << 14),
962 .maxFramebufferLayers = (1 << 10),
963 .framebufferColorSampleCounts = sample_counts,
964 .framebufferDepthSampleCounts = sample_counts,
965 .framebufferStencilSampleCounts = sample_counts,
966 .framebufferNoAttachmentsSampleCounts = sample_counts,
967 .maxColorAttachments = MAX_RTS,
968 .sampledImageColorSampleCounts = sample_counts,
969 .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
970 .sampledImageDepthSampleCounts = sample_counts,
971 .sampledImageStencilSampleCounts = sample_counts,
972 .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
973 .maxSampleMaskWords = 1,
974 .timestampComputeAndGraphics = true,
975 .timestampPeriod = 1000000.0 / pdevice->rad_info.clock_crystal_freq,
976 .maxClipDistances = 8,
977 .maxCullDistances = 8,
978 .maxCombinedClipAndCullDistances = 8,
979 .discreteQueuePriorities = 2,
980 .pointSizeRange = { 0.125, 255.875 },
981 .lineWidthRange = { 0.0, 7.9921875 },
982 .pointSizeGranularity = (1.0 / 8.0),
983 .lineWidthGranularity = (1.0 / 128.0),
984 .strictLines = false, /* FINISHME */
985 .standardSampleLocations = true,
986 .optimalBufferCopyOffsetAlignment = 128,
987 .optimalBufferCopyRowPitchAlignment = 128,
988 .nonCoherentAtomSize = 64,
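/* Note on timestampPeriod above: assuming clock_crystal_freq is reported in
 * kHz (as amdgpu does), 1e6 / kHz yields nanoseconds per tick, e.g. a
 * 100 MHz counter gives a 10 ns period.
 */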
991 *pProperties = (VkPhysicalDeviceProperties) {
992 .apiVersion = radv_physical_device_api_version(pdevice),
993 .driverVersion = vk_get_driver_version(),
994 .vendorID = ATI_VENDOR_ID,
995 .deviceID = pdevice->rad_info.pci_id,
996 .deviceType = pdevice->rad_info.has_dedicated_vram ? VK_PHYSICAL_DEVICE_TYPE_DISCRETE_GPU : VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
998 .sparseProperties = {0},
1001 strcpy(pProperties->deviceName, pdevice->name);
1002 memcpy(pProperties->pipelineCacheUUID, pdevice->cache_uuid, VK_UUID_SIZE);
1005 void radv_GetPhysicalDeviceProperties2(
1006 VkPhysicalDevice physicalDevice,
1007 VkPhysicalDeviceProperties2KHR *pProperties)
1009 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1010 radv_GetPhysicalDeviceProperties(physicalDevice, &pProperties->properties);
1012 vk_foreach_struct(ext, pProperties->pNext) {
1013 switch (ext->sType) {
1014 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PUSH_DESCRIPTOR_PROPERTIES_KHR: {
1015 VkPhysicalDevicePushDescriptorPropertiesKHR *properties =
1016 (VkPhysicalDevicePushDescriptorPropertiesKHR *) ext;
1017 properties->maxPushDescriptors = MAX_PUSH_DESCRIPTORS;
1020 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ID_PROPERTIES_KHR: {
1021 VkPhysicalDeviceIDPropertiesKHR *properties = (VkPhysicalDeviceIDPropertiesKHR*)ext;
1022 memcpy(properties->driverUUID, pdevice->driver_uuid, VK_UUID_SIZE);
1023 memcpy(properties->deviceUUID, pdevice->device_uuid, VK_UUID_SIZE);
1024 properties->deviceLUIDValid = false;
1027 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MULTIVIEW_PROPERTIES_KHR: {
1028 VkPhysicalDeviceMultiviewPropertiesKHR *properties = (VkPhysicalDeviceMultiviewPropertiesKHR*)ext;
1029 properties->maxMultiviewViewCount = MAX_VIEWS;
1030 properties->maxMultiviewInstanceIndex = INT_MAX;
1033 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_POINT_CLIPPING_PROPERTIES_KHR: {
1034 VkPhysicalDevicePointClippingPropertiesKHR *properties =
1035 (VkPhysicalDevicePointClippingPropertiesKHR*)ext;
1036 properties->pointClippingBehavior = VK_POINT_CLIPPING_BEHAVIOR_ALL_CLIP_PLANES_KHR;
1039 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DISCARD_RECTANGLE_PROPERTIES_EXT: {
1040 VkPhysicalDeviceDiscardRectanglePropertiesEXT *properties =
1041 (VkPhysicalDeviceDiscardRectanglePropertiesEXT*)ext;
1042 properties->maxDiscardRectangles = MAX_DISCARD_RECTANGLES;
1045 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_MEMORY_HOST_PROPERTIES_EXT: {
1046 VkPhysicalDeviceExternalMemoryHostPropertiesEXT *properties =
1047 (VkPhysicalDeviceExternalMemoryHostPropertiesEXT *) ext;
1048 properties->minImportedHostPointerAlignment = 4096;
1051 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SUBGROUP_PROPERTIES: {
1052 VkPhysicalDeviceSubgroupProperties *properties =
1053 (VkPhysicalDeviceSubgroupProperties*)ext;
1054 properties->subgroupSize = 64;
1055 properties->supportedStages = VK_SHADER_STAGE_ALL;
1056 properties->supportedOperations =
1057 VK_SUBGROUP_FEATURE_BASIC_BIT |
1058 VK_SUBGROUP_FEATURE_BALLOT_BIT |
1059 VK_SUBGROUP_FEATURE_QUAD_BIT |
1060 VK_SUBGROUP_FEATURE_VOTE_BIT;
1061 if (pdevice->rad_info.chip_class >= VI) {
1062 properties->supportedOperations |=
1063 VK_SUBGROUP_FEATURE_ARITHMETIC_BIT |
1064 VK_SUBGROUP_FEATURE_SHUFFLE_BIT |
1065 VK_SUBGROUP_FEATURE_SHUFFLE_RELATIVE_BIT;
1067 properties->quadOperationsInAllStages = true;
1070 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_MAINTENANCE_3_PROPERTIES: {
1071 VkPhysicalDeviceMaintenance3Properties *properties =
1072 (VkPhysicalDeviceMaintenance3Properties*)ext;
1073 /* Make sure everything is addressable by a signed 32-bit int, and
1074 * our largest descriptors are 96 bytes. */
1075 properties->maxPerSetDescriptors = (1ull << 31) / 96;
1076 /* Our buffer size fields allow only this much */
1077 properties->maxMemoryAllocationSize = 0xFFFFFFFFull;
1080 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_FILTER_MINMAX_PROPERTIES_EXT: {
1081 VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *properties =
1082 (VkPhysicalDeviceSamplerFilterMinmaxPropertiesEXT *)ext;
1083 /* GFX6-8 only support single channel min/max filter. */
1084 properties->filterMinmaxImageComponentMapping = pdevice->rad_info.chip_class >= GFX9;
1085 properties->filterMinmaxSingleComponentFormats = true;
1088 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SHADER_CORE_PROPERTIES_AMD: {
1089 VkPhysicalDeviceShaderCorePropertiesAMD *properties =
1090 (VkPhysicalDeviceShaderCorePropertiesAMD *)ext;
1092 /* Shader engines. */
1093 properties->shaderEngineCount =
1094 pdevice->rad_info.max_se;
1095 properties->shaderArraysPerEngineCount =
1096 pdevice->rad_info.max_sh_per_se;
1097 properties->computeUnitsPerShaderArray =
1098 pdevice->rad_info.num_good_cu_per_sh;
1099 properties->simdPerComputeUnit = 4;
1100 properties->wavefrontsPerSimd =
1101 pdevice->rad_info.family == CHIP_TONGA ||
1102 pdevice->rad_info.family == CHIP_ICELAND ||
1103 pdevice->rad_info.family == CHIP_POLARIS10 ||
1104 pdevice->rad_info.family == CHIP_POLARIS11 ||
1105 pdevice->rad_info.family == CHIP_POLARIS12 ||
1106 pdevice->rad_info.family == CHIP_VEGAM ? 8 : 10;
1107 properties->wavefrontSize = 64;
1110 properties->sgprsPerSimd =
1111 radv_get_num_physical_sgprs(pdevice);
1112 properties->minSgprAllocation =
1113 pdevice->rad_info.chip_class >= VI ? 16 : 8;
1114 properties->maxSgprAllocation =
1115 pdevice->rad_info.family == CHIP_TONGA ||
1116 pdevice->rad_info.family == CHIP_ICELAND ? 96 : 104;
1117 properties->sgprAllocationGranularity =
1118 pdevice->rad_info.chip_class >= VI ? 16 : 8;
1121 properties->vgprsPerSimd = RADV_NUM_PHYSICAL_VGPRS;
1122 properties->minVgprAllocation = 4;
1123 properties->maxVgprAllocation = 256;
1124 properties->vgprAllocationGranularity = 4;
1127 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_VERTEX_ATTRIBUTE_DIVISOR_PROPERTIES_EXT: {
1128 VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *properties =
1129 (VkPhysicalDeviceVertexAttributeDivisorPropertiesEXT *)ext;
1130 properties->maxVertexAttribDivisor = UINT32_MAX;
1133 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DESCRIPTOR_INDEXING_PROPERTIES_EXT: {
1134 VkPhysicalDeviceDescriptorIndexingPropertiesEXT *properties =
1135 (VkPhysicalDeviceDescriptorIndexingPropertiesEXT*)ext;
1136 properties->maxUpdateAfterBindDescriptorsInAllPools = UINT32_MAX / 64;
1137 properties->shaderUniformBufferArrayNonUniformIndexingNative = false;
1138 properties->shaderSampledImageArrayNonUniformIndexingNative = false;
1139 properties->shaderStorageBufferArrayNonUniformIndexingNative = false;
1140 properties->shaderStorageImageArrayNonUniformIndexingNative = false;
1141 properties->shaderInputAttachmentArrayNonUniformIndexingNative = false;
1142 properties->robustBufferAccessUpdateAfterBind = false;
1143 properties->quadDivergentImplicitLod = false;
1145 size_t max_descriptor_set_size = ((1ull << 31) - 16 * MAX_DYNAMIC_BUFFERS) /
1146 (32 /* uniform buffer, 32 due to potential space wasted on alignment */ +
1147 32 /* storage buffer, 32 due to potential space wasted on alignment */ +
1148 32 /* sampler, largest when combined with image */ +
1149 64 /* sampled image */ +
1150 64 /* storage image */);
1151 properties->maxPerStageDescriptorUpdateAfterBindSamplers = max_descriptor_set_size;
1152 properties->maxPerStageDescriptorUpdateAfterBindUniformBuffers = max_descriptor_set_size;
1153 properties->maxPerStageDescriptorUpdateAfterBindStorageBuffers = max_descriptor_set_size;
1154 properties->maxPerStageDescriptorUpdateAfterBindSampledImages = max_descriptor_set_size;
1155 properties->maxPerStageDescriptorUpdateAfterBindStorageImages = max_descriptor_set_size;
1156 properties->maxPerStageDescriptorUpdateAfterBindInputAttachments = max_descriptor_set_size;
1157 properties->maxPerStageUpdateAfterBindResources = max_descriptor_set_size;
1158 properties->maxDescriptorSetUpdateAfterBindSamplers = max_descriptor_set_size;
1159 properties->maxDescriptorSetUpdateAfterBindUniformBuffers = max_descriptor_set_size;
1160 properties->maxDescriptorSetUpdateAfterBindUniformBuffersDynamic = MAX_DYNAMIC_UNIFORM_BUFFERS;
1161 properties->maxDescriptorSetUpdateAfterBindStorageBuffers = max_descriptor_set_size;
1162 properties->maxDescriptorSetUpdateAfterBindStorageBuffersDynamic = MAX_DYNAMIC_STORAGE_BUFFERS;
1163 properties->maxDescriptorSetUpdateAfterBindSampledImages = max_descriptor_set_size;
1164 properties->maxDescriptorSetUpdateAfterBindStorageImages = max_descriptor_set_size;
1165 properties->maxDescriptorSetUpdateAfterBindInputAttachments = max_descriptor_set_size;
1168 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROTECTED_MEMORY_PROPERTIES: {
1169 VkPhysicalDeviceProtectedMemoryProperties *properties =
1170 (VkPhysicalDeviceProtectedMemoryProperties *)ext;
1171 properties->protectedNoFault = false;
1174 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_CONSERVATIVE_RASTERIZATION_PROPERTIES_EXT: {
1175 VkPhysicalDeviceConservativeRasterizationPropertiesEXT *properties =
1176 (VkPhysicalDeviceConservativeRasterizationPropertiesEXT *)ext;
1177 properties->primitiveOverestimationSize = 0;
1178 properties->maxExtraPrimitiveOverestimationSize = 0;
1179 properties->extraPrimitiveOverestimationSizeGranularity = 0;
1180 properties->primitiveUnderestimation = VK_FALSE;
1181 properties->conservativePointAndLineRasterization = VK_FALSE;
1182 properties->degenerateTrianglesRasterized = VK_FALSE;
1183 properties->degenerateLinesRasterized = VK_FALSE;
1184 properties->fullyCoveredFragmentShaderInputVariable = VK_FALSE;
1185 properties->conservativeRasterizationPostDepthCoverage = VK_FALSE;
1188 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PCI_BUS_INFO_PROPERTIES_EXT: {
1189 VkPhysicalDevicePCIBusInfoPropertiesEXT *properties =
1190 (VkPhysicalDevicePCIBusInfoPropertiesEXT *)ext;
1191 properties->pciDomain = pdevice->bus_info.domain;
1192 properties->pciBus = pdevice->bus_info.bus;
1193 properties->pciDevice = pdevice->bus_info.dev;
1194 properties->pciFunction = pdevice->bus_info.func;
1197 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_DRIVER_PROPERTIES_KHR: {
1198 VkPhysicalDeviceDriverPropertiesKHR *driver_props =
1199 (VkPhysicalDeviceDriverPropertiesKHR *) ext;
1201 driver_props->driverID = VK_DRIVER_ID_MESA_RADV_KHR;
1202 memset(driver_props->driverName, 0, VK_MAX_DRIVER_NAME_SIZE_KHR);
1203 strcpy(driver_props->driverName, "radv");
1205 memset(driver_props->driverInfo, 0, VK_MAX_DRIVER_INFO_SIZE_KHR);
1206 snprintf(driver_props->driverInfo, VK_MAX_DRIVER_INFO_SIZE_KHR,
1207 "Mesa " PACKAGE_VERSION MESA_GIT_SHA1
1209 (HAVE_LLVM >> 8) & 0xff, HAVE_LLVM & 0xff,
1210 MESA_LLVM_VERSION_PATCH);
1212 driver_props->conformanceVersion = (VkConformanceVersionKHR) {
1220 case VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_TRANSFORM_FEEDBACK_PROPERTIES_EXT: {
1221 VkPhysicalDeviceTransformFeedbackPropertiesEXT *properties =
1222 (VkPhysicalDeviceTransformFeedbackPropertiesEXT *)ext;
1223 properties->maxTransformFeedbackStreams = MAX_SO_STREAMS;
1224 properties->maxTransformFeedbackBuffers = MAX_SO_BUFFERS;
1225 properties->maxTransformFeedbackBufferSize = UINT32_MAX;
1226 properties->maxTransformFeedbackStreamDataSize = 512;
1227 properties->maxTransformFeedbackBufferDataSize = UINT32_MAX;
1228 properties->maxTransformFeedbackBufferDataStride = 512;
1229 properties->transformFeedbackQueries = true;
1230 properties->transformFeedbackStreamsLinesTriangles = false;
1231 properties->transformFeedbackRasterizationStreamSelect = false;
1232 properties->transformFeedbackDraw = true;
1241 static void radv_get_physical_device_queue_family_properties(
1242 struct radv_physical_device* pdevice,
1244 VkQueueFamilyProperties** pQueueFamilyProperties)
1246 int num_queue_families = 1;
1248 if (pdevice->rad_info.num_compute_rings > 0 &&
1249 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE))
1250 num_queue_families++;
1252 if (pQueueFamilyProperties == NULL) {
1253 *pCount = num_queue_families;
1262 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
1263 .queueFlags = VK_QUEUE_GRAPHICS_BIT |
1264 VK_QUEUE_COMPUTE_BIT |
1265 VK_QUEUE_TRANSFER_BIT |
1266 VK_QUEUE_SPARSE_BINDING_BIT,
1268 .timestampValidBits = 64,
1269 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
1274 if (pdevice->rad_info.num_compute_rings > 0 &&
1275 !(pdevice->instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
1276 if (*pCount > idx) {
1277 *pQueueFamilyProperties[idx] = (VkQueueFamilyProperties) {
1278 .queueFlags = VK_QUEUE_COMPUTE_BIT |
1279 VK_QUEUE_TRANSFER_BIT |
1280 VK_QUEUE_SPARSE_BINDING_BIT,
1281 .queueCount = pdevice->rad_info.num_compute_rings,
1282 .timestampValidBits = 64,
1283 .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
1291 void radv_GetPhysicalDeviceQueueFamilyProperties(
1292 VkPhysicalDevice physicalDevice,
1294 VkQueueFamilyProperties* pQueueFamilyProperties)
1296 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1297 if (!pQueueFamilyProperties) {
1298 return radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
1301 VkQueueFamilyProperties *properties[] = {
1302 pQueueFamilyProperties + 0,
1303 pQueueFamilyProperties + 1,
1304 pQueueFamilyProperties + 2,
1306 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
1307 assert(*pCount <= 3);
1310 void radv_GetPhysicalDeviceQueueFamilyProperties2(
1311 VkPhysicalDevice physicalDevice,
1313 VkQueueFamilyProperties2KHR *pQueueFamilyProperties)
1315 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
1316 if (!pQueueFamilyProperties) {
1317 return radv_get_physical_device_queue_family_properties(pdevice, pCount, NULL);
1320 VkQueueFamilyProperties *properties[] = {
1321 &pQueueFamilyProperties[0].queueFamilyProperties,
1322 &pQueueFamilyProperties[1].queueFamilyProperties,
1323 &pQueueFamilyProperties[2].queueFamilyProperties,
1325 radv_get_physical_device_queue_family_properties(pdevice, pCount, properties);
1326 assert(*pCount <= 3);
1329 void radv_GetPhysicalDeviceMemoryProperties(
1330 VkPhysicalDevice physicalDevice,
1331 VkPhysicalDeviceMemoryProperties *pMemoryProperties)
1333 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1335 *pMemoryProperties = physical_device->memory_properties;
1338 void radv_GetPhysicalDeviceMemoryProperties2(
1339 VkPhysicalDevice physicalDevice,
1340 VkPhysicalDeviceMemoryProperties2KHR *pMemoryProperties)
1342 return radv_GetPhysicalDeviceMemoryProperties(physicalDevice,
1343 &pMemoryProperties->memoryProperties);
1346 VkResult radv_GetMemoryHostPointerPropertiesEXT(
1348 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
1349 const void *pHostPointer,
1350 VkMemoryHostPointerPropertiesEXT *pMemoryHostPointerProperties)
1352 RADV_FROM_HANDLE(radv_device, device, _device);
1356 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT: {
1357 const struct radv_physical_device *physical_device = device->physical_device;
1358 uint32_t memoryTypeBits = 0;
1359 for (int i = 0; i < physical_device->memory_properties.memoryTypeCount; i++) {
1360 if (physical_device->mem_type_indices[i] == RADV_MEM_TYPE_GTT_CACHED) {
1361 memoryTypeBits = (1 << i);
1365 pMemoryHostPointerProperties->memoryTypeBits = memoryTypeBits;
1369 return VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
1373 static enum radeon_ctx_priority
1374 radv_get_queue_global_priority(const VkDeviceQueueGlobalPriorityCreateInfoEXT *pObj)
1376 /* Default to MEDIUM when a specific global priority isn't requested */
1378 return RADEON_CTX_PRIORITY_MEDIUM;
1380 switch(pObj->globalPriority) {
1381 case VK_QUEUE_GLOBAL_PRIORITY_REALTIME_EXT:
1382 return RADEON_CTX_PRIORITY_REALTIME;
1383 case VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT:
1384 return RADEON_CTX_PRIORITY_HIGH;
1385 case VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT:
1386 return RADEON_CTX_PRIORITY_MEDIUM;
1387 case VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT:
1388 return RADEON_CTX_PRIORITY_LOW;
1390 unreachable("Illegal global priority value");
1391 return RADEON_CTX_PRIORITY_INVALID;
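/* Caller-side sketch (plain Vulkan usage, not radv-specific): the application
 * chains a VkDeviceQueueGlobalPriorityCreateInfoEXT with e.g.
 * .globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT into
 * VkDeviceQueueCreateInfo::pNext; radv_CreateDevice() fetches it with
 * vk_find_struct_const() and hands it to radv_queue_init() below.
 */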
1396 radv_queue_init(struct radv_device *device, struct radv_queue *queue,
1397 uint32_t queue_family_index, int idx,
1398 VkDeviceQueueCreateFlags flags,
1399 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority)
1401 queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1402 queue->device = device;
1403 queue->queue_family_index = queue_family_index;
1404 queue->queue_idx = idx;
1405 queue->priority = radv_get_queue_global_priority(global_priority);
1406 queue->flags = flags;
1408 queue->hw_ctx = device->ws->ctx_create(device->ws, queue->priority);
1410 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1416 radv_queue_finish(struct radv_queue *queue)
1419 queue->device->ws->ctx_destroy(queue->hw_ctx);
1421 if (queue->initial_full_flush_preamble_cs)
1422 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
1423 if (queue->initial_preamble_cs)
1424 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
1425 if (queue->continue_preamble_cs)
1426 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
1427 if (queue->descriptor_bo)
1428 queue->device->ws->buffer_destroy(queue->descriptor_bo);
1429 if (queue->scratch_bo)
1430 queue->device->ws->buffer_destroy(queue->scratch_bo);
1431 if (queue->esgs_ring_bo)
1432 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
1433 if (queue->gsvs_ring_bo)
1434 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
1435 if (queue->tess_rings_bo)
1436 queue->device->ws->buffer_destroy(queue->tess_rings_bo);
1437 if (queue->compute_scratch_bo)
1438 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
1442 radv_bo_list_init(struct radv_bo_list *bo_list)
1444 pthread_mutex_init(&bo_list->mutex, NULL);
1445 bo_list->list.count = bo_list->capacity = 0;
1446 bo_list->list.bos = NULL;
1450 radv_bo_list_finish(struct radv_bo_list *bo_list)
1452 free(bo_list->list.bos);
1453 pthread_mutex_destroy(&bo_list->mutex);
1456 static VkResult radv_bo_list_add(struct radv_device *device,
1457 struct radeon_winsys_bo *bo)
1459 struct radv_bo_list *bo_list = &device->bo_list;
1461 if (unlikely(!device->use_global_bo_list))
1464 pthread_mutex_lock(&bo_list->mutex);
1465 if (bo_list->list.count == bo_list->capacity) {
1466 unsigned capacity = MAX2(4, bo_list->capacity * 2);
1467 void *data = realloc(bo_list->list.bos, capacity * sizeof(struct radeon_winsys_bo*));
1470 pthread_mutex_unlock(&bo_list->mutex);
1471 return VK_ERROR_OUT_OF_HOST_MEMORY;
1474 bo_list->list.bos = (struct radeon_winsys_bo**)data;
1475 bo_list->capacity = capacity;
1478 bo_list->list.bos[bo_list->list.count++] = bo;
1479 pthread_mutex_unlock(&bo_list->mutex);
1483 static void radv_bo_list_remove(struct radv_device *device,
1484 struct radeon_winsys_bo *bo)
1486 struct radv_bo_list *bo_list = &device->bo_list;
1488 if (unlikely(!device->use_global_bo_list))
1491 pthread_mutex_lock(&bo_list->mutex);
1492 for(unsigned i = 0; i < bo_list->list.count; ++i) {
1493 if (bo_list->list.bos[i] == bo) {
1494 bo_list->list.bos[i] = bo_list->list.bos[bo_list->list.count - 1];
1495 --bo_list->list.count;
1499 pthread_mutex_unlock(&bo_list->mutex);
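/* Removal swaps the last entry into the vacated slot, so it is O(1) but does
 * not preserve list order; growth in radv_bo_list_add() doubles the capacity
 * with a minimum of 4 entries.
 */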
1503 radv_device_init_gs_info(struct radv_device *device)
1505 device->gs_table_depth = ac_get_gs_table_depth(device->physical_device->rad_info.chip_class,
1506 device->physical_device->rad_info.family);
1509 static int radv_get_device_extension_index(const char *name)
1511 for (unsigned i = 0; i < RADV_DEVICE_EXTENSION_COUNT; ++i) {
1512 if (strcmp(name, radv_device_extensions[i].extensionName) == 0)
1519 radv_get_int_debug_option(const char *name, int default_value)
1526 result = default_value;
1530 result = strtol(str, &endptr, 0);
1531 if (str == endptr) {
1532 /* No digits found. */
1533 result = default_value;
1540 VkResult radv_CreateDevice(
1541 VkPhysicalDevice physicalDevice,
1542 const VkDeviceCreateInfo* pCreateInfo,
1543 const VkAllocationCallbacks* pAllocator,
1546 RADV_FROM_HANDLE(radv_physical_device, physical_device, physicalDevice);
1548 struct radv_device *device;
1550 bool keep_shader_info = false;
1552 /* Check enabled features */
1553 if (pCreateInfo->pEnabledFeatures) {
1554 VkPhysicalDeviceFeatures supported_features;
1555 radv_GetPhysicalDeviceFeatures(physicalDevice, &supported_features);
1556 VkBool32 *supported_feature = (VkBool32 *)&supported_features;
1557 VkBool32 *enabled_feature = (VkBool32 *)pCreateInfo->pEnabledFeatures;
1558 unsigned num_features = sizeof(VkPhysicalDeviceFeatures) / sizeof(VkBool32);
1559 for (uint32_t i = 0; i < num_features; i++) {
1560 if (enabled_feature[i] && !supported_feature[i])
1561 return vk_error(physical_device->instance, VK_ERROR_FEATURE_NOT_PRESENT);
1565 device = vk_zalloc2(&physical_device->instance->alloc, pAllocator,
1567 VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1569 return vk_error(physical_device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
1571 device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
1572 device->instance = physical_device->instance;
1573 device->physical_device = physical_device;
1575 device->ws = physical_device->ws;
1577 device->alloc = *pAllocator;
1579 device->alloc = physical_device->instance->alloc;
1581 for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
1582 const char *ext_name = pCreateInfo->ppEnabledExtensionNames[i];
1583 int index = radv_get_device_extension_index(ext_name);
1584 if (index < 0 || !physical_device->supported_extensions.extensions[index]) {
1585 vk_free(&device->alloc, device);
1586 return vk_error(physical_device->instance, VK_ERROR_EXTENSION_NOT_PRESENT);
1589 device->enabled_extensions.extensions[index] = true;
1592 keep_shader_info = device->enabled_extensions.AMD_shader_info;
1594 /* With update after bind we can't attach bo's to the command buffer
1595 * from the descriptor set anymore, so we have to use a global BO list.
1597 device->use_global_bo_list =
1598 device->enabled_extensions.EXT_descriptor_indexing;
1600 mtx_init(&device->shader_slab_mutex, mtx_plain);
1601 list_inithead(&device->shader_slabs);
1603 radv_bo_list_init(&device->bo_list);
1605 for (unsigned i = 0; i < pCreateInfo->queueCreateInfoCount; i++) {
1606 const VkDeviceQueueCreateInfo *queue_create = &pCreateInfo->pQueueCreateInfos[i];
1607 uint32_t qfi = queue_create->queueFamilyIndex;
1608 const VkDeviceQueueGlobalPriorityCreateInfoEXT *global_priority =
1609 vk_find_struct_const(queue_create->pNext, DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT);
1611 assert(!global_priority || device->physical_device->rad_info.has_ctx_priority);
1613 device->queues[qfi] = vk_alloc(&device->alloc,
1614 queue_create->queueCount * sizeof(struct radv_queue), 8, VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
1615 if (!device->queues[qfi]) {
1616 result = VK_ERROR_OUT_OF_HOST_MEMORY;
1620 memset(device->queues[qfi], 0, queue_create->queueCount * sizeof(struct radv_queue));
1622 device->queue_count[qfi] = queue_create->queueCount;
1624 for (unsigned q = 0; q < queue_create->queueCount; q++) {
1625 result = radv_queue_init(device, &device->queues[qfi][q],
1626 qfi, q, queue_create->flags,
1628 if (result != VK_SUCCESS)
1633 device->pbb_allowed = device->physical_device->rad_info.chip_class >= GFX9 &&
1634 ((device->instance->perftest_flags & RADV_PERFTEST_BINNING) ||
1635 device->physical_device->rad_info.family == CHIP_RAVEN ||
1636 device->physical_device->rad_info.family == CHIP_RAVEN2);
1638 /* Disabled and not implemented for now. */
1639 device->dfsm_allowed = device->pbb_allowed &&
1640 (device->physical_device->rad_info.family == CHIP_RAVEN ||
1641 device->physical_device->rad_info.family == CHIP_RAVEN2);
1644 device->always_use_syncobj = device->physical_device->rad_info.has_syncobj_wait_for_submit;
1647 /* The maximum number of scratch waves. Scratch space isn't divided
1648 * evenly between CUs. The number is only a function of the number of CUs.
1649 * We can decrease the constant to decrease the scratch buffer size.
1651 * device->scratch_waves must be >= the maximum possible size of
1652 * 1 threadgroup, so that the hw doesn't hang from being unable to start any.
1655 * The recommended value is 4 per CU at most. Higher numbers don't
1656 * bring much benefit, but they still occupy chip resources (think
1657 * async compute). I've seen ~2% performance difference between 4 and 32.
1659 uint32_t max_threads_per_block = 2048;
1660 device->scratch_waves = MAX2(32 * physical_device->rad_info.num_good_compute_units,
1661 max_threads_per_block / 64);
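/* Worked example for a hypothetical 16-CU part:
 * MAX2(32 * 16, 2048 / 64) = MAX2(512, 32) = 512 scratch waves. */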
1663 device->dispatch_initiator = S_00B800_COMPUTE_SHADER_EN(1);
1665 if (device->physical_device->rad_info.chip_class >= CIK) {
1666 /* If the KMD allows it (there is a KMD hw register for it),
1667 * allow launching waves out-of-order.
1669 device->dispatch_initiator |= S_00B800_ORDER_MODE(1);
1672 radv_device_init_gs_info(device);
1674 device->tess_offchip_block_dw_size =
1675 device->physical_device->rad_info.family == CHIP_HAWAII ? 4096 : 8192;
1676 device->has_distributed_tess =
1677 device->physical_device->rad_info.chip_class >= VI &&
1678 device->physical_device->rad_info.max_se >= 2;
1680 if (getenv("RADV_TRACE_FILE")) {
1681 const char *filename = getenv("RADV_TRACE_FILE");
1683 keep_shader_info = true;
1685 if (!radv_init_trace(device))
1686 goto fail;
1688 fprintf(stderr, "*****************************************************************************\n");
1689 fprintf(stderr, "* WARNING: RADV_TRACE_FILE is costly and should only be used for debugging! *\n");
1690 fprintf(stderr, "*****************************************************************************\n");
1692 fprintf(stderr, "Trace file will be dumped to %s\n", filename);
1693 radv_dump_enabled_options(device, stderr);
1696 device->keep_shader_info = keep_shader_info;
1698 result = radv_device_init_meta(device);
1699 if (result != VK_SUCCESS)
1702 radv_device_init_msaa(device);
1704 for (int family = 0; family < RADV_MAX_QUEUE_FAMILIES; ++family) {
1705 device->empty_cs[family] = device->ws->cs_create(device->ws, family);
1706 switch (family) {
1707 case RADV_QUEUE_GENERAL:
1708 radeon_emit(device->empty_cs[family], PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
1709 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_LOAD_ENABLE(1));
1710 radeon_emit(device->empty_cs[family], CONTEXT_CONTROL_SHADOW_ENABLE(1));
1711 break;
1712 case RADV_QUEUE_COMPUTE:
1713 radeon_emit(device->empty_cs[family], PKT3(PKT3_NOP, 0, 0));
1714 radeon_emit(device->empty_cs[family], 0);
1715 break;
1716 }
1717 device->ws->cs_finalize(device->empty_cs[family]);
1718 }
1720 if (device->physical_device->rad_info.chip_class >= CIK)
1721 cik_create_gfx_config(device);
1723 VkPipelineCacheCreateInfo ci;
1724 ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
1725 ci.pNext = NULL;
1726 ci.flags = 0;
1727 ci.pInitialData = NULL;
1728 ci.initialDataSize = 0;
1729 VkPipelineCache pc;
1730 result = radv_CreatePipelineCache(radv_device_to_handle(device),
1731 &ci, NULL, &pc);
1732 if (result != VK_SUCCESS)
1733 goto fail_meta;
1735 device->mem_cache = radv_pipeline_cache_from_handle(pc);
1737 device->force_aniso =
1738 MIN2(16, radv_get_int_debug_option("RADV_TEX_ANISO", -1));
1739 if (device->force_aniso >= 0) {
1740 fprintf(stderr, "radv: Forcing anisotropy filter to %ix\n",
1741 1 << util_logbase2(device->force_aniso));
1742 }
1744 *pDevice = radv_device_to_handle(device);
1745 return VK_SUCCESS;
1747 fail_meta:
1748 radv_device_finish_meta(device);
1749 fail:
1750 radv_bo_list_finish(&device->bo_list);
1752 if (device->trace_bo)
1753 device->ws->buffer_destroy(device->trace_bo);
1755 if (device->gfx_init)
1756 device->ws->buffer_destroy(device->gfx_init);
1758 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1759 for (unsigned q = 0; q < device->queue_count[i]; q++)
1760 radv_queue_finish(&device->queues[i][q]);
1761 if (device->queue_count[i])
1762 vk_free(&device->alloc, device->queues[i]);
1763 }
1765 vk_free(&device->alloc, device);
1766 return result;
1767 }
1769 void radv_DestroyDevice(
1770 VkDevice _device,
1771 const VkAllocationCallbacks* pAllocator)
1772 {
1773 RADV_FROM_HANDLE(radv_device, device, _device);
1778 if (device->trace_bo)
1779 device->ws->buffer_destroy(device->trace_bo);
1781 if (device->gfx_init)
1782 device->ws->buffer_destroy(device->gfx_init);
1784 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
1785 for (unsigned q = 0; q < device->queue_count[i]; q++)
1786 radv_queue_finish(&device->queues[i][q]);
1787 if (device->queue_count[i])
1788 vk_free(&device->alloc, device->queues[i]);
1789 if (device->empty_cs[i])
1790 device->ws->cs_destroy(device->empty_cs[i]);
1792 radv_device_finish_meta(device);
1794 VkPipelineCache pc = radv_pipeline_cache_to_handle(device->mem_cache);
1795 radv_DestroyPipelineCache(radv_device_to_handle(device), pc, NULL);
1797 radv_destroy_shader_slabs(device);
1799 radv_bo_list_finish(&device->bo_list);
1800 vk_free(&device->alloc, device);
1803 VkResult radv_EnumerateInstanceLayerProperties(
1804 uint32_t* pPropertyCount,
1805 VkLayerProperties* pProperties)
1807 if (pProperties == NULL) {
1808 *pPropertyCount = 0;
1809 return VK_SUCCESS;
1810 }
1812 /* None supported at this time */
1813 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1816 VkResult radv_EnumerateDeviceLayerProperties(
1817 VkPhysicalDevice physicalDevice,
1818 uint32_t* pPropertyCount,
1819 VkLayerProperties* pProperties)
1821 if (pProperties == NULL) {
1822 *pPropertyCount = 0;
1823 return VK_SUCCESS;
1824 }
1826 /* None supported at this time */
1827 return vk_error(NULL, VK_ERROR_LAYER_NOT_PRESENT);
1830 void radv_GetDeviceQueue2(
1832 const VkDeviceQueueInfo2* pQueueInfo,
1835 RADV_FROM_HANDLE(radv_device, device, _device);
1836 struct radv_queue *queue;
1838 queue = &device->queues[pQueueInfo->queueFamilyIndex][pQueueInfo->queueIndex];
1839 if (pQueueInfo->flags != queue->flags) {
1840 /* From the Vulkan 1.1.70 spec:
1842 * "The queue returned by vkGetDeviceQueue2 must have the same
1843 * flags value from this structure as that used at device
1844 * creation time in a VkDeviceQueueCreateInfo instance. If no
1845 * matching flags were specified at device creation time then
1846 pQueue will return VK_NULL_HANDLE."
1847 */
1848 *pQueue = VK_NULL_HANDLE;
1849 return;
1850 }
1852 *pQueue = radv_queue_to_handle(queue);
1855 void radv_GetDeviceQueue(
1857 uint32_t queueFamilyIndex,
1858 uint32_t queueIndex,
1861 const VkDeviceQueueInfo2 info = (VkDeviceQueueInfo2) {
1862 .sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_INFO_2,
1863 .queueFamilyIndex = queueFamilyIndex,
1864 .queueIndex = queueIndex
1867 radv_GetDeviceQueue2(_device, &info, pQueue);
1871 fill_geom_tess_rings(struct radv_queue *queue,
1873 bool add_sample_positions,
1874 uint32_t esgs_ring_size,
1875 struct radeon_winsys_bo *esgs_ring_bo,
1876 uint32_t gsvs_ring_size,
1877 struct radeon_winsys_bo *gsvs_ring_bo,
1878 uint32_t tess_factor_ring_size,
1879 uint32_t tess_offchip_ring_offset,
1880 uint32_t tess_offchip_ring_size,
1881 struct radeon_winsys_bo *tess_rings_bo)
1883 uint64_t esgs_va = 0, gsvs_va = 0;
1884 uint64_t tess_va = 0, tess_offchip_va = 0;
1885 uint32_t *desc = &map[4];
1888 esgs_va = radv_buffer_get_va(esgs_ring_bo);
1890 gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
1891 if (tess_rings_bo) {
1892 tess_va = radv_buffer_get_va(tess_rings_bo);
1893 tess_offchip_va = tess_va + tess_offchip_ring_offset;
1894 }
1896 /* stride 0, num records - size, add tid, swizzle, elsize4,
1897    index stride 64 */
1898 desc[0] = esgs_va;
1899 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32) |
1900 S_008F04_STRIDE(0) |
1901 S_008F04_SWIZZLE_ENABLE(true);
1902 desc[2] = esgs_ring_size;
1903 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1904 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1905 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1906 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1907 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1908 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1909 S_008F0C_ELEMENT_SIZE(1) |
1910 S_008F0C_INDEX_STRIDE(3) |
1911 S_008F0C_ADD_TID_ENABLE(true);
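/* Each ring entry written here is a 4-dword buffer resource descriptor:
 * dword 0 holds the low bits of the ring address, dword 1 the high address
 * bits plus stride/swizzle, dword 2 the size in records, and dword 3 the
 * dst_sel/format/element-size/index-stride fields. */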
1914 /* GS entry for ES->GS ring */
1915 /* stride 0, num records - size, elsize0,
1918 desc[1] = S_008F04_BASE_ADDRESS_HI(esgs_va >> 32)|
1919 S_008F04_STRIDE(0) |
1920 S_008F04_SWIZZLE_ENABLE(false);
1921 desc[2] = esgs_ring_size;
1922 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1923 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1924 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1925 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1926 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1927 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1928 S_008F0C_ELEMENT_SIZE(0) |
1929 S_008F0C_INDEX_STRIDE(0) |
1930 S_008F0C_ADD_TID_ENABLE(false);
1933 /* VS entry for GS->VS ring */
1934 /* stride 0, num records - size, elsize0,
1937 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1938 S_008F04_STRIDE(0) |
1939 S_008F04_SWIZZLE_ENABLE(false);
1940 desc[2] = gsvs_ring_size;
1941 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1942 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1943 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1944 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1945 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1946 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1947 S_008F0C_ELEMENT_SIZE(0) |
1948 S_008F0C_INDEX_STRIDE(0) |
1949 S_008F0C_ADD_TID_ENABLE(false);
1952 /* stride gsvs_itemsize, num records 64
1953 elsize 4, index stride 16 */
1954 /* shader will patch stride and desc[2] */
1956 desc[1] = S_008F04_BASE_ADDRESS_HI(gsvs_va >> 32)|
1957 S_008F04_STRIDE(0) |
1958 S_008F04_SWIZZLE_ENABLE(true);
1960 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1961 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1962 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1963 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1964 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1965 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1966 S_008F0C_ELEMENT_SIZE(1) |
1967 S_008F0C_INDEX_STRIDE(1) |
1968 S_008F0C_ADD_TID_ENABLE(true);
1972 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_va >> 32) |
1973 S_008F04_STRIDE(0) |
1974 S_008F04_SWIZZLE_ENABLE(false);
1975 desc[2] = tess_factor_ring_size;
1976 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1977 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1978 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1979 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1980 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1981 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1982 S_008F0C_ELEMENT_SIZE(0) |
1983 S_008F0C_INDEX_STRIDE(0) |
1984 S_008F0C_ADD_TID_ENABLE(false);
1987 desc[0] = tess_offchip_va;
1988 desc[1] = S_008F04_BASE_ADDRESS_HI(tess_offchip_va >> 32) |
1989 S_008F04_STRIDE(0) |
1990 S_008F04_SWIZZLE_ENABLE(false);
1991 desc[2] = tess_offchip_ring_size;
1992 desc[3] = S_008F0C_DST_SEL_X(V_008F0C_SQ_SEL_X) |
1993 S_008F0C_DST_SEL_Y(V_008F0C_SQ_SEL_Y) |
1994 S_008F0C_DST_SEL_Z(V_008F0C_SQ_SEL_Z) |
1995 S_008F0C_DST_SEL_W(V_008F0C_SQ_SEL_W) |
1996 S_008F0C_NUM_FORMAT(V_008F0C_BUF_NUM_FORMAT_FLOAT) |
1997 S_008F0C_DATA_FORMAT(V_008F0C_BUF_DATA_FORMAT_32) |
1998 S_008F0C_ELEMENT_SIZE(0) |
1999 S_008F0C_INDEX_STRIDE(0) |
2000 S_008F0C_ADD_TID_ENABLE(false);
2003 /* add sample positions after all rings */
2004 memcpy(desc, queue->device->sample_locations_1x, 8);
2006 memcpy(desc, queue->device->sample_locations_2x, 16);
2008 memcpy(desc, queue->device->sample_locations_4x, 32);
2010 memcpy(desc, queue->device->sample_locations_8x, 64);
2012 memcpy(desc, queue->device->sample_locations_16x, 128);
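/* The 1x/2x/4x/8x/16x tables are 8/16/32/64/128 bytes respectively
 * (two floats per sample), 248 bytes in total, packed consecutively
 * after the last ring descriptor. */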
2016 radv_get_hs_offchip_param(struct radv_device *device, uint32_t *max_offchip_buffers_p)
2018 bool double_offchip_buffers = device->physical_device->rad_info.chip_class >= CIK &&
2019 device->physical_device->rad_info.family != CHIP_CARRIZO &&
2020 device->physical_device->rad_info.family != CHIP_STONEY;
2021 unsigned max_offchip_buffers_per_se = double_offchip_buffers ? 128 : 64;
2022 unsigned max_offchip_buffers;
2023 unsigned offchip_granularity;
2024 unsigned hs_offchip_param;
2028 * This must be one less than the maximum number due to a hw limitation.
2029 * Various hardware bugs in SI, CIK, and GFX9 need this.
2032 * Vega10 should limit max_offchip_buffers to 508 (4 * 127).
2033 * Gfx7 should limit max_offchip_buffers to 508
2034 * Gfx6 should limit max_offchip_buffers to 126 (2 * 63)
2036 * Follow AMDVLK here.
2038 if (device->physical_device->rad_info.family == CHIP_VEGA10 ||
2039 device->physical_device->rad_info.chip_class == CIK ||
2040 device->physical_device->rad_info.chip_class == SI)
2041 --max_offchip_buffers_per_se;
2043 max_offchip_buffers = max_offchip_buffers_per_se *
2044 device->physical_device->rad_info.max_se;
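/* For example, on a 4-SE Vega10 with double offchip buffers this yields
 * (128 - 1) * 4 = 508, matching the AMDVLK limit quoted above. */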
2046 switch (device->tess_offchip_block_dw_size) {
2051 offchip_granularity = V_03093C_X_8K_DWORDS;
2054 offchip_granularity = V_03093C_X_4K_DWORDS;
2058 switch (device->physical_device->rad_info.chip_class) {
2060 max_offchip_buffers = MIN2(max_offchip_buffers, 126);
2066 max_offchip_buffers = MIN2(max_offchip_buffers, 508);
2070 *max_offchip_buffers_p = max_offchip_buffers;
2071 if (device->physical_device->rad_info.chip_class >= CIK) {
2072 if (device->physical_device->rad_info.chip_class >= VI)
2073 --max_offchip_buffers;
2075 S_03093C_OFFCHIP_BUFFERING(max_offchip_buffers) |
2076 S_03093C_OFFCHIP_GRANULARITY(offchip_granularity);
2079 S_0089B0_OFFCHIP_BUFFERING(max_offchip_buffers);
2081 return hs_offchip_param;
2085 radv_emit_gs_ring_sizes(struct radv_queue *queue, struct radeon_cmdbuf *cs,
2086 struct radeon_winsys_bo *esgs_ring_bo,
2087 uint32_t esgs_ring_size,
2088 struct radeon_winsys_bo *gsvs_ring_bo,
2089 uint32_t gsvs_ring_size)
2091 if (!esgs_ring_bo && !gsvs_ring_bo)
2095 radv_cs_add_buffer(queue->device->ws, cs, esgs_ring_bo);
2098 radv_cs_add_buffer(queue->device->ws, cs, gsvs_ring_bo);
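/* The VGT_*GS_RING_SIZE registers are programmed in units of 256 bytes,
 * hence the >> 8 below. */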
2100 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
2101 radeon_set_uconfig_reg_seq(cs, R_030900_VGT_ESGS_RING_SIZE, 2);
2102 radeon_emit(cs, esgs_ring_size >> 8);
2103 radeon_emit(cs, gsvs_ring_size >> 8);
2105 radeon_set_config_reg_seq(cs, R_0088C8_VGT_ESGS_RING_SIZE, 2);
2106 radeon_emit(cs, esgs_ring_size >> 8);
2107 radeon_emit(cs, gsvs_ring_size >> 8);
2112 radv_emit_tess_factor_ring(struct radv_queue *queue, struct radeon_cmdbuf *cs,
2113 unsigned hs_offchip_param, unsigned tf_ring_size,
2114 struct radeon_winsys_bo *tess_rings_bo)
2121 tf_va = radv_buffer_get_va(tess_rings_bo);
2123 radv_cs_add_buffer(queue->device->ws, cs, tess_rings_bo);
2125 if (queue->device->physical_device->rad_info.chip_class >= CIK) {
2126 radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
2127 S_030938_SIZE(tf_ring_size / 4));
2128 radeon_set_uconfig_reg(cs, R_030940_VGT_TF_MEMORY_BASE,
2130 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
2131 radeon_set_uconfig_reg(cs, R_030944_VGT_TF_MEMORY_BASE_HI,
2132 S_030944_BASE_HI(tf_va >> 40));
2134 radeon_set_uconfig_reg(cs, R_03093C_VGT_HS_OFFCHIP_PARAM,
2137 radeon_set_config_reg(cs, R_008988_VGT_TF_RING_SIZE,
2138 S_008988_SIZE(tf_ring_size / 4));
2139 radeon_set_config_reg(cs, R_0089B8_VGT_TF_MEMORY_BASE,
2141 radeon_set_config_reg(cs, R_0089B0_VGT_HS_OFFCHIP_PARAM,
2147 radv_emit_compute_scratch(struct radv_queue *queue, struct radeon_cmdbuf *cs,
2148 struct radeon_winsys_bo *compute_scratch_bo)
2150 uint64_t scratch_va;
2152 if (!compute_scratch_bo)
2155 scratch_va = radv_buffer_get_va(compute_scratch_bo);
2157 radv_cs_add_buffer(queue->device->ws, cs, compute_scratch_bo);
2159 radeon_set_sh_reg_seq(cs, R_00B900_COMPUTE_USER_DATA_0, 2);
2160 radeon_emit(cs, scratch_va);
2161 radeon_emit(cs, S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
2162 S_008F04_SWIZZLE_ENABLE(1));
2166 radv_emit_global_shader_pointers(struct radv_queue *queue,
2167 struct radeon_cmdbuf *cs,
2168 struct radeon_winsys_bo *descriptor_bo)
2175 va = radv_buffer_get_va(descriptor_bo);
2177 radv_cs_add_buffer(queue->device->ws, cs, descriptor_bo);
2179 if (queue->device->physical_device->rad_info.chip_class >= GFX9) {
2180 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
2181 R_00B130_SPI_SHADER_USER_DATA_VS_0,
2182 R_00B208_SPI_SHADER_USER_DATA_ADDR_LO_GS,
2183 R_00B408_SPI_SHADER_USER_DATA_ADDR_LO_HS};
2185 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
2186 radv_emit_shader_pointer(queue->device, cs, regs[i],
2190 uint32_t regs[] = {R_00B030_SPI_SHADER_USER_DATA_PS_0,
2191 R_00B130_SPI_SHADER_USER_DATA_VS_0,
2192 R_00B230_SPI_SHADER_USER_DATA_GS_0,
2193 R_00B330_SPI_SHADER_USER_DATA_ES_0,
2194 R_00B430_SPI_SHADER_USER_DATA_HS_0,
2195 R_00B530_SPI_SHADER_USER_DATA_LS_0};
2197 for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
2198 radv_emit_shader_pointer(queue->device, cs, regs[i],
2205 radv_init_graphics_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
2207 struct radv_device *device = queue->device;
2209 if (device->gfx_init) {
2210 uint64_t va = radv_buffer_get_va(device->gfx_init);
2212 radeon_emit(cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
2213 radeon_emit(cs, va);
2214 radeon_emit(cs, va >> 32);
2215 radeon_emit(cs, device->gfx_init_size_dw & 0xffff);
2217 radv_cs_add_buffer(device->ws, cs, device->gfx_init);
2218 } else {
2219 struct radv_physical_device *physical_device = device->physical_device;
2220 si_emit_graphics(physical_device, cs);
2221 }
2225 radv_init_compute_state(struct radeon_cmdbuf *cs, struct radv_queue *queue)
2227 struct radv_physical_device *physical_device = queue->device->physical_device;
2228 si_emit_compute(physical_device, cs);
2232 radv_get_preamble_cs(struct radv_queue *queue,
2233 uint32_t scratch_size,
2234 uint32_t compute_scratch_size,
2235 uint32_t esgs_ring_size,
2236 uint32_t gsvs_ring_size,
2237 bool needs_tess_rings,
2238 bool needs_sample_positions,
2239 struct radeon_cmdbuf **initial_full_flush_preamble_cs,
2240 struct radeon_cmdbuf **initial_preamble_cs,
2241 struct radeon_cmdbuf **continue_preamble_cs)
2243 struct radeon_winsys_bo *scratch_bo = NULL;
2244 struct radeon_winsys_bo *descriptor_bo = NULL;
2245 struct radeon_winsys_bo *compute_scratch_bo = NULL;
2246 struct radeon_winsys_bo *esgs_ring_bo = NULL;
2247 struct radeon_winsys_bo *gsvs_ring_bo = NULL;
2248 struct radeon_winsys_bo *tess_rings_bo = NULL;
2249 struct radeon_cmdbuf *dest_cs[3] = {0};
2250 bool add_tess_rings = false, add_sample_positions = false;
2251 unsigned tess_factor_ring_size = 0, tess_offchip_ring_size = 0;
2252 unsigned max_offchip_buffers;
2253 unsigned hs_offchip_param = 0;
2254 unsigned tess_offchip_ring_offset;
2255 uint32_t ring_bo_flags = RADEON_FLAG_NO_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING;
2256 if (!queue->has_tess_rings) {
2257 if (needs_tess_rings)
2258 add_tess_rings = true;
2260 if (!queue->has_sample_positions) {
2261 if (needs_sample_positions)
2262 add_sample_positions = true;
2264 tess_factor_ring_size = 32768 * queue->device->physical_device->rad_info.max_se;
2265 hs_offchip_param = radv_get_hs_offchip_param(queue->device,
2266 &max_offchip_buffers);
2267 tess_offchip_ring_offset = align(tess_factor_ring_size, 64 * 1024);
2268 tess_offchip_ring_size = max_offchip_buffers *
2269 queue->device->tess_offchip_block_dw_size * 4;
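/* The tess factor ring is 32 KiB per shader engine; the offchip ring adds one
 * block of tess_offchip_block_dw_size dwords (4 bytes each) per offchip buffer
 * and starts at the next 64 KiB-aligned offset after the factor ring. */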
2271 if (scratch_size <= queue->scratch_size &&
2272 compute_scratch_size <= queue->compute_scratch_size &&
2273 esgs_ring_size <= queue->esgs_ring_size &&
2274 gsvs_ring_size <= queue->gsvs_ring_size &&
2275 !add_tess_rings && !add_sample_positions &&
2276 queue->initial_preamble_cs) {
2277 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
2278 *initial_preamble_cs = queue->initial_preamble_cs;
2279 *continue_preamble_cs = queue->continue_preamble_cs;
2280 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
2281 *continue_preamble_cs = NULL;
2282 return VK_SUCCESS;
2283 }
2285 if (scratch_size > queue->scratch_size) {
2286 scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
2294 scratch_bo = queue->scratch_bo;
2296 if (compute_scratch_size > queue->compute_scratch_size) {
2297 compute_scratch_bo = queue->device->ws->buffer_create(queue->device->ws,
2298 compute_scratch_size,
2302 if (!compute_scratch_bo)
2306 compute_scratch_bo = queue->compute_scratch_bo;
2308 if (esgs_ring_size > queue->esgs_ring_size) {
2309 esgs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
2317 esgs_ring_bo = queue->esgs_ring_bo;
2318 esgs_ring_size = queue->esgs_ring_size;
2321 if (gsvs_ring_size > queue->gsvs_ring_size) {
2322 gsvs_ring_bo = queue->device->ws->buffer_create(queue->device->ws,
2330 gsvs_ring_bo = queue->gsvs_ring_bo;
2331 gsvs_ring_size = queue->gsvs_ring_size;
2334 if (add_tess_rings) {
2335 tess_rings_bo = queue->device->ws->buffer_create(queue->device->ws,
2336 tess_offchip_ring_offset + tess_offchip_ring_size,
2343 tess_rings_bo = queue->tess_rings_bo;
2346 if (scratch_bo != queue->scratch_bo ||
2347 esgs_ring_bo != queue->esgs_ring_bo ||
2348 gsvs_ring_bo != queue->gsvs_ring_bo ||
2349 tess_rings_bo != queue->tess_rings_bo ||
2350 add_sample_positions) {
2352 if (gsvs_ring_bo || esgs_ring_bo ||
2353 tess_rings_bo || add_sample_positions) {
2354 size = 112; /* 2 dword + 2 padding + 4 dword * 6 */
2355 if (add_sample_positions)
2356 size += 256; /* (1+2+4+8+16) samples * 2 floats * 4 bytes = 248 bytes, rounded up to 256 here. */
2358 else if (scratch_bo)
2359 size = 8; /* 2 dword */
2361 descriptor_bo = queue->device->ws->buffer_create(queue->device->ws,
2365 RADEON_FLAG_CPU_ACCESS |
2366 RADEON_FLAG_NO_INTERPROCESS_SHARING |
2367 RADEON_FLAG_READ_ONLY);
2371 descriptor_bo = queue->descriptor_bo;
2373 for(int i = 0; i < 3; ++i) {
2374 struct radeon_cmdbuf *cs = NULL;
2375 cs = queue->device->ws->cs_create(queue->device->ws,
2376 queue->queue_family_index ? RING_COMPUTE : RING_GFX);
2383 radv_cs_add_buffer(queue->device->ws, cs, scratch_bo);
2385 /* Emit initial configuration. */
2386 switch (queue->queue_family_index) {
2387 case RADV_QUEUE_GENERAL:
2388 radv_init_graphics_state(cs, queue);
2390 case RADV_QUEUE_COMPUTE:
2391 radv_init_compute_state(cs, queue);
2393 case RADV_QUEUE_TRANSFER:
2397 if (descriptor_bo != queue->descriptor_bo) {
2398 uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
2401 uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
2402 uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
2403 S_008F04_SWIZZLE_ENABLE(1);
2404 map[0] = scratch_va;
2408 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo ||
2409 add_sample_positions)
2410 fill_geom_tess_rings(queue, map, add_sample_positions,
2411 esgs_ring_size, esgs_ring_bo,
2412 gsvs_ring_size, gsvs_ring_bo,
2413 tess_factor_ring_size,
2414 tess_offchip_ring_offset,
2415 tess_offchip_ring_size,
2418 queue->device->ws->buffer_unmap(descriptor_bo);
2421 if (esgs_ring_bo || gsvs_ring_bo || tess_rings_bo) {
2422 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2423 radeon_emit(cs, EVENT_TYPE(V_028A90_VS_PARTIAL_FLUSH) | EVENT_INDEX(4));
2424 radeon_emit(cs, PKT3(PKT3_EVENT_WRITE, 0, 0));
2425 radeon_emit(cs, EVENT_TYPE(V_028A90_VGT_FLUSH) | EVENT_INDEX(0));
2428 radv_emit_gs_ring_sizes(queue, cs, esgs_ring_bo, esgs_ring_size,
2429 gsvs_ring_bo, gsvs_ring_size);
2430 radv_emit_tess_factor_ring(queue, cs, hs_offchip_param,
2431 tess_factor_ring_size, tess_rings_bo);
2432 radv_emit_global_shader_pointers(queue, cs, descriptor_bo);
2433 radv_emit_compute_scratch(queue, cs, compute_scratch_bo);
2435 if (i == 0) {
2436 si_cs_emit_cache_flush(cs,
2437 queue->device->physical_device->rad_info.chip_class,
2439 queue->queue_family_index == RING_COMPUTE &&
2440 queue->device->physical_device->rad_info.chip_class >= CIK,
2441 (queue->queue_family_index == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
2442 RADV_CMD_FLAG_INV_ICACHE |
2443 RADV_CMD_FLAG_INV_SMEM_L1 |
2444 RADV_CMD_FLAG_INV_VMEM_L1 |
2445 RADV_CMD_FLAG_INV_GLOBAL_L2 |
2446 RADV_CMD_FLAG_START_PIPELINE_STATS, 0);
2447 } else if (i == 1) {
2448 si_cs_emit_cache_flush(cs,
2449 queue->device->physical_device->rad_info.chip_class,
2451 queue->queue_family_index == RING_COMPUTE &&
2452 queue->device->physical_device->rad_info.chip_class >= CIK,
2453 RADV_CMD_FLAG_INV_ICACHE |
2454 RADV_CMD_FLAG_INV_SMEM_L1 |
2455 RADV_CMD_FLAG_INV_VMEM_L1 |
2456 RADV_CMD_FLAG_INV_GLOBAL_L2 |
2457 RADV_CMD_FLAG_START_PIPELINE_STATS, 0);
2460 if (!queue->device->ws->cs_finalize(cs))
2464 if (queue->initial_full_flush_preamble_cs)
2465 queue->device->ws->cs_destroy(queue->initial_full_flush_preamble_cs);
2467 if (queue->initial_preamble_cs)
2468 queue->device->ws->cs_destroy(queue->initial_preamble_cs);
2470 if (queue->continue_preamble_cs)
2471 queue->device->ws->cs_destroy(queue->continue_preamble_cs);
2473 queue->initial_full_flush_preamble_cs = dest_cs[0];
2474 queue->initial_preamble_cs = dest_cs[1];
2475 queue->continue_preamble_cs = dest_cs[2];
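/* dest_cs[0] is the first-submission preamble (full cache flush plus partial
 * flushes), dest_cs[1] the initial preamble (cache invalidations only), and
 * dest_cs[2] the continue preamble (no flush at all). */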
2477 if (scratch_bo != queue->scratch_bo) {
2478 if (queue->scratch_bo)
2479 queue->device->ws->buffer_destroy(queue->scratch_bo);
2480 queue->scratch_bo = scratch_bo;
2481 queue->scratch_size = scratch_size;
2484 if (compute_scratch_bo != queue->compute_scratch_bo) {
2485 if (queue->compute_scratch_bo)
2486 queue->device->ws->buffer_destroy(queue->compute_scratch_bo);
2487 queue->compute_scratch_bo = compute_scratch_bo;
2488 queue->compute_scratch_size = compute_scratch_size;
2491 if (esgs_ring_bo != queue->esgs_ring_bo) {
2492 if (queue->esgs_ring_bo)
2493 queue->device->ws->buffer_destroy(queue->esgs_ring_bo);
2494 queue->esgs_ring_bo = esgs_ring_bo;
2495 queue->esgs_ring_size = esgs_ring_size;
2498 if (gsvs_ring_bo != queue->gsvs_ring_bo) {
2499 if (queue->gsvs_ring_bo)
2500 queue->device->ws->buffer_destroy(queue->gsvs_ring_bo);
2501 queue->gsvs_ring_bo = gsvs_ring_bo;
2502 queue->gsvs_ring_size = gsvs_ring_size;
2505 if (tess_rings_bo != queue->tess_rings_bo) {
2506 queue->tess_rings_bo = tess_rings_bo;
2507 queue->has_tess_rings = true;
2510 if (descriptor_bo != queue->descriptor_bo) {
2511 if (queue->descriptor_bo)
2512 queue->device->ws->buffer_destroy(queue->descriptor_bo);
2514 queue->descriptor_bo = descriptor_bo;
2517 if (add_sample_positions)
2518 queue->has_sample_positions = true;
2520 *initial_full_flush_preamble_cs = queue->initial_full_flush_preamble_cs;
2521 *initial_preamble_cs = queue->initial_preamble_cs;
2522 *continue_preamble_cs = queue->continue_preamble_cs;
2523 if (!scratch_size && !compute_scratch_size && !esgs_ring_size && !gsvs_ring_size)
2524 *continue_preamble_cs = NULL;
2527 for (int i = 0; i < ARRAY_SIZE(dest_cs); ++i)
2529 queue->device->ws->cs_destroy(dest_cs[i]);
2530 if (descriptor_bo && descriptor_bo != queue->descriptor_bo)
2531 queue->device->ws->buffer_destroy(descriptor_bo);
2532 if (scratch_bo && scratch_bo != queue->scratch_bo)
2533 queue->device->ws->buffer_destroy(scratch_bo);
2534 if (compute_scratch_bo && compute_scratch_bo != queue->compute_scratch_bo)
2535 queue->device->ws->buffer_destroy(compute_scratch_bo);
2536 if (esgs_ring_bo && esgs_ring_bo != queue->esgs_ring_bo)
2537 queue->device->ws->buffer_destroy(esgs_ring_bo);
2538 if (gsvs_ring_bo && gsvs_ring_bo != queue->gsvs_ring_bo)
2539 queue->device->ws->buffer_destroy(gsvs_ring_bo);
2540 if (tess_rings_bo && tess_rings_bo != queue->tess_rings_bo)
2541 queue->device->ws->buffer_destroy(tess_rings_bo);
2542 return vk_error(queue->device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
2545 static VkResult radv_alloc_sem_counts(struct radv_instance *instance,
2546 struct radv_winsys_sem_counts *counts,
2548 const VkSemaphore *sems,
2552 int syncobj_idx = 0, sem_idx = 0;
2554 if (num_sems == 0 && _fence == VK_NULL_HANDLE)
2557 for (uint32_t i = 0; i < num_sems; i++) {
2558 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2560 if (sem->temp_syncobj || sem->syncobj)
2561 counts->syncobj_count++;
2563 counts->sem_count++;
2566 if (_fence != VK_NULL_HANDLE) {
2567 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2568 if (fence->temp_syncobj || fence->syncobj)
2569 counts->syncobj_count++;
2572 if (counts->syncobj_count) {
2573 counts->syncobj = (uint32_t *)malloc(sizeof(uint32_t) * counts->syncobj_count);
2574 if (!counts->syncobj)
2575 return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2578 if (counts->sem_count) {
2579 counts->sem = (struct radeon_winsys_sem **)malloc(sizeof(struct radeon_winsys_sem *) * counts->sem_count);
2581 free(counts->syncobj);
2582 return vk_error(instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2586 for (uint32_t i = 0; i < num_sems; i++) {
2587 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2589 if (sem->temp_syncobj) {
2590 counts->syncobj[syncobj_idx++] = sem->temp_syncobj;
2592 else if (sem->syncobj)
2593 counts->syncobj[syncobj_idx++] = sem->syncobj;
2596 counts->sem[sem_idx++] = sem->sem;
2600 if (_fence != VK_NULL_HANDLE) {
2601 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2602 if (fence->temp_syncobj)
2603 counts->syncobj[syncobj_idx++] = fence->temp_syncobj;
2604 else if (fence->syncobj)
2605 counts->syncobj[syncobj_idx++] = fence->syncobj;
2612 radv_free_sem_info(struct radv_winsys_sem_info *sem_info)
2614 free(sem_info->wait.syncobj);
2615 free(sem_info->wait.sem);
2616 free(sem_info->signal.syncobj);
2617 free(sem_info->signal.sem);
2621 static void radv_free_temp_syncobjs(struct radv_device *device,
2623 const VkSemaphore *sems)
2625 for (uint32_t i = 0; i < num_sems; i++) {
2626 RADV_FROM_HANDLE(radv_semaphore, sem, sems[i]);
2628 if (sem->temp_syncobj) {
2629 device->ws->destroy_syncobj(device->ws, sem->temp_syncobj);
2630 sem->temp_syncobj = 0;
2636 radv_alloc_sem_info(struct radv_instance *instance,
2637 struct radv_winsys_sem_info *sem_info,
2639 const VkSemaphore *wait_sems,
2640 int num_signal_sems,
2641 const VkSemaphore *signal_sems,
2645 memset(sem_info, 0, sizeof(*sem_info));
2647 ret = radv_alloc_sem_counts(instance, &sem_info->wait, num_wait_sems, wait_sems, VK_NULL_HANDLE, true);
2650 ret = radv_alloc_sem_counts(instance, &sem_info->signal, num_signal_sems, signal_sems, fence, false);
2652 radv_free_sem_info(sem_info);
2654 /* caller can override these */
2655 sem_info->cs_emit_wait = true;
2656 sem_info->cs_emit_signal = true;
2660 /* Signals the fence as soon as all the work currently queued is done. */
2661 static VkResult radv_signal_fence(struct radv_queue *queue,
2662 struct radv_fence *fence)
2666 struct radv_winsys_sem_info sem_info;
2668 result = radv_alloc_sem_info(queue->device->instance, &sem_info, 0, NULL, 0, NULL,
2669 radv_fence_to_handle(fence));
2670 if (result != VK_SUCCESS)
2673 ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
2674 &queue->device->empty_cs[queue->queue_family_index],
2675 1, NULL, NULL, &sem_info, NULL,
2676 false, fence->fence);
2677 radv_free_sem_info(&sem_info);
2680 return vk_error(queue->device->instance, VK_ERROR_DEVICE_LOST);
2685 VkResult radv_QueueSubmit(
2687 uint32_t submitCount,
2688 const VkSubmitInfo* pSubmits,
2691 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2692 RADV_FROM_HANDLE(radv_fence, fence, _fence);
2693 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
2694 struct radeon_winsys_ctx *ctx = queue->hw_ctx;
2696 uint32_t max_cs_submission = queue->device->trace_bo ? 1 : UINT32_MAX;
2697 uint32_t scratch_size = 0;
2698 uint32_t compute_scratch_size = 0;
2699 uint32_t esgs_ring_size = 0, gsvs_ring_size = 0;
2700 struct radeon_cmdbuf *initial_preamble_cs = NULL, *initial_flush_preamble_cs = NULL, *continue_preamble_cs = NULL;
2702 bool fence_emitted = false;
2703 bool tess_rings_needed = false;
2704 bool sample_positions_needed = false;
2706 /* Do this first so failing to allocate scratch buffers can't result in
2707 * partially executed submissions. */
2708 for (uint32_t i = 0; i < submitCount; i++) {
2709 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2710 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2711 pSubmits[i].pCommandBuffers[j]);
2713 scratch_size = MAX2(scratch_size, cmd_buffer->scratch_size_needed);
2714 compute_scratch_size = MAX2(compute_scratch_size,
2715 cmd_buffer->compute_scratch_size_needed);
2716 esgs_ring_size = MAX2(esgs_ring_size, cmd_buffer->esgs_ring_size_needed);
2717 gsvs_ring_size = MAX2(gsvs_ring_size, cmd_buffer->gsvs_ring_size_needed);
2718 tess_rings_needed |= cmd_buffer->tess_rings_needed;
2719 sample_positions_needed |= cmd_buffer->sample_positions_needed;
2723 result = radv_get_preamble_cs(queue, scratch_size, compute_scratch_size,
2724 esgs_ring_size, gsvs_ring_size, tess_rings_needed,
2725 sample_positions_needed, &initial_flush_preamble_cs,
2726 &initial_preamble_cs, &continue_preamble_cs);
2727 if (result != VK_SUCCESS)
2730 for (uint32_t i = 0; i < submitCount; i++) {
2731 struct radeon_cmdbuf **cs_array;
2732 bool do_flush = !i || pSubmits[i].pWaitDstStageMask;
2733 bool can_patch = true;
2735 struct radv_winsys_sem_info sem_info;
2737 result = radv_alloc_sem_info(queue->device->instance,
2739 pSubmits[i].waitSemaphoreCount,
2740 pSubmits[i].pWaitSemaphores,
2741 pSubmits[i].signalSemaphoreCount,
2742 pSubmits[i].pSignalSemaphores,
2744 if (result != VK_SUCCESS)
2747 if (!pSubmits[i].commandBufferCount) {
2748 if (pSubmits[i].waitSemaphoreCount || pSubmits[i].signalSemaphoreCount) {
2749 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx,
2750 &queue->device->empty_cs[queue->queue_family_index],
2755 radv_loge("failed to submit CS %d\n", i);
2758 fence_emitted = true;
2760 radv_free_sem_info(&sem_info);
2764 cs_array = malloc(sizeof(struct radeon_cmdbuf *) *
2765 (pSubmits[i].commandBufferCount));
2767 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
2768 RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer,
2769 pSubmits[i].pCommandBuffers[j]);
2770 assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
2772 cs_array[j] = cmd_buffer->cs;
2773 if ((cmd_buffer->usage_flags & VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT))
2776 cmd_buffer->status = RADV_CMD_BUFFER_STATUS_PENDING;
2779 for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j += advance) {
2780 struct radeon_cmdbuf *initial_preamble = (do_flush && !j) ? initial_flush_preamble_cs : initial_preamble_cs;
2781 const struct radv_winsys_bo_list *bo_list = NULL;
2783 advance = MIN2(max_cs_submission,
2784 pSubmits[i].commandBufferCount - j);
2786 if (queue->device->trace_bo)
2787 *queue->device->trace_id_ptr = 0;
2789 sem_info.cs_emit_wait = j == 0;
2790 sem_info.cs_emit_signal = j + advance == pSubmits[i].commandBufferCount;
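/* When a submit is split into several winsys submissions (e.g. when a trace
 * buffer limits max_cs_submission to 1), only the first chunk waits on the
 * wait semaphores and only the last chunk signals. */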
2792 if (unlikely(queue->device->use_global_bo_list)) {
2793 pthread_mutex_lock(&queue->device->bo_list.mutex);
2794 bo_list = &queue->device->bo_list.list;
2797 ret = queue->device->ws->cs_submit(ctx, queue->queue_idx, cs_array + j,
2798 advance, initial_preamble, continue_preamble_cs,
2800 can_patch, base_fence);
2802 if (unlikely(queue->device->use_global_bo_list))
2803 pthread_mutex_unlock(&queue->device->bo_list.mutex);
2806 radv_loge("failed to submit CS %d\n", i);
2809 fence_emitted = true;
2810 if (queue->device->trace_bo) {
2811 radv_check_gpu_hangs(queue, cs_array[j]);
2815 radv_free_temp_syncobjs(queue->device,
2816 pSubmits[i].waitSemaphoreCount,
2817 pSubmits[i].pWaitSemaphores);
2818 radv_free_sem_info(&sem_info);
2823 if (!fence_emitted) {
2824 result = radv_signal_fence(queue, fence);
2825 if (result != VK_SUCCESS)
2828 fence->submitted = true;
2834 VkResult radv_QueueWaitIdle(
2837 RADV_FROM_HANDLE(radv_queue, queue, _queue);
2839 queue->device->ws->ctx_wait_idle(queue->hw_ctx,
2840 radv_queue_family_to_ring(queue->queue_family_index),
2845 VkResult radv_DeviceWaitIdle(
2848 RADV_FROM_HANDLE(radv_device, device, _device);
2850 for (unsigned i = 0; i < RADV_MAX_QUEUE_FAMILIES; i++) {
2851 for (unsigned q = 0; q < device->queue_count[i]; q++) {
2852 radv_QueueWaitIdle(radv_queue_to_handle(&device->queues[i][q]));
2858 VkResult radv_EnumerateInstanceExtensionProperties(
2859 const char* pLayerName,
2860 uint32_t* pPropertyCount,
2861 VkExtensionProperties* pProperties)
2863 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2865 for (int i = 0; i < RADV_INSTANCE_EXTENSION_COUNT; i++) {
2866 if (radv_supported_instance_extensions.extensions[i]) {
2867 vk_outarray_append(&out, prop) {
2868 *prop = radv_instance_extensions[i];
2873 return vk_outarray_status(&out);
2876 VkResult radv_EnumerateDeviceExtensionProperties(
2877 VkPhysicalDevice physicalDevice,
2878 const char* pLayerName,
2879 uint32_t* pPropertyCount,
2880 VkExtensionProperties* pProperties)
2882 RADV_FROM_HANDLE(radv_physical_device, device, physicalDevice);
2883 VK_OUTARRAY_MAKE(out, pProperties, pPropertyCount);
2885 for (int i = 0; i < RADV_DEVICE_EXTENSION_COUNT; i++) {
2886 if (device->supported_extensions.extensions[i]) {
2887 vk_outarray_append(&out, prop) {
2888 *prop = radv_device_extensions[i];
2893 return vk_outarray_status(&out);
2896 PFN_vkVoidFunction radv_GetInstanceProcAddr(
2897 VkInstance _instance,
2900 RADV_FROM_HANDLE(radv_instance, instance, _instance);
2902 return radv_lookup_entrypoint_checked(pName,
2903 instance ? instance->apiVersion : 0,
2904 instance ? &instance->enabled_extensions : NULL,
2908 /* The loader wants us to expose a second GetInstanceProcAddr function
2909 * to work around certain LD_PRELOAD issues seen in apps.
2912 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2913 VkInstance instance,
2917 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
2918 VkInstance instance,
2921 return radv_GetInstanceProcAddr(instance, pName);
2924 PFN_vkVoidFunction radv_GetDeviceProcAddr(
2928 RADV_FROM_HANDLE(radv_device, device, _device);
2930 return radv_lookup_entrypoint_checked(pName,
2931 device->instance->apiVersion,
2932 &device->instance->enabled_extensions,
2933 &device->enabled_extensions);
2936 bool radv_get_memory_fd(struct radv_device *device,
2937 struct radv_device_memory *memory,
2940 struct radeon_bo_metadata metadata;
2942 if (memory->image) {
2943 radv_init_metadata(device, memory->image, &metadata);
2944 device->ws->buffer_set_metadata(memory->bo, &metadata);
2947 return device->ws->buffer_get_fd(device->ws, memory->bo,
2951 static VkResult radv_alloc_memory(struct radv_device *device,
2952 const VkMemoryAllocateInfo* pAllocateInfo,
2953 const VkAllocationCallbacks* pAllocator,
2954 VkDeviceMemory* pMem)
2956 struct radv_device_memory *mem;
2958 enum radeon_bo_domain domain;
2960 enum radv_mem_type mem_type_index = device->physical_device->mem_type_indices[pAllocateInfo->memoryTypeIndex];
2962 assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
2964 if (pAllocateInfo->allocationSize == 0) {
2965 /* Apparently, this is allowed */
2966 *pMem = VK_NULL_HANDLE;
2970 const VkImportMemoryFdInfoKHR *import_info =
2971 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_FD_INFO_KHR);
2972 const VkMemoryDedicatedAllocateInfoKHR *dedicate_info =
2973 vk_find_struct_const(pAllocateInfo->pNext, MEMORY_DEDICATED_ALLOCATE_INFO_KHR);
2974 const VkExportMemoryAllocateInfoKHR *export_info =
2975 vk_find_struct_const(pAllocateInfo->pNext, EXPORT_MEMORY_ALLOCATE_INFO_KHR);
2976 const VkImportMemoryHostPointerInfoEXT *host_ptr_info =
2977 vk_find_struct_const(pAllocateInfo->pNext, IMPORT_MEMORY_HOST_POINTER_INFO_EXT);
2979 const struct wsi_memory_allocate_info *wsi_info =
2980 vk_find_struct_const(pAllocateInfo->pNext, WSI_MEMORY_ALLOCATE_INFO_MESA);
2982 mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
2983 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
2985 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
2987 if (wsi_info && wsi_info->implicit_sync)
2988 flags |= RADEON_FLAG_IMPLICIT_SYNC;
2990 if (dedicate_info) {
2991 mem->image = radv_image_from_handle(dedicate_info->image);
2992 mem->buffer = radv_buffer_from_handle(dedicate_info->buffer);
2998 mem->user_ptr = NULL;
3001 assert(import_info->handleType ==
3002 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
3003 import_info->handleType ==
3004 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
3005 mem->bo = device->ws->buffer_from_fd(device->ws, import_info->fd,
3008 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
3011 close(import_info->fd);
3013 } else if (host_ptr_info) {
3014 assert(host_ptr_info->handleType == VK_EXTERNAL_MEMORY_HANDLE_TYPE_HOST_ALLOCATION_BIT_EXT);
3015 assert(mem_type_index == RADV_MEM_TYPE_GTT_CACHED);
3016 mem->bo = device->ws->buffer_from_ptr(device->ws, host_ptr_info->pHostPointer,
3017 pAllocateInfo->allocationSize);
3019 result = VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR;
3022 mem->user_ptr = host_ptr_info->pHostPointer;
3025 uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
3026 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE ||
3027 mem_type_index == RADV_MEM_TYPE_GTT_CACHED)
3028 domain = RADEON_DOMAIN_GTT;
3030 domain = RADEON_DOMAIN_VRAM;
3032 if (mem_type_index == RADV_MEM_TYPE_VRAM)
3033 flags |= RADEON_FLAG_NO_CPU_ACCESS;
3035 flags |= RADEON_FLAG_CPU_ACCESS;
3037 if (mem_type_index == RADV_MEM_TYPE_GTT_WRITE_COMBINE)
3038 flags |= RADEON_FLAG_GTT_WC;
3040 if (!dedicate_info && !import_info && (!export_info || !export_info->handleTypes))
3041 flags |= RADEON_FLAG_NO_INTERPROCESS_SHARING;
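/* To summarize the selection above: the GTT write-combined and cached types
 * map to the GTT domain, everything else to VRAM; device-local-only VRAM also
 * gets NO_CPU_ACCESS, and write-combined GTT gets GTT_WC. */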
3043 mem->bo = device->ws->buffer_create(device->ws, alloc_size, device->physical_device->rad_info.max_alignment,
3047 result = VK_ERROR_OUT_OF_DEVICE_MEMORY;
3050 mem->type_index = mem_type_index;
3053 result = radv_bo_list_add(device, mem->bo);
3054 if (result != VK_SUCCESS)
3057 *pMem = radv_device_memory_to_handle(mem);
3062 device->ws->buffer_destroy(mem->bo);
3064 vk_free2(&device->alloc, pAllocator, mem);
3069 VkResult radv_AllocateMemory(
3071 const VkMemoryAllocateInfo* pAllocateInfo,
3072 const VkAllocationCallbacks* pAllocator,
3073 VkDeviceMemory* pMem)
3075 RADV_FROM_HANDLE(radv_device, device, _device);
3076 return radv_alloc_memory(device, pAllocateInfo, pAllocator, pMem);
3079 void radv_FreeMemory(
3081 VkDeviceMemory _mem,
3082 const VkAllocationCallbacks* pAllocator)
3084 RADV_FROM_HANDLE(radv_device, device, _device);
3085 RADV_FROM_HANDLE(radv_device_memory, mem, _mem);
3090 radv_bo_list_remove(device, mem->bo);
3091 device->ws->buffer_destroy(mem->bo);
3094 vk_free2(&device->alloc, pAllocator, mem);
3097 VkResult radv_MapMemory(
3099 VkDeviceMemory _memory,
3100 VkDeviceSize offset,
3102 VkMemoryMapFlags flags,
3105 RADV_FROM_HANDLE(radv_device, device, _device);
3106 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
3114 *ppData = mem->user_ptr;
3116 *ppData = device->ws->buffer_map(mem->bo);
3123 return vk_error(device->instance, VK_ERROR_MEMORY_MAP_FAILED);
3126 void radv_UnmapMemory(
3128 VkDeviceMemory _memory)
3130 RADV_FROM_HANDLE(radv_device, device, _device);
3131 RADV_FROM_HANDLE(radv_device_memory, mem, _memory);
3136 if (mem->user_ptr == NULL)
3137 device->ws->buffer_unmap(mem->bo);
3140 VkResult radv_FlushMappedMemoryRanges(
3142 uint32_t memoryRangeCount,
3143 const VkMappedMemoryRange* pMemoryRanges)
3148 VkResult radv_InvalidateMappedMemoryRanges(
3150 uint32_t memoryRangeCount,
3151 const VkMappedMemoryRange* pMemoryRanges)
3156 void radv_GetBufferMemoryRequirements(
3159 VkMemoryRequirements* pMemoryRequirements)
3161 RADV_FROM_HANDLE(radv_device, device, _device);
3162 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3164 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
3166 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
3167 pMemoryRequirements->alignment = 4096;
3169 pMemoryRequirements->alignment = 16;
3171 pMemoryRequirements->size = align64(buffer->size, pMemoryRequirements->alignment);
3174 void radv_GetBufferMemoryRequirements2(
3176 const VkBufferMemoryRequirementsInfo2KHR* pInfo,
3177 VkMemoryRequirements2KHR* pMemoryRequirements)
3179 radv_GetBufferMemoryRequirements(device, pInfo->buffer,
3180 &pMemoryRequirements->memoryRequirements);
3181 RADV_FROM_HANDLE(radv_buffer, buffer, pInfo->buffer);
3182 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
3183 switch (ext->sType) {
3184 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
3185 VkMemoryDedicatedRequirementsKHR *req =
3186 (VkMemoryDedicatedRequirementsKHR *) ext;
3187 req->requiresDedicatedAllocation = buffer->shareable;
3188 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
3197 void radv_GetImageMemoryRequirements(
3200 VkMemoryRequirements* pMemoryRequirements)
3202 RADV_FROM_HANDLE(radv_device, device, _device);
3203 RADV_FROM_HANDLE(radv_image, image, _image);
3205 pMemoryRequirements->memoryTypeBits = (1u << device->physical_device->memory_properties.memoryTypeCount) - 1;
3207 pMemoryRequirements->size = image->size;
3208 pMemoryRequirements->alignment = image->alignment;
3211 void radv_GetImageMemoryRequirements2(
3213 const VkImageMemoryRequirementsInfo2KHR* pInfo,
3214 VkMemoryRequirements2KHR* pMemoryRequirements)
3216 radv_GetImageMemoryRequirements(device, pInfo->image,
3217 &pMemoryRequirements->memoryRequirements);
3219 RADV_FROM_HANDLE(radv_image, image, pInfo->image);
3221 vk_foreach_struct(ext, pMemoryRequirements->pNext) {
3222 switch (ext->sType) {
3223 case VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS_KHR: {
3224 VkMemoryDedicatedRequirementsKHR *req =
3225 (VkMemoryDedicatedRequirementsKHR *) ext;
3226 req->requiresDedicatedAllocation = image->shareable;
3227 req->prefersDedicatedAllocation = req->requiresDedicatedAllocation;
3236 void radv_GetImageSparseMemoryRequirements(
3239 uint32_t* pSparseMemoryRequirementCount,
3240 VkSparseImageMemoryRequirements* pSparseMemoryRequirements)
3245 void radv_GetImageSparseMemoryRequirements2(
3247 const VkImageSparseMemoryRequirementsInfo2KHR* pInfo,
3248 uint32_t* pSparseMemoryRequirementCount,
3249 VkSparseImageMemoryRequirements2KHR* pSparseMemoryRequirements)
3254 void radv_GetDeviceMemoryCommitment(
3256 VkDeviceMemory memory,
3257 VkDeviceSize* pCommittedMemoryInBytes)
3259 *pCommittedMemoryInBytes = 0;
3262 VkResult radv_BindBufferMemory2(VkDevice device,
3263 uint32_t bindInfoCount,
3264 const VkBindBufferMemoryInfoKHR *pBindInfos)
3266 for (uint32_t i = 0; i < bindInfoCount; ++i) {
3267 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
3268 RADV_FROM_HANDLE(radv_buffer, buffer, pBindInfos[i].buffer);
3271 buffer->bo = mem->bo;
3272 buffer->offset = pBindInfos[i].memoryOffset;
3280 VkResult radv_BindBufferMemory(
3283 VkDeviceMemory memory,
3284 VkDeviceSize memoryOffset)
3286 const VkBindBufferMemoryInfoKHR info = {
3287 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
3290 .memoryOffset = memoryOffset
3293 return radv_BindBufferMemory2(device, 1, &info);
3296 VkResult radv_BindImageMemory2(VkDevice device,
3297 uint32_t bindInfoCount,
3298 const VkBindImageMemoryInfoKHR *pBindInfos)
3300 for (uint32_t i = 0; i < bindInfoCount; ++i) {
3301 RADV_FROM_HANDLE(radv_device_memory, mem, pBindInfos[i].memory);
3302 RADV_FROM_HANDLE(radv_image, image, pBindInfos[i].image);
3305 image->bo = mem->bo;
3306 image->offset = pBindInfos[i].memoryOffset;
3316 VkResult radv_BindImageMemory(
3319 VkDeviceMemory memory,
3320 VkDeviceSize memoryOffset)
3322 const VkBindImageMemoryInfoKHR info = {
3323 .sType = VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO_KHR,
3326 .memoryOffset = memoryOffset
3329 return radv_BindImageMemory2(device, 1, &info);
3334 radv_sparse_buffer_bind_memory(struct radv_device *device,
3335 const VkSparseBufferMemoryBindInfo *bind)
3337 RADV_FROM_HANDLE(radv_buffer, buffer, bind->buffer);
3339 for (uint32_t i = 0; i < bind->bindCount; ++i) {
3340 struct radv_device_memory *mem = NULL;
3342 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
3343 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
3345 device->ws->buffer_virtual_bind(buffer->bo,
3346 bind->pBinds[i].resourceOffset,
3347 bind->pBinds[i].size,
3348 mem ? mem->bo : NULL,
3349 bind->pBinds[i].memoryOffset);
3354 radv_sparse_image_opaque_bind_memory(struct radv_device *device,
3355 const VkSparseImageOpaqueMemoryBindInfo *bind)
3357 RADV_FROM_HANDLE(radv_image, image, bind->image);
3359 for (uint32_t i = 0; i < bind->bindCount; ++i) {
3360 struct radv_device_memory *mem = NULL;
3362 if (bind->pBinds[i].memory != VK_NULL_HANDLE)
3363 mem = radv_device_memory_from_handle(bind->pBinds[i].memory);
3365 device->ws->buffer_virtual_bind(image->bo,
3366 bind->pBinds[i].resourceOffset,
3367 bind->pBinds[i].size,
3368 mem ? mem->bo : NULL,
3369 bind->pBinds[i].memoryOffset);
3373 VkResult radv_QueueBindSparse(
3375 uint32_t bindInfoCount,
3376 const VkBindSparseInfo* pBindInfo,
3379 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3380 RADV_FROM_HANDLE(radv_queue, queue, _queue);
3381 struct radeon_winsys_fence *base_fence = fence ? fence->fence : NULL;
3382 bool fence_emitted = false;
3386 for (uint32_t i = 0; i < bindInfoCount; ++i) {
3387 struct radv_winsys_sem_info sem_info;
3388 for (uint32_t j = 0; j < pBindInfo[i].bufferBindCount; ++j) {
3389 radv_sparse_buffer_bind_memory(queue->device,
3390 pBindInfo[i].pBufferBinds + j);
3393 for (uint32_t j = 0; j < pBindInfo[i].imageOpaqueBindCount; ++j) {
3394 radv_sparse_image_opaque_bind_memory(queue->device,
3395 pBindInfo[i].pImageOpaqueBinds + j);
3399 result = radv_alloc_sem_info(queue->device->instance,
3401 pBindInfo[i].waitSemaphoreCount,
3402 pBindInfo[i].pWaitSemaphores,
3403 pBindInfo[i].signalSemaphoreCount,
3404 pBindInfo[i].pSignalSemaphores,
3406 if (result != VK_SUCCESS)
3409 if (pBindInfo[i].waitSemaphoreCount || pBindInfo[i].signalSemaphoreCount) {
3410 ret = queue->device->ws->cs_submit(queue->hw_ctx, queue->queue_idx,
3411 &queue->device->empty_cs[queue->queue_family_index],
3416 radv_loge("failed to submit CS %d\n", i);
3420 fence_emitted = true;
3422 fence->submitted = true;
3425 radv_free_sem_info(&sem_info);
3430 if (!fence_emitted) {
3431 result = radv_signal_fence(queue, fence);
3432 if (result != VK_SUCCESS)
3435 fence->submitted = true;
3441 VkResult radv_CreateFence(
3443 const VkFenceCreateInfo* pCreateInfo,
3444 const VkAllocationCallbacks* pAllocator,
3447 RADV_FROM_HANDLE(radv_device, device, _device);
3448 const VkExportFenceCreateInfoKHR *export =
3449 vk_find_struct_const(pCreateInfo->pNext, EXPORT_FENCE_CREATE_INFO_KHR);
3450 VkExternalFenceHandleTypeFlagsKHR handleTypes =
3451 export ? export->handleTypes : 0;
3453 struct radv_fence *fence = vk_alloc2(&device->alloc, pAllocator,
3455 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3458 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3460 fence->fence_wsi = NULL;
3461 fence->submitted = false;
3462 fence->signalled = !!(pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT);
3463 fence->temp_syncobj = 0;
3464 if (device->always_use_syncobj || handleTypes) {
3465 int ret = device->ws->create_syncobj(device->ws, &fence->syncobj);
3467 vk_free2(&device->alloc, pAllocator, fence);
3468 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3470 if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
3471 device->ws->signal_syncobj(device->ws, fence->syncobj);
3473 fence->fence = NULL;
3475 fence->fence = device->ws->create_fence();
3476 if (!fence->fence) {
3477 vk_free2(&device->alloc, pAllocator, fence);
3478 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3483 *pFence = radv_fence_to_handle(fence);
3488 void radv_DestroyFence(
3491 const VkAllocationCallbacks* pAllocator)
3493 RADV_FROM_HANDLE(radv_device, device, _device);
3494 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3499 if (fence->temp_syncobj)
3500 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
3502 device->ws->destroy_syncobj(device->ws, fence->syncobj);
3504 device->ws->destroy_fence(fence->fence);
3505 if (fence->fence_wsi)
3506 fence->fence_wsi->destroy(fence->fence_wsi);
3507 vk_free2(&device->alloc, pAllocator, fence);
3511 static uint64_t radv_get_current_time()
3514 clock_gettime(CLOCK_MONOTONIC, &tv);
3515 return tv.tv_nsec + tv.tv_sec*1000000000ull;
3518 static uint64_t radv_get_absolute_timeout(uint64_t timeout)
3520 uint64_t current_time = radv_get_current_time();
3522 timeout = MIN2(UINT64_MAX - current_time, timeout);
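/* Clamping keeps current_time + timeout from overflowing when the caller
 * passes UINT64_MAX to mean "wait forever". */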
3524 return current_time + timeout;
3528 static bool radv_all_fences_plain_and_submitted(uint32_t fenceCount, const VkFence *pFences)
3530 for (uint32_t i = 0; i < fenceCount; ++i) {
3531 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3532 if (fence->fence == NULL || fence->syncobj ||
3533 fence->temp_syncobj ||
3534 (!fence->signalled && !fence->submitted))
3540 static bool radv_all_fences_syncobj(uint32_t fenceCount, const VkFence *pFences)
3542 for (uint32_t i = 0; i < fenceCount; ++i) {
3543 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3544 if (fence->syncobj == 0 && fence->temp_syncobj == 0)
3550 VkResult radv_WaitForFences(
3552 uint32_t fenceCount,
3553 const VkFence* pFences,
3557 RADV_FROM_HANDLE(radv_device, device, _device);
3558 timeout = radv_get_absolute_timeout(timeout);
3560 if (device->always_use_syncobj &&
3561 radv_all_fences_syncobj(fenceCount, pFences))
3563 uint32_t *handles = malloc(sizeof(uint32_t) * fenceCount);
3565 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3567 for (uint32_t i = 0; i < fenceCount; ++i) {
3568 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3569 handles[i] = fence->temp_syncobj ? fence->temp_syncobj : fence->syncobj;
3572 bool success = device->ws->wait_syncobj(device->ws, handles, fenceCount, waitAll, timeout);
3575 return success ? VK_SUCCESS : VK_TIMEOUT;
3578 if (!waitAll && fenceCount > 1) {
3579 /* Not doing this by default for waitAll, due to needing to allocate twice. */
3580 if (device->physical_device->rad_info.drm_minor >= 10 && radv_all_fences_plain_and_submitted(fenceCount, pFences)) {
3581 uint32_t wait_count = 0;
3582 struct radeon_winsys_fence **fences = malloc(sizeof(struct radeon_winsys_fence *) * fenceCount);
3584 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3586 for (uint32_t i = 0; i < fenceCount; ++i) {
3587 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3589 if (fence->signalled) {
3594 fences[wait_count++] = fence->fence;
3597 bool success = device->ws->fences_wait(device->ws, fences, wait_count,
3598 waitAll, timeout - radv_get_current_time());
3601 return success ? VK_SUCCESS : VK_TIMEOUT;
3604 while(radv_get_current_time() <= timeout) {
3605 for (uint32_t i = 0; i < fenceCount; ++i) {
3606 if (radv_GetFenceStatus(_device, pFences[i]) == VK_SUCCESS)
3613 for (uint32_t i = 0; i < fenceCount; ++i) {
3614 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3615 bool expired = false;
3617 if (fence->temp_syncobj) {
3618 if (!device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, timeout))
3623 if (fence->syncobj) {
3624 if (!device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, timeout))
3629 if (fence->signalled)
3633 if (!fence->submitted) {
3634 while(radv_get_current_time() <= timeout &&
3635 !fence->submitted)
3636 /* Do nothing */;
3638 if (!fence->submitted)
3639 return VK_TIMEOUT;
3641 /* Recheck as it may have been set by
3642 * submitting operations. */
3644 if (fence->signalled)
3648 expired = device->ws->fence_wait(device->ws,
3655 if (fence->fence_wsi) {
3656 VkResult result = fence->fence_wsi->wait(fence->fence_wsi, timeout);
3657 if (result != VK_SUCCESS)
3661 fence->signalled = true;
3667 VkResult radv_ResetFences(VkDevice _device,
3668 uint32_t fenceCount,
3669 const VkFence *pFences)
3671 RADV_FROM_HANDLE(radv_device, device, _device);
3673 for (unsigned i = 0; i < fenceCount; ++i) {
3674 RADV_FROM_HANDLE(radv_fence, fence, pFences[i]);
3675 fence->submitted = fence->signalled = false;
3677 /* Per spec, we first restore the permanent payload, and then reset, so
3678 * having a temp syncobj should not skip resetting the permanent syncobj. */
3679 if (fence->temp_syncobj) {
3680 device->ws->destroy_syncobj(device->ws, fence->temp_syncobj);
3681 fence->temp_syncobj = 0;
3684 if (fence->syncobj) {
3685 device->ws->reset_syncobj(device->ws, fence->syncobj);
3692 VkResult radv_GetFenceStatus(VkDevice _device, VkFence _fence)
3694 RADV_FROM_HANDLE(radv_device, device, _device);
3695 RADV_FROM_HANDLE(radv_fence, fence, _fence);
3697 if (fence->temp_syncobj) {
3698 bool success = device->ws->wait_syncobj(device->ws, &fence->temp_syncobj, 1, true, 0);
3699 return success ? VK_SUCCESS : VK_NOT_READY;
3702 if (fence->syncobj) {
3703 bool success = device->ws->wait_syncobj(device->ws, &fence->syncobj, 1, true, 0);
3704 return success ? VK_SUCCESS : VK_NOT_READY;
3707 if (fence->signalled)
3709 if (!fence->submitted)
3710 return VK_NOT_READY;
3712 if (!device->ws->fence_wait(device->ws, fence->fence, false, 0))
3713 return VK_NOT_READY;
3715 if (fence->fence_wsi) {
3716 VkResult result = fence->fence_wsi->wait(fence->fence_wsi, 0);
3718 if (result != VK_SUCCESS) {
3719 if (result == VK_TIMEOUT)
3720 return VK_NOT_READY;
3728 // Queue semaphore functions
3730 VkResult radv_CreateSemaphore(
3732 const VkSemaphoreCreateInfo* pCreateInfo,
3733 const VkAllocationCallbacks* pAllocator,
3734 VkSemaphore* pSemaphore)
3736 RADV_FROM_HANDLE(radv_device, device, _device);
3737 const VkExportSemaphoreCreateInfoKHR *export =
3738 vk_find_struct_const(pCreateInfo->pNext, EXPORT_SEMAPHORE_CREATE_INFO_KHR);
3739 VkExternalSemaphoreHandleTypeFlagsKHR handleTypes =
3740 export ? export->handleTypes : 0;
3742 struct radv_semaphore *sem = vk_alloc2(&device->alloc, pAllocator,
3744 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3746 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3748 sem->temp_syncobj = 0;
3749 /* create a syncobject if we are going to export this semaphore */
3750 if (device->always_use_syncobj || handleTypes) {
3751 assert (device->physical_device->rad_info.has_syncobj);
3752 int ret = device->ws->create_syncobj(device->ws, &sem->syncobj);
3754 vk_free2(&device->alloc, pAllocator, sem);
3755 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3759 sem->sem = device->ws->create_sem(device->ws);
3761 vk_free2(&device->alloc, pAllocator, sem);
3762 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3767 *pSemaphore = radv_semaphore_to_handle(sem);
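/* Illustrative application-side sketch (assumed handle `dev'): requesting an
 * exportable semaphore.  Because handleTypes is non-zero, semaphore creation
 * takes the syncobj path above instead of the winsys semaphore path. */
#if 0
VkExportSemaphoreCreateInfoKHR export_info = {
	.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO_KHR,
	.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
};
VkSemaphoreCreateInfo sem_info = {
	.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
	.pNext = &export_info,
};
VkSemaphore sem;
VkResult r = vkCreateSemaphore(dev, &sem_info, NULL, &sem);
#endif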
3771 void radv_DestroySemaphore(
3773 VkSemaphore _semaphore,
3774 const VkAllocationCallbacks* pAllocator)
3776 RADV_FROM_HANDLE(radv_device, device, _device);
3777 RADV_FROM_HANDLE(radv_semaphore, sem, _semaphore);
3782 device->ws->destroy_syncobj(device->ws, sem->syncobj);
3784 device->ws->destroy_sem(sem->sem);
3785 vk_free2(&device->alloc, pAllocator, sem);
3788 VkResult radv_CreateEvent(
3790 const VkEventCreateInfo* pCreateInfo,
3791 const VkAllocationCallbacks* pAllocator,
3794 RADV_FROM_HANDLE(radv_device, device, _device);
3795 struct radv_event *event = vk_alloc2(&device->alloc, pAllocator,
3797 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3800 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3802 event->bo = device->ws->buffer_create(device->ws, 8, 8,
3804 RADEON_FLAG_VA_UNCACHED | RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING);
3806 vk_free2(&device->alloc, pAllocator, event);
3807 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
3810 event->map = (uint64_t*)device->ws->buffer_map(event->bo);
3812 *pEvent = radv_event_to_handle(event);
3817 void radv_DestroyEvent(
3820 const VkAllocationCallbacks* pAllocator)
3822 RADV_FROM_HANDLE(radv_device, device, _device);
3823 RADV_FROM_HANDLE(radv_event, event, _event);
3827 device->ws->buffer_destroy(event->bo);
3828 vk_free2(&device->alloc, pAllocator, event);
3831 VkResult radv_GetEventStatus(
3835 RADV_FROM_HANDLE(radv_event, event, _event);
3837 if (*event->map == 1)
3838 return VK_EVENT_SET;
3839 return VK_EVENT_RESET;
3842 VkResult radv_SetEvent(
3846 RADV_FROM_HANDLE(radv_event, event, _event);
3852 VkResult radv_ResetEvent(
3856 RADV_FROM_HANDLE(radv_event, event, _event);
3862 VkResult radv_CreateBuffer(
3864 const VkBufferCreateInfo* pCreateInfo,
3865 const VkAllocationCallbacks* pAllocator,
3868 RADV_FROM_HANDLE(radv_device, device, _device);
3869 struct radv_buffer *buffer;
3871 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
3873 buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
3874 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
3876 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
3878 buffer->size = pCreateInfo->size;
3879 buffer->usage = pCreateInfo->usage;
3882 buffer->flags = pCreateInfo->flags;
3884 buffer->shareable = vk_find_struct_const(pCreateInfo->pNext,
3885 EXTERNAL_MEMORY_BUFFER_CREATE_INFO_KHR) != NULL;
3887 if (pCreateInfo->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT) {
3888 buffer->bo = device->ws->buffer_create(device->ws,
3889 align64(buffer->size, 4096),
3890 4096, 0, RADEON_FLAG_VIRTUAL);
3892 vk_free2(&device->alloc, pAllocator, buffer);
3893 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
3897 *pBuffer = radv_buffer_to_handle(buffer);
3902 void radv_DestroyBuffer(
3905 const VkAllocationCallbacks* pAllocator)
3907 RADV_FROM_HANDLE(radv_device, device, _device);
3908 RADV_FROM_HANDLE(radv_buffer, buffer, _buffer);
3913 if (buffer->flags & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)
3914 device->ws->buffer_destroy(buffer->bo);
3916 vk_free2(&device->alloc, pAllocator, buffer);
3919 static inline unsigned
3920 si_tile_mode_index(const struct radv_image *image, unsigned level, bool stencil)
3923 return image->surface.u.legacy.stencil_tiling_index[level];
3925 return image->surface.u.legacy.tiling_index[level];
3928 static uint32_t radv_surface_max_layer_count(struct radv_image_view *iview)
3930 return iview->type == VK_IMAGE_VIEW_TYPE_3D ? iview->extent.depth : (iview->base_layer + iview->layer_count);
3934 radv_init_dcc_control_reg(struct radv_device *device,
3935 struct radv_image_view *iview)
3937 unsigned max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_256B;
3938 unsigned min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_32B;
3939 unsigned max_compressed_block_size;
3940 unsigned independent_64b_blocks;
3942 if (!radv_image_has_dcc(iview->image))
3945 if (iview->image->info.samples > 1) {
3946 if (iview->image->surface.bpe == 1)
3947 max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
3948 else if (iview->image->surface.bpe == 2)
3949 max_uncompressed_block_size = V_028C78_MAX_BLOCK_SIZE_128B;
3952 if (!device->physical_device->rad_info.has_dedicated_vram) {
3953 /* amdvlk: [min-compressed-block-size] should be set to 32 for
3954 * dGPU and 64 for APU because all of our APUs to date use
3955 * DIMMs which have a request granularity size of 64B while all
3956 * other chips have a 32B request size.
3958 min_compressed_block_size = V_028C78_MIN_BLOCK_SIZE_64B;
3961 if (iview->image->usage & (VK_IMAGE_USAGE_SAMPLED_BIT |
3962 VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
3963 VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) {
3964 /* If this DCC image is potentially going to be used in texture
3965 * fetches, we need some special settings.
3967 independent_64b_blocks = 1;
3968 max_compressed_block_size = V_028C78_MAX_BLOCK_SIZE_64B;
3970 /* MAX_UNCOMPRESSED_BLOCK_SIZE must be >=
3971 * MAX_COMPRESSED_BLOCK_SIZE. Set MAX_COMPRESSED_BLOCK_SIZE as
3972 * big as possible for better compression state.
3974 independent_64b_blocks = 0;
3975 max_compressed_block_size = max_uncompressed_block_size;
3978 return S_028C78_MAX_UNCOMPRESSED_BLOCK_SIZE(max_uncompressed_block_size) |
3979 S_028C78_MAX_COMPRESSED_BLOCK_SIZE(max_compressed_block_size) |
3980 S_028C78_MIN_COMPRESSED_BLOCK_SIZE(min_compressed_block_size) |
3981 S_028C78_INDEPENDENT_64B_BLOCKS(independent_64b_blocks);
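/* Worked example of the selections above (values follow directly from the
 * code under the stated conditions): for a single-sampled DCC image that can
 * be sampled (SAMPLED/TRANSFER_SRC/INPUT_ATTACHMENT usage) on a dGPU, the
 * register is built with MAX_UNCOMPRESSED_BLOCK_SIZE = 256B,
 * MIN_COMPRESSED_BLOCK_SIZE = 32B, MAX_COMPRESSED_BLOCK_SIZE = 64B and
 * INDEPENDENT_64B_BLOCKS = 1; for a pure render target on an APU it becomes
 * MIN_COMPRESSED_BLOCK_SIZE = 64B, MAX_COMPRESSED_BLOCK_SIZE = 256B and
 * INDEPENDENT_64B_BLOCKS = 0. */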
3985 radv_initialise_color_surface(struct radv_device *device,
3986 struct radv_color_buffer_info *cb,
3987 struct radv_image_view *iview)
3989 const struct vk_format_description *desc;
3990 unsigned ntype, format, swap, endian;
3991 unsigned blend_clamp = 0, blend_bypass = 0;
3993 const struct radeon_surf *surf = &iview->image->surface;
3995 desc = vk_format_description(iview->vk_format);
3997 memset(cb, 0, sizeof(*cb));
3999 /* Intensity is implemented as Red, so treat it that way. */
4000 cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
4002 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
4004 cb->cb_color_base = va >> 8;
4006 if (device->physical_device->rad_info.chip_class >= GFX9) {
4007 struct gfx9_surf_meta_flags meta;
4008 if (iview->image->dcc_offset)
4009 meta = iview->image->surface.u.gfx9.dcc;
4011 meta = iview->image->surface.u.gfx9.cmask;
4013 cb->cb_color_attrib |= S_028C74_COLOR_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
4014 S_028C74_FMASK_SW_MODE(iview->image->surface.u.gfx9.fmask.swizzle_mode) |
4015 S_028C74_RB_ALIGNED(meta.rb_aligned) |
4016 S_028C74_PIPE_ALIGNED(meta.pipe_aligned);
4018 cb->cb_color_base += iview->image->surface.u.gfx9.surf_offset >> 8;
4019 cb->cb_color_base |= iview->image->surface.tile_swizzle;
4021 const struct legacy_surf_level *level_info = &surf->u.legacy.level[iview->base_mip];
4022 unsigned pitch_tile_max, slice_tile_max, tile_mode_index;
4024 cb->cb_color_base += level_info->offset >> 8;
4025 if (level_info->mode == RADEON_SURF_MODE_2D)
4026 cb->cb_color_base |= iview->image->surface.tile_swizzle;
4028 pitch_tile_max = level_info->nblk_x / 8 - 1;
4029 slice_tile_max = (level_info->nblk_x * level_info->nblk_y) / 64 - 1;
4030 tile_mode_index = si_tile_mode_index(iview->image, iview->base_mip, false);
4032 cb->cb_color_pitch = S_028C64_TILE_MAX(pitch_tile_max);
4033 cb->cb_color_slice = S_028C68_TILE_MAX(slice_tile_max);
4034 cb->cb_color_cmask_slice = iview->image->cmask.slice_tile_max;
4036 cb->cb_color_attrib |= S_028C74_TILE_MODE_INDEX(tile_mode_index);
4038 if (radv_image_has_fmask(iview->image)) {
4039 if (device->physical_device->rad_info.chip_class >= CIK)
4040 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(iview->image->fmask.pitch_in_pixels / 8 - 1);
4041 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(iview->image->fmask.tile_mode_index);
4042 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(iview->image->fmask.slice_tile_max);
4044 /* This must be set for fast clear to work without FMASK. */
4045 if (device->physical_device->rad_info.chip_class >= CIK)
4046 cb->cb_color_pitch |= S_028C64_FMASK_TILE_MAX(pitch_tile_max);
4047 cb->cb_color_attrib |= S_028C74_FMASK_TILE_MODE_INDEX(tile_mode_index);
4048 cb->cb_color_fmask_slice = S_028C88_TILE_MAX(slice_tile_max);
4052 /* CMASK variables */
4053 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
4054 va += iview->image->cmask.offset;
4055 cb->cb_color_cmask = va >> 8;
4057 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
4058 va += iview->image->dcc_offset;
4059 cb->cb_dcc_base = va >> 8;
4060 cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
4062 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
4063 cb->cb_color_view = S_028C6C_SLICE_START(iview->base_layer) |
4064 S_028C6C_SLICE_MAX(max_slice);
4066 if (iview->image->info.samples > 1) {
4067 unsigned log_samples = util_logbase2(iview->image->info.samples);
4069 cb->cb_color_attrib |= S_028C74_NUM_SAMPLES(log_samples) |
4070 S_028C74_NUM_FRAGMENTS(log_samples);
4073 if (radv_image_has_fmask(iview->image)) {
4074 va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
4075 cb->cb_color_fmask = va >> 8;
4076 cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
4078 cb->cb_color_fmask = cb->cb_color_base;
4081 ntype = radv_translate_color_numformat(iview->vk_format,
4083 vk_format_get_first_non_void_channel(iview->vk_format));
4084 format = radv_translate_colorformat(iview->vk_format);
4085 if (format == V_028C70_COLOR_INVALID || ntype == ~0u)
4086 radv_finishme("Illegal color\n");
4087 swap = radv_translate_colorswap(iview->vk_format, FALSE);
4088 endian = radv_colorformat_endian_swap(format);
4090 /* blend clamp should be set for all NORM/SRGB types */
4091 if (ntype == V_028C70_NUMBER_UNORM ||
4092 ntype == V_028C70_NUMBER_SNORM ||
4093 ntype == V_028C70_NUMBER_SRGB)
4096 /* set blend bypass according to docs if SINT/UINT or
4097 8/24 COLOR variants */
4098 if (ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT ||
4099 format == V_028C70_COLOR_8_24 || format == V_028C70_COLOR_24_8 ||
4100 format == V_028C70_COLOR_X24_8_32_FLOAT) {
4105 if ((ntype == V_028C70_NUMBER_UINT || ntype == V_028C70_NUMBER_SINT) &&
4106 (format == V_028C70_COLOR_8 ||
4107 format == V_028C70_COLOR_8_8 ||
4108 format == V_028C70_COLOR_8_8_8_8))
4109 ->color_is_int8 = true;
4111 cb->cb_color_info = S_028C70_FORMAT(format) |
4112 S_028C70_COMP_SWAP(swap) |
4113 S_028C70_BLEND_CLAMP(blend_clamp) |
4114 S_028C70_BLEND_BYPASS(blend_bypass) |
4115 S_028C70_SIMPLE_FLOAT(1) |
4116 S_028C70_ROUND_MODE(ntype != V_028C70_NUMBER_UNORM &&
4117 ntype != V_028C70_NUMBER_SNORM &&
4118 ntype != V_028C70_NUMBER_SRGB &&
4119 format != V_028C70_COLOR_8_24 &&
4120 format != V_028C70_COLOR_24_8) |
4121 S_028C70_NUMBER_TYPE(ntype) |
4122 S_028C70_ENDIAN(endian);
4123 if (radv_image_has_fmask(iview->image)) {
4124 cb->cb_color_info |= S_028C70_COMPRESSION(1);
4125 if (device->physical_device->rad_info.chip_class == SI) {
4126 unsigned fmask_bankh = util_logbase2(iview->image->fmask.bank_height);
4127 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(fmask_bankh);
4131 if (radv_image_has_cmask(iview->image) &&
4132 !(device->instance->debug_flags & RADV_DEBUG_NO_FAST_CLEARS))
4133 cb->cb_color_info |= S_028C70_FAST_CLEAR(1);
4135 if (radv_dcc_enabled(iview->image, iview->base_mip))
4136 cb->cb_color_info |= S_028C70_DCC_ENABLE(1);
4138 cb->cb_dcc_control = radv_init_dcc_control_reg(device, iview);
4140 /* This must be set for fast clear to work without FMASK. */
4141 if (!radv_image_has_fmask(iview->image) &&
4142 device->physical_device->rad_info.chip_class == SI) {
4143 unsigned bankh = util_logbase2(iview->image->surface.u.legacy.bankh);
4144 cb->cb_color_attrib |= S_028C74_FMASK_BANK_HEIGHT(bankh);
4147 if (device->physical_device->rad_info.chip_class >= GFX9) {
4148 unsigned mip0_depth = iview->image->type == VK_IMAGE_TYPE_3D ?
4149 (iview->extent.depth - 1) : (iview->image->info.array_size - 1);
4151 cb->cb_color_view |= S_028C6C_MIP_LEVEL(iview->base_mip);
4152 cb->cb_color_attrib |= S_028C74_MIP0_DEPTH(mip0_depth) |
4153 S_028C74_RESOURCE_TYPE(iview->image->surface.u.gfx9.resource_type);
4154 cb->cb_color_attrib2 = S_028C68_MIP0_WIDTH(iview->extent.width - 1) |
4155 S_028C68_MIP0_HEIGHT(iview->extent.height - 1) |
4156 S_028C68_MAX_MIP(iview->image->info.levels - 1);
4161 radv_calc_decompress_on_z_planes(struct radv_device *device,
4162 struct radv_image_view *iview)
4164 unsigned max_zplanes = 0;
4166 assert(radv_image_is_tc_compat_htile(iview->image));
4168 if (device->physical_device->rad_info.chip_class >= GFX9) {
4169 /* Default value for 32-bit depth surfaces. */
4172 if (iview->vk_format == VK_FORMAT_D16_UNORM &&
4173 iview->image->info.samples > 1)
4176 max_zplanes = max_zplanes + 1;
4178 if (iview->vk_format == VK_FORMAT_D16_UNORM) {
4179 /* Do not enable Z plane compression for 16-bit depth
4180 * surfaces because it isn't supported on GFX8. Only
4181 * 32-bit depth surfaces are supported by the hardware.
4182 * This allows us to maintain shader compatibility and to
4183 * reduce the number of depth decompressions.
4187 if (iview->image->info.samples <= 1)
4189 else if (iview->image->info.samples <= 4)
4200 radv_initialise_ds_surface(struct radv_device *device,
4201 struct radv_ds_buffer_info *ds,
4202 struct radv_image_view *iview)
4204 unsigned level = iview->base_mip;
4205 unsigned format, stencil_format;
4206 uint64_t va, s_offs, z_offs;
4207 bool stencil_only = false;
4208 memset(ds, 0, sizeof(*ds));
4209 switch (iview->image->vk_format) {
4210 case VK_FORMAT_D24_UNORM_S8_UINT:
4211 case VK_FORMAT_X8_D24_UNORM_PACK32:
4212 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-24);
4213 ds->offset_scale = 2.0f;
4215 case VK_FORMAT_D16_UNORM:
4216 case VK_FORMAT_D16_UNORM_S8_UINT:
4217 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-16);
4218 ds->offset_scale = 4.0f;
4220 case VK_FORMAT_D32_SFLOAT:
4221 case VK_FORMAT_D32_SFLOAT_S8_UINT:
4222 ds->pa_su_poly_offset_db_fmt_cntl = S_028B78_POLY_OFFSET_NEG_NUM_DB_BITS(-23) |
4223 S_028B78_POLY_OFFSET_DB_IS_FLOAT_FMT(1);
4224 ds->offset_scale = 1.0f;
4226 case VK_FORMAT_S8_UINT:
4227 stencil_only = true;
4233 format = radv_translate_dbformat(iview->image->vk_format);
4234 stencil_format = iview->image->surface.has_stencil ?
4235 V_028044_STENCIL_8 : V_028044_STENCIL_INVALID;
4237 uint32_t max_slice = radv_surface_max_layer_count(iview) - 1;
4238 ds->db_depth_view = S_028008_SLICE_START(iview->base_layer) |
4239 S_028008_SLICE_MAX(max_slice);
4241 ds->db_htile_data_base = 0;
4242 ds->db_htile_surface = 0;
4244 va = radv_buffer_get_va(iview->bo) + iview->image->offset;
4245 s_offs = z_offs = va;
4247 if (device->physical_device->rad_info.chip_class >= GFX9) {
4248 assert(iview->image->surface.u.gfx9.surf_offset == 0);
4249 s_offs += iview->image->surface.u.gfx9.stencil_offset;
4251 ds->db_z_info = S_028038_FORMAT(format) |
4252 S_028038_NUM_SAMPLES(util_logbase2(iview->image->info.samples)) |
4253 S_028038_SW_MODE(iview->image->surface.u.gfx9.surf.swizzle_mode) |
4254 S_028038_MAXMIP(iview->image->info.levels - 1) |
4255 S_028038_ZRANGE_PRECISION(1);
4256 ds->db_stencil_info = S_02803C_FORMAT(stencil_format) |
4257 S_02803C_SW_MODE(iview->image->surface.u.gfx9.stencil.swizzle_mode);
4259 ds->db_z_info2 = S_028068_EPITCH(iview->image->surface.u.gfx9.surf.epitch);
4260 ds->db_stencil_info2 = S_02806C_EPITCH(iview->image->surface.u.gfx9.stencil.epitch);
4261 ds->db_depth_view |= S_028008_MIPID(level);
4263 ds->db_depth_size = S_02801C_X_MAX(iview->image->info.width - 1) |
4264 S_02801C_Y_MAX(iview->image->info.height - 1);
4266 if (radv_htile_enabled(iview->image, level)) {
4267 ds->db_z_info |= S_028038_TILE_SURFACE_ENABLE(1);
4269 if (radv_image_is_tc_compat_htile(iview->image)) {
4270 unsigned max_zplanes =
4271 radv_calc_decompress_on_z_planes(device, iview);
4273 ds->db_z_info |= S_028038_DECOMPRESS_ON_N_ZPLANES(max_zplanes) |
4274 S_028038_ITERATE_FLUSH(1);
4275 ds->db_stencil_info |= S_02803C_ITERATE_FLUSH(1);
4278 if (!iview->image->surface.has_stencil)
4279 /* Use all of the htile_buffer for depth if there's no stencil. */
4280 ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
4281 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
4282 iview->image->htile_offset;
4283 ds->db_htile_data_base = va >> 8;
4284 ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
4285 S_028ABC_PIPE_ALIGNED(iview->image->surface.u.gfx9.htile.pipe_aligned) |
4286 S_028ABC_RB_ALIGNED(iview->image->surface.u.gfx9.htile.rb_aligned);
4289 const struct legacy_surf_level *level_info = &iview->image->surface.u.legacy.level[level];
4292 level_info = &iview->image->surface.u.legacy.stencil_level[level];
4294 z_offs += iview->image->surface.u.legacy.level[level].offset;
4295 s_offs += iview->image->surface.u.legacy.stencil_level[level].offset;
4297 ds->db_depth_info = S_02803C_ADDR5_SWIZZLE_MASK(!radv_image_is_tc_compat_htile(iview->image));
4298 ds->db_z_info = S_028040_FORMAT(format) | S_028040_ZRANGE_PRECISION(1);
4299 ds->db_stencil_info = S_028044_FORMAT(stencil_format);
4301 if (iview->image->info.samples > 1)
4302 ds->db_z_info |= S_028040_NUM_SAMPLES(util_logbase2(iview->image->info.samples));
4304 if (device->physical_device->rad_info.chip_class >= CIK) {
4305 struct radeon_info *info = &device->physical_device->rad_info;
4306 unsigned tiling_index = iview->image->surface.u.legacy.tiling_index[level];
4307 unsigned stencil_index = iview->image->surface.u.legacy.stencil_tiling_index[level];
4308 unsigned macro_index = iview->image->surface.u.legacy.macro_tile_index;
4309 unsigned tile_mode = info->si_tile_mode_array[tiling_index];
4310 unsigned stencil_tile_mode = info->si_tile_mode_array[stencil_index];
4311 unsigned macro_mode = info->cik_macrotile_mode_array[macro_index];
4314 tile_mode = stencil_tile_mode;
4316 ds->db_depth_info |=
4317 S_02803C_ARRAY_MODE(G_009910_ARRAY_MODE(tile_mode)) |
4318 S_02803C_PIPE_CONFIG(G_009910_PIPE_CONFIG(tile_mode)) |
4319 S_02803C_BANK_WIDTH(G_009990_BANK_WIDTH(macro_mode)) |
4320 S_02803C_BANK_HEIGHT(G_009990_BANK_HEIGHT(macro_mode)) |
4321 S_02803C_MACRO_TILE_ASPECT(G_009990_MACRO_TILE_ASPECT(macro_mode)) |
4322 S_02803C_NUM_BANKS(G_009990_NUM_BANKS(macro_mode));
4323 ds->db_z_info |= S_028040_TILE_SPLIT(G_009910_TILE_SPLIT(tile_mode));
4324 ds->db_stencil_info |= S_028044_TILE_SPLIT(G_009910_TILE_SPLIT(stencil_tile_mode));
4326 unsigned tile_mode_index = si_tile_mode_index(iview->image, level, false);
4327 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
4328 tile_mode_index = si_tile_mode_index(iview->image, level, true);
4329 ds->db_stencil_info |= S_028044_TILE_MODE_INDEX(tile_mode_index);
4331 ds->db_z_info |= S_028040_TILE_MODE_INDEX(tile_mode_index);
4334 ds->db_depth_size = S_028058_PITCH_TILE_MAX((level_info->nblk_x / 8) - 1) |
4335 S_028058_HEIGHT_TILE_MAX((level_info->nblk_y / 8) - 1);
4336 ds->db_depth_slice = S_02805C_SLICE_TILE_MAX((level_info->nblk_x * level_info->nblk_y) / 64 - 1);
4338 if (radv_htile_enabled(iview->image, level)) {
4339 ds->db_z_info |= S_028040_TILE_SURFACE_ENABLE(1);
4341 if (!iview->image->surface.has_stencil &&
4342 !radv_image_is_tc_compat_htile(iview->image))
4343 /* Use all of the htile_buffer for depth if there's no stencil. */
4344 ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
4346 va = radv_buffer_get_va(iview->bo) + iview->image->offset +
4347 iview->image->htile_offset;
4348 ds->db_htile_data_base = va >> 8;
4349 ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
4351 if (radv_image_is_tc_compat_htile(iview->image)) {
4352 unsigned max_zplanes =
4353 radv_calc_decompress_on_z_planes(device, iview);
4355 ds->db_htile_surface |= S_028ABC_TC_COMPATIBLE(1);
4356 ds->db_z_info |= S_028040_DECOMPRESS_ON_N_ZPLANES(max_zplanes);
4361 ds->db_z_read_base = ds->db_z_write_base = z_offs >> 8;
4362 ds->db_stencil_read_base = ds->db_stencil_write_base = s_offs >> 8;
4365 VkResult radv_CreateFramebuffer(
4367 const VkFramebufferCreateInfo* pCreateInfo,
4368 const VkAllocationCallbacks* pAllocator,
4369 VkFramebuffer* pFramebuffer)
4371 RADV_FROM_HANDLE(radv_device, device, _device);
4372 struct radv_framebuffer *framebuffer;
4374 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
4376 size_t size = sizeof(*framebuffer) +
4377 sizeof(struct radv_attachment_info) * pCreateInfo->attachmentCount;
4378 framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
4379 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4380 if (framebuffer == NULL)
4381 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4383 framebuffer->attachment_count = pCreateInfo->attachmentCount;
4384 framebuffer->width = pCreateInfo->width;
4385 framebuffer->height = pCreateInfo->height;
4386 framebuffer->layers = pCreateInfo->layers;
4387 for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
4388 VkImageView _iview = pCreateInfo->pAttachments[i];
4389 struct radv_image_view *iview = radv_image_view_from_handle(_iview);
4390 framebuffer->attachments[i].attachment = iview;
4391 if (iview->aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
4392 radv_initialise_color_surface(device, &framebuffer->attachments[i].cb, iview);
4393 } else if (iview->aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
4394 radv_initialise_ds_surface(device, &framebuffer->attachments[i].ds, iview);
4396 framebuffer->width = MIN2(framebuffer->width, iview->extent.width);
4397 framebuffer->height = MIN2(framebuffer->height, iview->extent.height);
4398 framebuffer->layers = MIN2(framebuffer->layers, radv_surface_max_layer_count(iview));
4401 *pFramebuffer = radv_framebuffer_to_handle(framebuffer);
4405 void radv_DestroyFramebuffer(
4408 const VkAllocationCallbacks* pAllocator)
4410 RADV_FROM_HANDLE(radv_device, device, _device);
4411 RADV_FROM_HANDLE(radv_framebuffer, fb, _fb);
4415 vk_free2(&device->alloc, pAllocator, fb);
4418 static unsigned radv_tex_wrap(VkSamplerAddressMode address_mode)
4420 switch (address_mode) {
4421 case VK_SAMPLER_ADDRESS_MODE_REPEAT:
4422 return V_008F30_SQ_TEX_WRAP;
4423 case VK_SAMPLER_ADDRESS_MODE_MIRRORED_REPEAT:
4424 return V_008F30_SQ_TEX_MIRROR;
4425 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_EDGE:
4426 return V_008F30_SQ_TEX_CLAMP_LAST_TEXEL;
4427 case VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER:
4428 return V_008F30_SQ_TEX_CLAMP_BORDER;
4429 case VK_SAMPLER_ADDRESS_MODE_MIRROR_CLAMP_TO_EDGE:
4430 return V_008F30_SQ_TEX_MIRROR_ONCE_LAST_TEXEL;
4432 unreachable("illegal tex wrap mode");
4438 radv_tex_compare(VkCompareOp op)
4441 case VK_COMPARE_OP_NEVER:
4442 return V_008F30_SQ_TEX_DEPTH_COMPARE_NEVER;
4443 case VK_COMPARE_OP_LESS:
4444 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESS;
4445 case VK_COMPARE_OP_EQUAL:
4446 return V_008F30_SQ_TEX_DEPTH_COMPARE_EQUAL;
4447 case VK_COMPARE_OP_LESS_OR_EQUAL:
4448 return V_008F30_SQ_TEX_DEPTH_COMPARE_LESSEQUAL;
4449 case VK_COMPARE_OP_GREATER:
4450 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATER;
4451 case VK_COMPARE_OP_NOT_EQUAL:
4452 return V_008F30_SQ_TEX_DEPTH_COMPARE_NOTEQUAL;
4453 case VK_COMPARE_OP_GREATER_OR_EQUAL:
4454 return V_008F30_SQ_TEX_DEPTH_COMPARE_GREATEREQUAL;
4455 case VK_COMPARE_OP_ALWAYS:
4456 return V_008F30_SQ_TEX_DEPTH_COMPARE_ALWAYS;
4458 unreachable("illegal compare mode");
4464 radv_tex_filter(VkFilter filter, unsigned max_ansio)
4467 case VK_FILTER_NEAREST:
4468 return (max_ansio > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_POINT :
4469 V_008F38_SQ_TEX_XY_FILTER_POINT);
4470 case VK_FILTER_LINEAR:
4471 return (max_ansio > 1 ? V_008F38_SQ_TEX_XY_FILTER_ANISO_BILINEAR :
4472 V_008F38_SQ_TEX_XY_FILTER_BILINEAR);
4473 case VK_FILTER_CUBIC_IMG:
4475 fprintf(stderr, "illegal texture filter");
4481 radv_tex_mipfilter(VkSamplerMipmapMode mode)
4484 case VK_SAMPLER_MIPMAP_MODE_NEAREST:
4485 return V_008F38_SQ_TEX_Z_FILTER_POINT;
4486 case VK_SAMPLER_MIPMAP_MODE_LINEAR:
4487 return V_008F38_SQ_TEX_Z_FILTER_LINEAR;
4489 return V_008F38_SQ_TEX_Z_FILTER_NONE;
4494 radv_tex_bordercolor(VkBorderColor bcolor)
4497 case VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK:
4498 case VK_BORDER_COLOR_INT_TRANSPARENT_BLACK:
4499 return V_008F3C_SQ_TEX_BORDER_COLOR_TRANS_BLACK;
4500 case VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK:
4501 case VK_BORDER_COLOR_INT_OPAQUE_BLACK:
4502 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_BLACK;
4503 case VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE:
4504 case VK_BORDER_COLOR_INT_OPAQUE_WHITE:
4505 return V_008F3C_SQ_TEX_BORDER_COLOR_OPAQUE_WHITE;
4513 radv_tex_aniso_filter(unsigned filter)
4527 radv_tex_filter_mode(VkSamplerReductionModeEXT mode)
4530 case VK_SAMPLER_REDUCTION_MODE_WEIGHTED_AVERAGE_EXT:
4531 return SQ_IMG_FILTER_MODE_BLEND;
4532 case VK_SAMPLER_REDUCTION_MODE_MIN_EXT:
4533 return SQ_IMG_FILTER_MODE_MIN;
4534 case VK_SAMPLER_REDUCTION_MODE_MAX_EXT:
4535 return SQ_IMG_FILTER_MODE_MAX;
4543 radv_get_max_anisotropy(struct radv_device *device,
4544 const VkSamplerCreateInfo *pCreateInfo)
4546 if (device->force_aniso >= 0)
4547 return device->force_aniso;
4549 if (pCreateInfo->anisotropyEnable &&
4550 pCreateInfo->maxAnisotropy > 1.0f)
4551 return (uint32_t)pCreateInfo->maxAnisotropy;
4557 radv_init_sampler(struct radv_device *device,
4558 struct radv_sampler *sampler,
4559 const VkSamplerCreateInfo *pCreateInfo)
4561 uint32_t max_aniso = radv_get_max_anisotropy(device, pCreateInfo);
4562 uint32_t max_aniso_ratio = radv_tex_aniso_filter(max_aniso);
4563 bool is_vi = (device->physical_device->rad_info.chip_class >= VI);
4564 unsigned filter_mode = SQ_IMG_FILTER_MODE_BLEND;
4566 const struct VkSamplerReductionModeCreateInfoEXT *sampler_reduction =
4567 vk_find_struct_const(pCreateInfo->pNext,
4568 SAMPLER_REDUCTION_MODE_CREATE_INFO_EXT);
4569 if (sampler_reduction)
4570 filter_mode = radv_tex_filter_mode(sampler_reduction->reductionMode);
4572 sampler->state[0] = (S_008F30_CLAMP_X(radv_tex_wrap(pCreateInfo->addressModeU)) |
4573 S_008F30_CLAMP_Y(radv_tex_wrap(pCreateInfo->addressModeV)) |
4574 S_008F30_CLAMP_Z(radv_tex_wrap(pCreateInfo->addressModeW)) |
4575 S_008F30_MAX_ANISO_RATIO(max_aniso_ratio) |
4576 S_008F30_DEPTH_COMPARE_FUNC(radv_tex_compare(pCreateInfo->compareOp)) |
4577 S_008F30_FORCE_UNNORMALIZED(pCreateInfo->unnormalizedCoordinates ? 1 : 0) |
4578 S_008F30_ANISO_THRESHOLD(max_aniso_ratio >> 1) |
4579 S_008F30_ANISO_BIAS(max_aniso_ratio) |
4580 S_008F30_DISABLE_CUBE_WRAP(0) |
4581 S_008F30_COMPAT_MODE(is_vi) |
4582 S_008F30_FILTER_MODE(filter_mode));
4583 sampler->state[1] = (S_008F34_MIN_LOD(S_FIXED(CLAMP(pCreateInfo->minLod, 0, 15), 8)) |
4584 S_008F34_MAX_LOD(S_FIXED(CLAMP(pCreateInfo->maxLod, 0, 15), 8)) |
4585 S_008F34_PERF_MIP(max_aniso_ratio ? max_aniso_ratio + 6 : 0));
4586 sampler->state[2] = (S_008F38_LOD_BIAS(S_FIXED(CLAMP(pCreateInfo->mipLodBias, -16, 16), 8)) |
4587 S_008F38_XY_MAG_FILTER(radv_tex_filter(pCreateInfo->magFilter, max_aniso)) |
4588 S_008F38_XY_MIN_FILTER(radv_tex_filter(pCreateInfo->minFilter, max_aniso)) |
4589 S_008F38_MIP_FILTER(radv_tex_mipfilter(pCreateInfo->mipmapMode)) |
4590 S_008F38_MIP_POINT_PRECLAMP(0) |
4591 S_008F38_DISABLE_LSB_CEIL(device->physical_device->rad_info.chip_class <= VI) |
4592 S_008F38_FILTER_PREC_FIX(1) |
4593 S_008F38_ANISO_OVERRIDE(is_vi));
4594 sampler->state[3] = (S_008F3C_BORDER_COLOR_PTR(0) |
4595 S_008F3C_BORDER_COLOR_TYPE(radv_tex_bordercolor(pCreateInfo->borderColor)));
4598 VkResult radv_CreateSampler(
4600 const VkSamplerCreateInfo* pCreateInfo,
4601 const VkAllocationCallbacks* pAllocator,
4602 VkSampler* pSampler)
4604 RADV_FROM_HANDLE(radv_device, device, _device);
4605 struct radv_sampler *sampler;
4607 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO);
4609 sampler = vk_alloc2(&device->alloc, pAllocator, sizeof(*sampler), 8,
4610 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
4612 return vk_error(device->instance, VK_ERROR_OUT_OF_HOST_MEMORY);
4614 radv_init_sampler(device, sampler, pCreateInfo);
4615 *pSampler = radv_sampler_to_handle(sampler);
4620 void radv_DestroySampler(
4623 const VkAllocationCallbacks* pAllocator)
4625 RADV_FROM_HANDLE(radv_device, device, _device);
4626 RADV_FROM_HANDLE(radv_sampler, sampler, _sampler);
4630 vk_free2(&device->alloc, pAllocator, sampler);
4633 /* vk_icd.h does not declare this function, so we declare it here to
4634 * suppress Wmissing-prototypes.
4636 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
4637 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion);
4639 PUBLIC VKAPI_ATTR VkResult VKAPI_CALL
4640 vk_icdNegotiateLoaderICDInterfaceVersion(uint32_t *pSupportedVersion)
4642 /* For the full details on loader interface versioning, see
4643 * <https://github.com/KhronosGroup/Vulkan-LoaderAndValidationLayers/blob/master/loader/LoaderAndLayerInterface.md>.
4644 * What follows is a condensed summary, to help you navigate the large and
4645 * confusing official doc.
4647 * - Loader interface v0 is incompatible with later versions. We don't
4650 * - In loader interface v1:
4651 * - The first ICD entrypoint called by the loader is
4652 * vk_icdGetInstanceProcAddr(). The ICD must statically expose this
4654 * - The ICD must statically expose no other Vulkan symbol unless it is
4655 * linked with -Bsymbolic.
4656 * - Each dispatchable Vulkan handle created by the ICD must be
4657 * a pointer to a struct whose first member is VK_LOADER_DATA. The
4658 * ICD must initialize VK_LOADER_DATA.loadMagic to ICD_LOADER_MAGIC.
4659 * - The loader implements vkCreate{PLATFORM}SurfaceKHR() and
4660 * vkDestroySurfaceKHR(). The ICD must be capable of working with
4661 * such loader-managed surfaces.
4663 * - Loader interface v2 differs from v1 in:
4664 * - The first ICD entrypoint called by the loader is
4665 * vk_icdNegotiateLoaderICDInterfaceVersion(). The ICD must
4666 * statically expose this entrypoint.
4668 * - Loader interface v3 differs from v2 in:
4669 * - The ICD must implement vkCreate{PLATFORM}SurfaceKHR(),
4670 * vkDestroySurfaceKHR(), and other API which uses VKSurfaceKHR,
4671 * because the loader no longer does so.
4673 *pSupportedVersion = MIN2(*pSupportedVersion, 3u);
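/* Illustrative sketch of the negotiation from the loader's side (hypothetical
 * caller, not part of this driver): the loader passes in the highest interface
 * version it supports and the ICD clamps it to the highest version it
 * implements (3 here). */
#if 0
uint32_t version = 4;	/* loader's maximum supported interface version */
vk_icdNegotiateLoaderICDInterfaceVersion(&version);
/* version == 3 now; both sides proceed with loader interface v3. */
#endif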
4677 VkResult radv_GetMemoryFdKHR(VkDevice _device,
4678 const VkMemoryGetFdInfoKHR *pGetFdInfo,
4681 RADV_FROM_HANDLE(radv_device, device, _device);
4682 RADV_FROM_HANDLE(radv_device_memory, memory, pGetFdInfo->memory);
4684 assert(pGetFdInfo->sType == VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR);
4686 /* At the moment, we support only the below handle types. */
4687 assert(pGetFdInfo->handleType ==
4688 VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
4689 pGetFdInfo->handleType ==
4690 VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT);
4692 bool ret = radv_get_memory_fd(device, memory, pFD);
4694 return vk_error(device->instance, VK_ERROR_OUT_OF_DEVICE_MEMORY);
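/* Illustrative application-side sketch (assumed handles `dev' and `mem', where
 * `mem' was allocated with a VkExportMemoryAllocateInfoKHR chained in):
 * exporting the allocation as an opaque fd via the entry point above. */
#if 0
VkMemoryGetFdInfoKHR get_fd = {
	.sType = VK_STRUCTURE_TYPE_MEMORY_GET_FD_INFO_KHR,
	.memory = mem,
	.handleType = VK_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD_BIT_KHR,
};
int fd = -1;
VkResult r = vkGetMemoryFdKHR(dev, &get_fd, &fd);
#endif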
4698 VkResult radv_GetMemoryFdPropertiesKHR(VkDevice _device,
4699 VkExternalMemoryHandleTypeFlagBitsKHR handleType,
4701 VkMemoryFdPropertiesKHR *pMemoryFdProperties)
4703 RADV_FROM_HANDLE(radv_device, device, _device);
4705 switch (handleType) {
4706 case VK_EXTERNAL_MEMORY_HANDLE_TYPE_DMA_BUF_BIT_EXT:
4707 pMemoryFdProperties->memoryTypeBits = (1 << RADV_MEM_TYPE_COUNT) - 1;
4711 /* The valid usage section for this function says:
4713 * "handleType must not be one of the handle types defined as
4716 * So opaque handle types fall into the default "unsupported" case.
4718 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4722 static VkResult radv_import_opaque_fd(struct radv_device *device,
4726 uint32_t syncobj_handle = 0;
4727 int ret = device->ws->import_syncobj(device->ws, fd, &syncobj_handle);
4729 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4732 device->ws->destroy_syncobj(device->ws, *syncobj);
4734 *syncobj = syncobj_handle;
4740 static VkResult radv_import_sync_fd(struct radv_device *device,
4744 /* If we create a syncobj we do it locally so that if we have an error, we don't
4745 * leave a syncobj in an undetermined state in the fence. */
4746 uint32_t syncobj_handle = *syncobj;
4747 if (!syncobj_handle) {
4748 int ret = device->ws->create_syncobj(device->ws, &syncobj_handle);
4750 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4755 device->ws->signal_syncobj(device->ws, syncobj_handle);
4757 int ret = device->ws->import_syncobj_from_sync_file(device->ws, syncobj_handle, fd);
4759 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4762 *syncobj = syncobj_handle;
4769 VkResult radv_ImportSemaphoreFdKHR(VkDevice _device,
4770 const VkImportSemaphoreFdInfoKHR *pImportSemaphoreFdInfo)
4772 RADV_FROM_HANDLE(radv_device, device, _device);
4773 RADV_FROM_HANDLE(radv_semaphore, sem, pImportSemaphoreFdInfo->semaphore);
4774 uint32_t *syncobj_dst = NULL;
4776 if (pImportSemaphoreFdInfo->flags & VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR) {
4777 syncobj_dst = &sem->temp_syncobj;
4779 syncobj_dst = &sem->syncobj;
4782 switch(pImportSemaphoreFdInfo->handleType) {
4783 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4784 return radv_import_opaque_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
4785 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4786 return radv_import_sync_fd(device, pImportSemaphoreFdInfo->fd, syncobj_dst);
4788 unreachable("Unhandled semaphore handle type");
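/* Illustrative application-side sketch (assumed handles `dev', `sem' and a
 * sync-file `fd' produced elsewhere, e.g. by another driver): importing a
 * sync fd.  The spec requires sync-fd imports to be temporary, which is why
 * the temp_syncobj path above exists. */
#if 0
VkImportSemaphoreFdInfoKHR import = {
	.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR,
	.semaphore = sem,
	.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT_KHR,
	.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR,
	.fd = fd,
};
VkResult r = vkImportSemaphoreFdKHR(dev, &import);
#endif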
4792 VkResult radv_GetSemaphoreFdKHR(VkDevice _device,
4793 const VkSemaphoreGetFdInfoKHR *pGetFdInfo,
4796 RADV_FROM_HANDLE(radv_device, device, _device);
4797 RADV_FROM_HANDLE(radv_semaphore, sem, pGetFdInfo->semaphore);
4799 uint32_t syncobj_handle;
4801 if (sem->temp_syncobj)
4802 syncobj_handle = sem->temp_syncobj;
4804 syncobj_handle = sem->syncobj;
4806 switch(pGetFdInfo->handleType) {
4807 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4808 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
4810 case VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4811 ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
4813 if (sem->temp_syncobj) {
4814 close (sem->temp_syncobj);
4815 sem->temp_syncobj = 0;
4817 device->ws->reset_syncobj(device->ws, syncobj_handle);
4822 unreachable("Unhandled semaphore handle type");
4826 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4830 void radv_GetPhysicalDeviceExternalSemaphoreProperties(
4831 VkPhysicalDevice physicalDevice,
4832 const VkPhysicalDeviceExternalSemaphoreInfoKHR* pExternalSemaphoreInfo,
4833 VkExternalSemaphorePropertiesKHR* pExternalSemaphoreProperties)
4835 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
4837 /* Require has_syncobj_wait_for_submit for the syncobj signal ioctl introduced at virtually the same time */
4838 if (pdevice->rad_info.has_syncobj_wait_for_submit &&
4839 (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
4840 pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR)) {
4841 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4842 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4843 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
4844 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
4845 } else if (pExternalSemaphoreInfo->handleType == VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR) {
4846 pExternalSemaphoreProperties->exportFromImportedHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
4847 pExternalSemaphoreProperties->compatibleHandleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR;
4848 pExternalSemaphoreProperties->externalSemaphoreFeatures = VK_EXTERNAL_SEMAPHORE_FEATURE_EXPORTABLE_BIT_KHR |
4849 VK_EXTERNAL_SEMAPHORE_FEATURE_IMPORTABLE_BIT_KHR;
4851 pExternalSemaphoreProperties->exportFromImportedHandleTypes = 0;
4852 pExternalSemaphoreProperties->compatibleHandleTypes = 0;
4853 pExternalSemaphoreProperties->externalSemaphoreFeatures = 0;
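/* Illustrative sketch (hypothetical caller with an assumed `physical_device'
 * handle): an application is expected to query these properties before relying
 * on import/export, and only use a handle type when the importable/exportable
 * feature bits are reported. */
#if 0
VkPhysicalDeviceExternalSemaphoreInfoKHR info = {
	.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_EXTERNAL_SEMAPHORE_INFO_KHR,
	.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT_KHR,
};
VkExternalSemaphorePropertiesKHR props = {
	.sType = VK_STRUCTURE_TYPE_EXTERNAL_SEMAPHORE_PROPERTIES_KHR,
};
vkGetPhysicalDeviceExternalSemaphorePropertiesKHR(physical_device, &info, &props);
#endif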
4857 VkResult radv_ImportFenceFdKHR(VkDevice _device,
4858 const VkImportFenceFdInfoKHR *pImportFenceFdInfo)
4860 RADV_FROM_HANDLE(radv_device, device, _device);
4861 RADV_FROM_HANDLE(radv_fence, fence, pImportFenceFdInfo->fence);
4862 uint32_t *syncobj_dst = NULL;
4865 if (pImportFenceFdInfo->flags & VK_FENCE_IMPORT_TEMPORARY_BIT_KHR) {
4866 syncobj_dst = &fence->temp_syncobj;
4868 syncobj_dst = &fence->syncobj;
4871 switch(pImportFenceFdInfo->handleType) {
4872 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4873 return radv_import_opaque_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
4874 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4875 return radv_import_sync_fd(device, pImportFenceFdInfo->fd, syncobj_dst);
4877 unreachable("Unhandled fence handle type");
4881 VkResult radv_GetFenceFdKHR(VkDevice _device,
4882 const VkFenceGetFdInfoKHR *pGetFdInfo,
4885 RADV_FROM_HANDLE(radv_device, device, _device);
4886 RADV_FROM_HANDLE(radv_fence, fence, pGetFdInfo->fence);
4888 uint32_t syncobj_handle;
4890 if (fence->temp_syncobj)
4891 syncobj_handle = fence->temp_syncobj;
4893 syncobj_handle = fence->syncobj;
4895 switch(pGetFdInfo->handleType) {
4896 case VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR:
4897 ret = device->ws->export_syncobj(device->ws, syncobj_handle, pFd);
4899 case VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR:
4900 ret = device->ws->export_syncobj_to_sync_file(device->ws, syncobj_handle, pFd);
4902 if (fence->temp_syncobj) {
4903 close (fence->temp_syncobj);
4904 fence->temp_syncobj = 0;
4906 device->ws->reset_syncobj(device->ws, syncobj_handle);
4911 unreachable("Unhandled fence handle type");
4915 return vk_error(device->instance, VK_ERROR_INVALID_EXTERNAL_HANDLE_KHR);
4919 void radv_GetPhysicalDeviceExternalFenceProperties(
4920 VkPhysicalDevice physicalDevice,
4921 const VkPhysicalDeviceExternalFenceInfoKHR* pExternalFenceInfo,
4922 VkExternalFencePropertiesKHR* pExternalFenceProperties)
4924 RADV_FROM_HANDLE(radv_physical_device, pdevice, physicalDevice);
4926 if (pdevice->rad_info.has_syncobj_wait_for_submit &&
4927 (pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR ||
4928 pExternalFenceInfo->handleType == VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR)) {
4929 pExternalFenceProperties->exportFromImportedHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4930 pExternalFenceProperties->compatibleHandleTypes = VK_EXTERNAL_FENCE_HANDLE_TYPE_OPAQUE_FD_BIT_KHR | VK_EXTERNAL_FENCE_HANDLE_TYPE_SYNC_FD_BIT_KHR;
4931 pExternalFenceProperties->externalFenceFeatures = VK_EXTERNAL_FENCE_FEATURE_EXPORTABLE_BIT_KHR |
4932 VK_EXTERNAL_FENCE_FEATURE_IMPORTABLE_BIT_KHR;
4934 pExternalFenceProperties->exportFromImportedHandleTypes = 0;
4935 pExternalFenceProperties->compatibleHandleTypes = 0;
4936 pExternalFenceProperties->externalFenceFeatures = 0;
4941 radv_CreateDebugReportCallbackEXT(VkInstance _instance,
4942 const VkDebugReportCallbackCreateInfoEXT* pCreateInfo,
4943 const VkAllocationCallbacks* pAllocator,
4944 VkDebugReportCallbackEXT* pCallback)
4946 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4947 return vk_create_debug_report_callback(&instance->debug_report_callbacks,
4948 pCreateInfo, pAllocator, &instance->alloc,
4953 radv_DestroyDebugReportCallbackEXT(VkInstance _instance,
4954 VkDebugReportCallbackEXT _callback,
4955 const VkAllocationCallbacks* pAllocator)
4957 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4958 vk_destroy_debug_report_callback(&instance->debug_report_callbacks,
4959 _callback, pAllocator, &instance->alloc);
4963 radv_DebugReportMessageEXT(VkInstance _instance,
4964 VkDebugReportFlagsEXT flags,
4965 VkDebugReportObjectTypeEXT objectType,
4968 int32_t messageCode,
4969 const char* pLayerPrefix,
4970 const char* pMessage)
4972 RADV_FROM_HANDLE(radv_instance, instance, _instance);
4973 vk_debug_report(&instance->debug_report_callbacks, flags, objectType,
4974 object, location, messageCode, pLayerPrefix, pMessage);
4978 radv_GetDeviceGroupPeerMemoryFeatures(
4981 uint32_t localDeviceIndex,
4982 uint32_t remoteDeviceIndex,
4983 VkPeerMemoryFeatureFlags* pPeerMemoryFeatures)
4985 assert(localDeviceIndex == remoteDeviceIndex);
4987 *pPeerMemoryFeatures = VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT |
4988 VK_PEER_MEMORY_FEATURE_COPY_DST_BIT |
4989 VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT |
4990 VK_PEER_MEMORY_FEATURE_GENERIC_DST_BIT;
4993 static const VkTimeDomainEXT radv_time_domains[] = {
4994 VK_TIME_DOMAIN_DEVICE_EXT,
4995 VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT,
4996 VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT,
4999 VkResult radv_GetPhysicalDeviceCalibrateableTimeDomainsEXT(
5000 VkPhysicalDevice physicalDevice,
5001 uint32_t *pTimeDomainCount,
5002 VkTimeDomainEXT *pTimeDomains)
5005 VK_OUTARRAY_MAKE(out, pTimeDomains, pTimeDomainCount);
5007 for (d = 0; d < ARRAY_SIZE(radv_time_domains); d++) {
5008 vk_outarray_append(&out, i) {
5009 *i = radv_time_domains[d];
5013 return vk_outarray_status(&out);
5017 radv_clock_gettime(clockid_t clock_id)
5019 struct timespec current;
5022 ret = clock_gettime(clock_id, &current);
5023 if (ret < 0 && clock_id == CLOCK_MONOTONIC_RAW)
5024 ret = clock_gettime(CLOCK_MONOTONIC, &current);
5028 return (uint64_t) current.tv_sec * 1000000000ULL + current.tv_nsec;
5031 VkResult radv_GetCalibratedTimestampsEXT(
5033 uint32_t timestampCount,
5034 const VkCalibratedTimestampInfoEXT *pTimestampInfos,
5035 uint64_t *pTimestamps,
5036 uint64_t *pMaxDeviation)
5038 RADV_FROM_HANDLE(radv_device, device, _device);
5039 uint32_t clock_crystal_freq = device->physical_device->rad_info.clock_crystal_freq;
5041 uint64_t begin, end;
5042 uint64_t max_clock_period = 0;
5044 begin = radv_clock_gettime(CLOCK_MONOTONIC_RAW);
5046 for (d = 0; d < timestampCount; d++) {
5047 switch (pTimestampInfos[d].timeDomain) {
5048 case VK_TIME_DOMAIN_DEVICE_EXT:
5049 pTimestamps[d] = device->ws->query_value(device->ws,
5051 uint64_t device_period = DIV_ROUND_UP(1000000, clock_crystal_freq);
5052 max_clock_period = MAX2(max_clock_period, device_period);
5054 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_EXT:
5055 pTimestamps[d] = radv_clock_gettime(CLOCK_MONOTONIC);
5056 max_clock_period = MAX2(max_clock_period, 1);
5059 case VK_TIME_DOMAIN_CLOCK_MONOTONIC_RAW_EXT:
5060 pTimestamps[d] = begin;
5068 end = radv_clock_gettime(CLOCK_MONOTONIC_RAW);
5071 * The maximum deviation is the sum of the interval over which we
5072 * perform the sampling and the maximum period of any sampled
5073 * clock. That's because the maximum skew between any two sampled
5074 * clock edges is when the sampled clock with the largest period is
5075 * sampled at the end of that period but right at the beginning of the
5076 * sampling interval and some other clock is sampled right at the
5077 * beginning of its sampling period and right at the end of the
5078 * sampling interval. Let's assume the GPU has the longest clock
5079 * period and that the application is sampling GPU and monotonic:
 * (ASCII timing diagram: the raw, GPU and monotonic clock edges, with the
 *  sampling interval and the wider deviation window marked, and the sample
 *  points s = read(raw), g = read(GPU), m = read(monotonic), e = read(raw).)
5101 * We round the sample interval up by one tick to cover sampling error
5102 * in the interval clock
5105 uint64_t sample_interval = end - begin + 1;
5107 *pMaxDeviation = sample_interval + max_clock_period;
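/* Worked example (illustrative numbers, not measured): with a 25000 kHz
 * crystal the device tick period is DIV_ROUND_UP(1000000, 25000) = 40 ns and
 * the CPU clocks contribute a 1 ns period, so max_clock_period = 40.  If
 * end - begin is 1499 ns, sample_interval = 1500 and the reported
 * *pMaxDeviation is 1500 + 40 = 1540 ns. */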