/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdarg.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"
#include "anv_timestamp.h"
#include "util/strtod.h"
#include "util/debug.h"

#include "genxml/gen7_pack.h"

struct anv_dispatch_table dtable;
static void
compiler_debug_log(void *data, const char *fmt, ...)
{
}

static void
compiler_perf_log(void *data, const char *fmt, ...)
{
   va_list args;
   va_start(args, fmt);

   if (unlikely(INTEL_DEBUG & DEBUG_PERF))
      vfprintf(stderr, fmt, args);

   va_end(args);
}
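/* Usage note (not from the original file): with Mesa's INTEL_DEBUG
 * convention, the perf log above only prints when the "perf" flag is set in
 * the environment, e.g.:
 *
 *    INTEL_DEBUG=perf ./my_vulkan_app
 *
 * brw_process_intel_debug_variable(), called during physical-device init
 * below, is what parses that variable into the INTEL_DEBUG bitmask.
 */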
static VkResult
anv_physical_device_init(struct anv_physical_device *device,
                         struct anv_instance *instance,
                         const char *path)
{
   VkResult result;
   int fd;

   fd = open(path, O_RDWR | O_CLOEXEC);
   if (fd < 0)
      return vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                       "failed to open %s: %m", path);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = instance;

   assert(strlen(path) < ARRAY_SIZE(device->path));
   strncpy(device->path, path, ARRAY_SIZE(device->path));

   device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
   if (!device->chipset_id) {
      result = VK_ERROR_INITIALIZATION_FAILED;
      goto fail;
   }

   device->name = brw_get_device_name(device->chipset_id);
   device->info = brw_get_device_info(device->chipset_id);
   if (!device->info) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get device info");
      goto fail;
   }

   if (device->info->is_haswell) {
      fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
   } else if (device->info->gen == 7 && !device->info->is_baytrail) {
      fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
   } else if (device->info->gen == 7 && device->info->is_baytrail) {
      fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
   } else if (device->info->gen >= 8) {
      /* Broadwell, Cherryview, Skylake, Broxton, and Kabylake are as fully
       * supported as anything else. */
   } else {
      result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
                         "Vulkan not yet supported on %s", device->name);
      goto fail;
   }

   device->cmd_parser_version = -1;
   if (device->info->gen == 7) {
      device->cmd_parser_version =
         anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
      if (device->cmd_parser_version == -1) {
         result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                            "failed to get command parser version");
         goto fail;
      }
   }

   if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "failed to get aperture size: %m");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing gem wait");
      goto fail;
   }

   if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing execbuf2");
      goto fail;
   }

   if (!device->info->has_llc &&
       anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
      result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
                         "kernel missing wc mmap");
      goto fail;
   }

   bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);

   brw_process_intel_debug_variable();

   device->compiler = brw_compiler_create(NULL, device->info);
   if (device->compiler == NULL) {
      result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      goto fail;
   }
   device->compiler->shader_debug_log = compiler_debug_log;
   device->compiler->shader_perf_log = compiler_perf_log;

   result = anv_init_wsi(device);
   if (result != VK_SUCCESS)
      goto fail;

   /* XXX: Actually detect bit6 swizzling */
   isl_device_init(&device->isl_dev, device->info, swizzled);

   close(fd);
   return VK_SUCCESS;

fail:
   close(fd);
   return result;
}

static void
anv_physical_device_finish(struct anv_physical_device *device)
{
   anv_finish_wsi(device);
   ralloc_free(device->compiler);
}
static const VkExtensionProperties global_extensions[] = {
   {
      .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
   },
#ifdef VK_USE_PLATFORM_XCB_KHR
   {
      .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
   },
#endif
#ifdef VK_USE_PLATFORM_WAYLAND_KHR
   {
      .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
   },
#endif
};

static const VkExtensionProperties device_extensions[] = {
   {
      .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
   },
};
static void *
default_alloc_func(void *pUserData, size_t size, size_t align,
                   VkSystemAllocationScope allocationScope)
{
   return malloc(size);
}

static void *
default_realloc_func(void *pUserData, void *pOriginal, size_t size,
                     size_t align, VkSystemAllocationScope allocationScope)
{
   return realloc(pOriginal, size);
}

static void
default_free_func(void *pUserData, void *pMemory)
{
   free(pMemory);
}

static const VkAllocationCallbacks default_alloc = {
   .pfnAllocation = default_alloc_func,
   .pfnReallocation = default_realloc_func,
   .pfnFree = default_free_func,
};
VkResult anv_CreateInstance(
    const VkInstanceCreateInfo*                 pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkInstance*                                 pInstance)
{
   struct anv_instance *instance;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);

   uint32_t client_version;
   if (pCreateInfo->pApplicationInfo &&
       pCreateInfo->pApplicationInfo->apiVersion != 0) {
      client_version = pCreateInfo->pApplicationInfo->apiVersion;
   } else {
      client_version = VK_MAKE_VERSION(1, 0, 0);
   }

   if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
       client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
      return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
                       "Client requested version %d.%d.%d",
                       VK_VERSION_MAJOR(client_version),
                       VK_VERSION_MINOR(client_version),
                       VK_VERSION_PATCH(client_version));
   }

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    global_extensions[j].extensionName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   instance = anv_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
                         VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
   if (!instance)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;

   if (pAllocator)
      instance->alloc = *pAllocator;
   else
      instance->alloc = default_alloc;

   instance->apiVersion = client_version;
   instance->physicalDeviceCount = -1;

   VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));

   *pInstance = anv_instance_to_handle(instance);

   return VK_SUCCESS;
}
void anv_DestroyInstance(
    VkInstance                                  _instance,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);

   if (instance->physicalDeviceCount > 0) {
      /* We support at most one physical device. */
      assert(instance->physicalDeviceCount == 1);
      anv_physical_device_finish(&instance->physicalDevice);
   }

   VG(VALGRIND_DESTROY_MEMPOOL(instance));

   anv_free(&instance->alloc, instance);
}
VkResult anv_EnumeratePhysicalDevices(
    VkInstance                                  _instance,
    uint32_t*                                   pPhysicalDeviceCount,
    VkPhysicalDevice*                           pPhysicalDevices)
{
   ANV_FROM_HANDLE(anv_instance, instance, _instance);
   VkResult result;

   if (instance->physicalDeviceCount < 0) {
      char path[20];
      for (unsigned i = 0; i < 8; i++) {
         snprintf(path, sizeof(path), "/dev/dri/renderD%d", 128 + i);
         result = anv_physical_device_init(&instance->physicalDevice,
                                           instance, path);
         if (result == VK_SUCCESS)
            break;
      }

      if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
         instance->physicalDeviceCount = 0;
      } else if (result == VK_SUCCESS) {
         instance->physicalDeviceCount = 1;
      } else {
         return result;
      }
   }

   /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
    * otherwise it's an inout parameter.
    *
    * The Vulkan spec (git aaed022) says:
    *
    *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
    *    that is initialized with the number of devices the application is
    *    prepared to receive handles to. pname:pPhysicalDevices is a pointer
    *    to an array of at least this many VkPhysicalDevice handles [...].
    *
    *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
    *    overwrites the contents of the variable pointed to by
    *    pPhysicalDeviceCount with the number of physical devices in the
    *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
    *    pPhysicalDeviceCount with the number of physical handles written to
    *    pPhysicalDevices.
    */
   if (!pPhysicalDevices) {
      *pPhysicalDeviceCount = instance->physicalDeviceCount;
   } else if (*pPhysicalDeviceCount >= 1) {
      pPhysicalDevices[0] =
         anv_physical_device_to_handle(&instance->physicalDevice);
      *pPhysicalDeviceCount = 1;
   } else {
      *pPhysicalDeviceCount = 0;
   }

   return VK_SUCCESS;
}
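/* Example (not part of the driver): the in/out convention documented above
 * is what enables the standard two-call enumeration idiom on the client
 * side. A minimal sketch, assuming a valid VkInstance named instance:
 *
 *    uint32_t count = 0;
 *    vkEnumeratePhysicalDevices(instance, &count, NULL);    // query count
 *
 *    VkPhysicalDevice devices[count];
 *    vkEnumeratePhysicalDevices(instance, &count, devices); // fill handles
 *
 * With this driver, count comes back as 0 or 1, since at most one physical
 * device is supported.
 */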
void anv_GetPhysicalDeviceFeatures(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceFeatures*                   pFeatures)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);

   *pFeatures = (VkPhysicalDeviceFeatures) {
      .robustBufferAccess = true,
      .fullDrawIndexUint32 = true,
      .imageCubeArray = false,
      .independentBlend = true,
      .geometryShader = true,
      .tessellationShader = false,
      .sampleRateShading = false,
      .dualSrcBlend = true,
      .multiDrawIndirect = false,
      .drawIndirectFirstInstance = false,
      .depthBiasClamp = false,
      .fillModeNonSolid = true,
      .depthBounds = false,
      .multiViewport = true,
      .samplerAnisotropy = false, /* FINISHME */
      .textureCompressionETC2 = pdevice->info->gen >= 8 ||
                                pdevice->info->is_baytrail,
      .textureCompressionASTC_LDR = pdevice->info->gen >= 9, /* FINISHME CHV */
      .textureCompressionBC = true,
      .occlusionQueryPrecise = true,
      .pipelineStatisticsQuery = false,
      .fragmentStoresAndAtomics = true,
      .shaderTessellationAndGeometryPointSize = true,
      .shaderImageGatherExtended = false,
      .shaderStorageImageExtendedFormats = false,
      .shaderStorageImageMultisample = false,
      .shaderUniformBufferArrayDynamicIndexing = true,
      .shaderSampledImageArrayDynamicIndexing = true,
      .shaderStorageBufferArrayDynamicIndexing = true,
      .shaderStorageImageArrayDynamicIndexing = true,
      .shaderStorageImageReadWithoutFormat = false,
      .shaderStorageImageWriteWithoutFormat = true,
      .shaderClipDistance = false,
      .shaderCullDistance = false,
      .shaderFloat64 = false,
      .shaderInt64 = false,
      .shaderInt16 = false,
      .variableMultisampleRate = false,
      .inheritedQueries = false,
   };

   /* We can't do image stores in vec4 shaders */
   pFeatures->vertexPipelineStoresAndAtomics =
      pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
      pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
}

static void
anv_device_get_cache_uuid(void *uuid)
{
   memset(uuid, 0, VK_UUID_SIZE);
   snprintf(uuid, VK_UUID_SIZE, "anv-%s", ANV_TIMESTAMP);
}
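/* Note (not from the original file): VK_UUID_SIZE is 16 bytes, so the
 * snprintf above keeps at most 15 characters of "anv-<timestamp>" plus the
 * terminating NUL; a longer build timestamp is silently truncated to fit:
 *
 *    char uuid[VK_UUID_SIZE];
 *    anv_device_get_cache_uuid(uuid);  // e.g. "anv-2016041" + NUL, say
 */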
void anv_GetPhysicalDeviceProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceProperties*                 pProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
   const struct brw_device_info *devinfo = pdevice->info;

   const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;

   /* See assertions made when programming the buffer surface state. */
   const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
                                      (1ul << 30) : (1ul << 27);

   VkSampleCountFlags sample_counts =
      isl_device_get_sample_counts(&pdevice->isl_dev);

   VkPhysicalDeviceLimits limits = {
      .maxImageDimension1D = (1 << 14),
      .maxImageDimension2D = (1 << 14),
      .maxImageDimension3D = (1 << 11),
      .maxImageDimensionCube = (1 << 14),
      .maxImageArrayLayers = (1 << 11),
      .maxTexelBufferElements = 128 * 1024 * 1024,
      .maxUniformBufferRange = (1ul << 27),
      .maxStorageBufferRange = max_raw_buffer_sz,
      .maxPushConstantsSize = MAX_PUSH_CONSTANTS_SIZE,
      .maxMemoryAllocationCount = UINT32_MAX,
      .maxSamplerAllocationCount = 64 * 1024,
      .bufferImageGranularity = 64, /* A cache line */
      .sparseAddressSpaceSize = 0,
      .maxBoundDescriptorSets = MAX_SETS,
      .maxPerStageDescriptorSamplers = 64,
      .maxPerStageDescriptorUniformBuffers = 64,
      .maxPerStageDescriptorStorageBuffers = 64,
      .maxPerStageDescriptorSampledImages = 64,
      .maxPerStageDescriptorStorageImages = 64,
      .maxPerStageDescriptorInputAttachments = 64,
      .maxPerStageResources = 128,
      .maxDescriptorSetSamplers = 256,
      .maxDescriptorSetUniformBuffers = 256,
      .maxDescriptorSetUniformBuffersDynamic = 256,
      .maxDescriptorSetStorageBuffers = 256,
      .maxDescriptorSetStorageBuffersDynamic = 256,
      .maxDescriptorSetSampledImages = 256,
      .maxDescriptorSetStorageImages = 256,
      .maxDescriptorSetInputAttachments = 256,
      .maxVertexInputAttributes = 32,
      .maxVertexInputBindings = 32,
      .maxVertexInputAttributeOffset = 2047,
      .maxVertexInputBindingStride = 2048,
      .maxVertexOutputComponents = 128,
      .maxTessellationGenerationLevel = 0,
      .maxTessellationPatchSize = 0,
      .maxTessellationControlPerVertexInputComponents = 0,
      .maxTessellationControlPerVertexOutputComponents = 0,
      .maxTessellationControlPerPatchOutputComponents = 0,
      .maxTessellationControlTotalOutputComponents = 0,
      .maxTessellationEvaluationInputComponents = 0,
      .maxTessellationEvaluationOutputComponents = 0,
      .maxGeometryShaderInvocations = 32,
      .maxGeometryInputComponents = 64,
      .maxGeometryOutputComponents = 128,
      .maxGeometryOutputVertices = 256,
      .maxGeometryTotalOutputComponents = 1024,
      .maxFragmentInputComponents = 128,
      .maxFragmentOutputAttachments = 8,
      .maxFragmentDualSrcAttachments = 2,
      .maxFragmentCombinedOutputResources = 8,
      .maxComputeSharedMemorySize = 32768,
      .maxComputeWorkGroupCount = { 65535, 65535, 65535 },
      .maxComputeWorkGroupInvocations = 16 * devinfo->max_cs_threads,
      .maxComputeWorkGroupSize = {
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
         16 * devinfo->max_cs_threads,
      },
      .subPixelPrecisionBits = 4 /* FIXME */,
      .subTexelPrecisionBits = 4 /* FIXME */,
      .mipmapPrecisionBits = 4 /* FIXME */,
      .maxDrawIndexedIndexValue = UINT32_MAX,
      .maxDrawIndirectCount = UINT32_MAX,
      .maxSamplerLodBias = 16,
      .maxSamplerAnisotropy = 16,
      .maxViewports = MAX_VIEWPORTS,
      .maxViewportDimensions = { (1 << 14), (1 << 14) },
      .viewportBoundsRange = { INT16_MIN, INT16_MAX },
      .viewportSubPixelBits = 13, /* We take a float? */
      .minMemoryMapAlignment = 4096, /* A page */
      .minTexelBufferOffsetAlignment = 1,
      .minUniformBufferOffsetAlignment = 1,
      .minStorageBufferOffsetAlignment = 1,
      .minTexelOffset = -8,
      .maxTexelOffset = 7,
      .minTexelGatherOffset = -8,
      .maxTexelGatherOffset = 7,
      .minInterpolationOffset = 0, /* FIXME */
      .maxInterpolationOffset = 0, /* FIXME */
      .subPixelInterpolationOffsetBits = 0, /* FIXME */
      .maxFramebufferWidth = (1 << 14),
      .maxFramebufferHeight = (1 << 14),
      .maxFramebufferLayers = (1 << 10),
      .framebufferColorSampleCounts = sample_counts,
      .framebufferDepthSampleCounts = sample_counts,
      .framebufferStencilSampleCounts = sample_counts,
      .framebufferNoAttachmentsSampleCounts = sample_counts,
      .maxColorAttachments = MAX_RTS,
      .sampledImageColorSampleCounts = sample_counts,
      .sampledImageIntegerSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .sampledImageDepthSampleCounts = sample_counts,
      .sampledImageStencilSampleCounts = sample_counts,
      .storageImageSampleCounts = VK_SAMPLE_COUNT_1_BIT,
      .maxSampleMaskWords = 1,
      .timestampComputeAndGraphics = false,
      .timestampPeriod = time_stamp_base, /* nanoseconds per timestamp tick */
      .maxClipDistances = 0 /* FIXME */,
      .maxCullDistances = 0 /* FIXME */,
      .maxCombinedClipAndCullDistances = 0 /* FIXME */,
      .discreteQueuePriorities = 1,
      .pointSizeRange = { 0.125, 255.875 },
      .lineWidthRange = { 0.0, 7.9921875 },
      .pointSizeGranularity = (1.0 / 8.0),
      .lineWidthGranularity = (1.0 / 128.0),
      .strictLines = false, /* FINISHME */
      .standardSampleLocations = true,
      .optimalBufferCopyOffsetAlignment = 128,
      .optimalBufferCopyRowPitchAlignment = 128,
      .nonCoherentAtomSize = 64,
   };

   *pProperties = (VkPhysicalDeviceProperties) {
      .apiVersion = VK_MAKE_VERSION(1, 0, 5),
      .deviceID = pdevice->chipset_id,
      .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
      .limits = limits,
      .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
   };

   strcpy(pProperties->deviceName, pdevice->name);
   anv_device_get_cache_uuid(pProperties->pipelineCacheUUID);
}
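/* Example (not from the original file): timestampPeriod is the number of
 * nanoseconds per timestamp tick, so a client converts a query delta like
 * this. A minimal sketch with hypothetical names:
 */
static inline double
example_timestamp_delta_ns(uint64_t begin_ticks, uint64_t end_ticks,
                           float timestamp_period)
{
   /* E.g. with the gen7/8 value of 80.0 ns/tick, a delta of 1000 ticks
    * is 80000 ns (80 us). */
   return (double)(end_ticks - begin_ticks) * timestamp_period;
}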
void anv_GetPhysicalDeviceQueueFamilyProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pCount,
    VkQueueFamilyProperties*                    pQueueFamilyProperties)
{
   if (pQueueFamilyProperties == NULL) {
      *pCount = 1;
      return;
   }

   assert(*pCount >= 1);

   *pQueueFamilyProperties = (VkQueueFamilyProperties) {
      .queueFlags = VK_QUEUE_GRAPHICS_BIT |
                    VK_QUEUE_COMPUTE_BIT |
                    VK_QUEUE_TRANSFER_BIT,
      .queueCount = 1,
      .timestampValidBits = 36, /* XXX: Real value here */
      .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
   };
}
void anv_GetPhysicalDeviceMemoryProperties(
    VkPhysicalDevice                            physicalDevice,
    VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkDeviceSize heap_size;

   /* Reserve some wiggle room for the driver by exposing only 75% of the
    * aperture to the heap.
    */
   heap_size = 3 * physical_device->aperture_size / 4;

   if (physical_device->info->has_llc) {
      /* Big core GPUs share LLC with the CPU and thus one memory type can be
       * both cached and coherent at the same time.
       */
      pMemoryProperties->memoryTypeCount = 1;
      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   } else {
      /* The spec requires that we expose a host-visible, coherent memory
       * type, but Atom GPUs don't share LLC. Thus we offer two memory types:
       * one that is cached but not coherent, and one that is coherent but
       * uncached (write-combined, though).
       */
      pMemoryProperties->memoryTypeCount = 2;
      pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
         .heapIndex = 0,
      };
      pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
         .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
                          VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                          VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
         .heapIndex = 0,
      };
   }

   pMemoryProperties->memoryHeapCount = 1;
   pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
      .size = heap_size,
      .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
   };
}
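/* Example (not from the original file): given the memory types advertised
 * above, an application-side helper picks a type index by intersecting a
 * resource's memoryTypeBits with the property flags it wants. A minimal
 * sketch; the function name is hypothetical:
 */
static inline int32_t
example_find_memory_type(const VkPhysicalDeviceMemoryProperties *props,
                         uint32_t type_bits, VkMemoryPropertyFlags wanted)
{
   for (uint32_t i = 0; i < props->memoryTypeCount; i++) {
      /* Type i is usable if bit i is set and it has all wanted flags. */
      if ((type_bits & (1u << i)) &&
          (props->memoryTypes[i].propertyFlags & wanted) == wanted)
         return i;
   }
   return -1;
}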
PFN_vkVoidFunction anv_GetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName)
{
   return anv_lookup_entrypoint(pName);
}

/* With version 1+ of the loader interface the ICD should expose
 * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in
 * apps.
 */
PUBLIC
VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName);

VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
    VkInstance                                  instance,
    const char*                                 pName)
{
   return anv_GetInstanceProcAddr(instance, pName);
}

PFN_vkVoidFunction anv_GetDeviceProcAddr(
    VkDevice                                    device,
    const char*                                 pName)
{
   return anv_lookup_entrypoint(pName);
}
static void
anv_queue_init(struct anv_device *device, struct anv_queue *queue)
{
   queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   queue->device = device;
   queue->pool = &device->surface_state_pool;
}

static void
anv_queue_finish(struct anv_queue *queue)
{
}

static struct anv_state
anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size,
                         size_t align, const void *p)
{
   struct anv_state state;

   state = anv_state_pool_alloc(pool, size, align);
   memcpy(state.map, p, size);

   if (!pool->block_pool->device->info.has_llc)
      anv_state_clflush(state);

   return state;
}
struct gen8_border_color {
   union {
      float float32[4];
      uint32_t uint32[4];
   };
   /* Pad out to 64 bytes */
   uint32_t _pad[12];
};

static void
anv_device_init_border_colors(struct anv_device *device)
{
   static const struct gen8_border_color border_colors[] = {
      [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] = { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
      [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] = { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
      [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] = { .uint32 = { 0, 0, 0, 0 } },
      [VK_BORDER_COLOR_INT_OPAQUE_BLACK] = { .uint32 = { 0, 0, 0, 1 } },
      [VK_BORDER_COLOR_INT_OPAQUE_WHITE] = { .uint32 = { 1, 1, 1, 1 } },
   };

   device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
                                                    sizeof(border_colors), 64,
                                                    border_colors);
}
VkResult
anv_device_submit_simple_batch(struct anv_device *device,
                               struct anv_batch *batch)
{
   struct drm_i915_gem_execbuffer2 execbuf;
   struct drm_i915_gem_exec_object2 exec2_objects[1];
   struct anv_bo bo;
   VkResult result = VK_SUCCESS;
   uint32_t size;
   int64_t timeout;
   int ret;

   /* Kernel driver requires 8 byte aligned batch length */
   size = align_u32(batch->next - batch->start, 8);
   result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
   if (result != VK_SUCCESS)
      return result;

   memcpy(bo.map, batch->start, size);
   if (!device->info.has_llc)
      anv_clflush_range(bo.map, size);

   exec2_objects[0].handle = bo.gem_handle;
   exec2_objects[0].relocation_count = 0;
   exec2_objects[0].relocs_ptr = 0;
   exec2_objects[0].alignment = 0;
   exec2_objects[0].offset = bo.offset;
   exec2_objects[0].flags = 0;
   exec2_objects[0].rsvd1 = 0;
   exec2_objects[0].rsvd2 = 0;

   execbuf.buffers_ptr = (uintptr_t) exec2_objects;
   execbuf.buffer_count = 1;
   execbuf.batch_start_offset = 0;
   execbuf.batch_len = size;
   execbuf.cliprects_ptr = 0;
   execbuf.num_cliprects = 0;
   execbuf.DR1 = 0;
   execbuf.DR4 = 0;

   execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   execbuf.rsvd1 = device->context_id;
   execbuf.rsvd2 = 0;

   ret = anv_gem_execbuffer(device, &execbuf);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "execbuf2 failed: %m");
      goto fail;
   }

   timeout = INT64_MAX;
   ret = anv_gem_wait(device, bo.gem_handle, &timeout);
   if (ret != 0) {
      /* We don't know the real error. */
      result = vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY, "gem wait failed: %m");
      goto fail;
   }

fail:
   anv_bo_pool_free(&device->batch_bo_pool, &bo);

   return result;
}
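/* Sketch (not from the original file) of the alignment helpers assumed
 * throughout this file (align_u32 above, align_u64 in anv_AllocateMemory
 * below): round v up to the next multiple of a, where a is a power of two.
 * A distinct name is used to avoid clashing with the real helpers in
 * anv_private.h:
 */
static inline uint64_t
example_align_u64(uint64_t v, uint64_t a)
{
   assert(a != 0 && (a & (a - 1)) == 0); /* a must be a power of two */
   return (v + a - 1) & ~(a - 1);
}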
VkResult anv_CreateDevice(
    VkPhysicalDevice                            physicalDevice,
    const VkDeviceCreateInfo*                   pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDevice*                                   pDevice)
{
   ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
   VkResult result;
   struct anv_device *device;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);

   for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
      bool found = false;
      for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
         if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
                    device_extensions[j].extensionName) == 0) {
            found = true;
            break;
         }
      }
      if (!found)
         return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
   }

   anv_set_dispatch_devinfo(physical_device->info);

   device = anv_alloc2(&physical_device->instance->alloc, pAllocator,
                       sizeof(*device), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
   if (!device)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
   device->instance = physical_device->instance;
   device->chipset_id = physical_device->chipset_id;

   if (pAllocator)
      device->alloc = *pAllocator;
   else
      device->alloc = physical_device->instance->alloc;

   /* XXX(chadv): Can we dup() physicalDevice->fd here? */
   device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
   if (device->fd == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_device;
   }

   device->context_id = anv_gem_create_context(device);
   if (device->context_id == -1) {
      result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
      goto fail_fd;
   }

   device->info = *physical_device->info;
   device->isl_dev = physical_device->isl_dev;

   /* On Broadwell and later, we can use batch chaining to more efficiently
    * implement growing command buffers. Prior to Haswell, the kernel
    * command parser gets in the way and we have to fall back to growing
    * the batch.
    */
   device->can_chain_batches = device->info.gen >= 8;

   device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
      pCreateInfo->pEnabledFeatures->robustBufferAccess;

   pthread_mutex_init(&device->mutex, NULL);

   anv_bo_pool_init(&device->batch_bo_pool, device);

   anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);

   anv_state_pool_init(&device->dynamic_state_pool,
                       &device->dynamic_state_block_pool);

   anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
   anv_state_pool_init(&device->instruction_state_pool,
                       &device->instruction_block_pool);
   anv_pipeline_cache_init(&device->default_pipeline_cache, device);

   anv_block_pool_init(&device->surface_state_block_pool, device, 4096);

   anv_state_pool_init(&device->surface_state_pool,
                       &device->surface_state_block_pool);

   anv_bo_init_new(&device->workaround_bo, device, 1024);

   anv_scratch_pool_init(device, &device->scratch_pool);

   anv_queue_init(device, &device->queue);

   switch (device->info.gen) {
   case 7:
      if (!device->info.is_haswell)
         result = gen7_init_device_state(device);
      else
         result = gen75_init_device_state(device);
      break;
   case 8:
      result = gen8_init_device_state(device);
      break;
   case 9:
      result = gen9_init_device_state(device);
      break;
   default:
      /* Shouldn't get here as we don't create physical devices for any other
       * gens. */
      unreachable("unhandled gen");
   }
   if (result != VK_SUCCESS)
      goto fail_fd;

   result = anv_device_init_meta(device);
   if (result != VK_SUCCESS)
      goto fail_fd;

   anv_device_init_border_colors(device);

   *pDevice = anv_device_to_handle(device);

   return VK_SUCCESS;

 fail_fd:
   close(device->fd);
 fail_device:
   anv_free(&device->alloc, device);

   return result;
}
void anv_DestroyDevice(
    VkDevice                                    _device,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   anv_queue_finish(&device->queue);

   anv_device_finish_meta(device);

#ifdef HAVE_VALGRIND
   /* We only need to free these to prevent valgrind errors. The backing
    * BO will go away in a couple of lines so we don't actually leak.
    */
   anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
#endif

   anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
   anv_gem_close(device, device->workaround_bo.gem_handle);

   anv_bo_pool_finish(&device->batch_bo_pool);
   anv_state_pool_finish(&device->dynamic_state_pool);
   anv_block_pool_finish(&device->dynamic_state_block_pool);
   anv_state_pool_finish(&device->instruction_state_pool);
   anv_block_pool_finish(&device->instruction_block_pool);
   anv_state_pool_finish(&device->surface_state_pool);
   anv_block_pool_finish(&device->surface_state_block_pool);
   anv_scratch_pool_finish(device, &device->scratch_pool);

   close(device->fd);

   pthread_mutex_destroy(&device->mutex);

   anv_free(&device->alloc, device);
}
VkResult anv_EnumerateInstanceExtensionProperties(
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = ARRAY_SIZE(global_extensions);
      return VK_SUCCESS;
   }

   assert(*pPropertyCount >= ARRAY_SIZE(global_extensions));

   *pPropertyCount = ARRAY_SIZE(global_extensions);
   memcpy(pProperties, global_extensions, sizeof(global_extensions));

   return VK_SUCCESS;
}

VkResult anv_EnumerateDeviceExtensionProperties(
    VkPhysicalDevice                            physicalDevice,
    const char*                                 pLayerName,
    uint32_t*                                   pPropertyCount,
    VkExtensionProperties*                      pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = ARRAY_SIZE(device_extensions);
      return VK_SUCCESS;
   }

   assert(*pPropertyCount >= ARRAY_SIZE(device_extensions));

   *pPropertyCount = ARRAY_SIZE(device_extensions);
   memcpy(pProperties, device_extensions, sizeof(device_extensions));

   return VK_SUCCESS;
}

VkResult anv_EnumerateInstanceLayerProperties(
    uint32_t*                                   pPropertyCount,
    VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}

VkResult anv_EnumerateDeviceLayerProperties(
    VkPhysicalDevice                            physicalDevice,
    uint32_t*                                   pPropertyCount,
    VkLayerProperties*                          pProperties)
{
   if (pProperties == NULL) {
      *pPropertyCount = 0;
      return VK_SUCCESS;
   }

   /* None supported at this time */
   return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
}
void anv_GetDeviceQueue(
    VkDevice                                    _device,
    uint32_t                                    queueNodeIndex,
    uint32_t                                    queueIndex,
    VkQueue*                                    pQueue)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   assert(queueIndex == 0);

   *pQueue = anv_queue_to_handle(&device->queue);
}

VkResult anv_QueueSubmit(
    VkQueue                                     _queue,
    uint32_t                                    submitCount,
    const VkSubmitInfo*                         pSubmits,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   struct anv_device *device = queue->device;
   int ret;

   for (uint32_t i = 0; i < submitCount; i++) {
      for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
         ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
                         pSubmits[i].pCommandBuffers[j]);
         assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);

         ret = anv_gem_execbuffer(device, &cmd_buffer->execbuf2.execbuf);
         if (ret != 0) {
            /* We don't know the real error. */
            return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                             "execbuf2 failed: %m");
         }

         for (uint32_t k = 0; k < cmd_buffer->execbuf2.bo_count; k++)
            cmd_buffer->execbuf2.bos[k]->offset =
               cmd_buffer->execbuf2.objects[k].offset;
      }
   }

   if (fence) {
      ret = anv_gem_execbuffer(device, &fence->execbuf);
      if (ret != 0) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "execbuf2 failed: %m");
      }
   }

   return VK_SUCCESS;
}
VkResult anv_QueueWaitIdle(
    VkQueue                                     _queue)
{
   ANV_FROM_HANDLE(anv_queue, queue, _queue);

   return ANV_CALL(DeviceWaitIdle)(anv_device_to_handle(queue->device));
}

VkResult anv_DeviceWaitIdle(
    VkDevice                                    _device)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_batch batch;

   uint8_t cmds[8];
   batch.start = batch.next = cmds;
   batch.end = (void *) cmds + sizeof(cmds);

   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

   return anv_device_submit_simple_batch(device, &batch);
}
VkResult
anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
{
   bo->gem_handle = anv_gem_create(device, size);
   if (!bo->gem_handle)
      return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);

   bo->map = NULL;
   bo->index = 0;
   bo->offset = 0;
   bo->size = size;
   bo->is_winsys_bo = false;

   return VK_SUCCESS;
}
VkResult anv_AllocateMemory(
    VkDevice                                    _device,
    const VkMemoryAllocateInfo*                 pAllocateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkDeviceMemory*                             pMem)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_device_memory *mem;
   VkResult result;

   assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);

   if (pAllocateInfo->allocationSize == 0) {
      /* Apparently, this is allowed */
      *pMem = VK_NULL_HANDLE;
      return VK_SUCCESS;
   }

   /* We support exactly one memory heap. */
   assert(pAllocateInfo->memoryTypeIndex == 0 ||
          (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));

   /* FINISHME: Fail if allocation request exceeds heap size. */

   mem = anv_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
                    VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (mem == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   /* The kernel is going to give us whole pages anyway */
   uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);

   result = anv_bo_init_new(&mem->bo, device, alloc_size);
   if (result != VK_SUCCESS)
      goto fail;

   mem->type_index = pAllocateInfo->memoryTypeIndex;

   *pMem = anv_device_memory_to_handle(mem);

   return VK_SUCCESS;

 fail:
   anv_free2(&device->alloc, pAllocator, mem);

   return result;
}
void anv_FreeMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _mem,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _mem);

   if (mem == NULL)
      return;

   if (mem->bo.map)
      anv_gem_munmap(mem->bo.map, mem->bo.size);

   if (mem->bo.gem_handle != 0)
      anv_gem_close(device, mem->bo.gem_handle);

   anv_free2(&device->alloc, pAllocator, mem);
}
VkResult anv_MapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _memory,
    VkDeviceSize                                offset,
    VkDeviceSize                                size,
    VkMemoryMapFlags                            flags,
    void**                                      ppData)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);

   if (mem == NULL) {
      *ppData = NULL;
      return VK_SUCCESS;
   }

   if (size == VK_WHOLE_SIZE)
      size = mem->bo.size - offset;

   /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
    * takes a VkDeviceMemory pointer, it seems like only one map of the memory
    * at a time is valid. We could just mmap up front and return an offset
    * pointer here, but that may exhaust virtual memory on 32 bit
    * userspace. */
   uint32_t gem_flags = 0;
   if (!device->info.has_llc && mem->type_index == 0)
      gem_flags |= I915_MMAP_WC;

   /* GEM will fail to map if the offset isn't 4k-aligned. Round down. */
   uint64_t map_offset = offset & ~4095ull;
   assert(offset >= map_offset);
   uint64_t map_size = (offset + size) - map_offset;

   /* Let's map whole pages */
   map_size = align_u64(map_size, 4096);

   mem->map = anv_gem_mmap(device, mem->bo.gem_handle,
                           map_offset, map_size, gem_flags);
   mem->map_size = map_size;

   *ppData = mem->map + (offset - map_offset);

   return VK_SUCCESS;
}
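/* Worked example (not from the original file) of the rounding above:
 * mapping offset = 5000, size = 100 gives
 *
 *    map_offset = 5000 & ~4095         = 4096  (page-aligned start)
 *    map_size   = (5000 + 100) - 4096  = 1004
 *    map_size   = align_u64(1004, 4096) = 4096 (whole pages)
 *
 * and the pointer returned to the user is mem->map + (5000 - 4096),
 * i.e. 904 bytes into the mapping.
 */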
void anv_UnmapMemory(
    VkDevice                                    _device,
    VkDeviceMemory                              _memory)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);

   if (mem == NULL)
      return;

   anv_gem_munmap(mem->map, mem->map_size);
}

static void
clflush_mapped_ranges(struct anv_device *device,
                      uint32_t count,
                      const VkMappedMemoryRange *ranges)
{
   for (uint32_t i = 0; i < count; i++) {
      ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
      void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
      void *end;

      if (ranges[i].offset + ranges[i].size > mem->map_size)
         end = mem->map + mem->map_size;
      else
         end = mem->map + ranges[i].offset + ranges[i].size;

      while (p < end) {
         __builtin_ia32_clflush(p);
         p += CACHELINE_SIZE;
      }
   }
}
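/* Assumption (not from the original file): CACHELINE_SIZE and CACHELINE_MASK
 * come from anv_private.h. On the x86 parts this driver targets, a cache
 * line is 64 bytes, so the values would be
 *
 *    #define CACHELINE_SIZE 64
 *    #define CACHELINE_MASK 63
 *
 * which is why the loop above rounds the start pointer down with
 * ~CACHELINE_MASK and advances by CACHELINE_SIZE per clflush.
 */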
VkResult anv_FlushMappedMemoryRanges(
    VkDevice                                    _device,
    uint32_t                                    memoryRangeCount,
    const VkMappedMemoryRange*                  pMemoryRanges)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->info.has_llc)
      return VK_SUCCESS;

   /* Make sure the writes we're flushing have landed. */
   __builtin_ia32_mfence();

   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);

   return VK_SUCCESS;
}

VkResult anv_InvalidateMappedMemoryRanges(
    VkDevice                                    _device,
    uint32_t                                    memoryRangeCount,
    const VkMappedMemoryRange*                  pMemoryRanges)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   if (device->info.has_llc)
      return VK_SUCCESS;

   clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);

   /* Make sure no reads get moved up above the invalidate. */
   __builtin_ia32_mfence();

   return VK_SUCCESS;
}
void anv_GetBufferMemoryRequirements(
    VkDevice                                    device,
    VkBuffer                                    _buffer,
    VkMemoryRequirements*                       pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = buffer->size;
   pMemoryRequirements->alignment = 16;
}

void anv_GetImageMemoryRequirements(
    VkDevice                                    device,
    VkImage                                     _image,
    VkMemoryRequirements*                       pMemoryRequirements)
{
   ANV_FROM_HANDLE(anv_image, image, _image);

   /* The Vulkan spec (git aaed022) says:
    *
    *    memoryTypeBits is a bitfield and contains one bit set for every
    *    supported memory type for the resource. The bit `1<<i` is set if and
    *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
    *    structure for the physical device is supported.
    *
    * We support exactly one memory type.
    */
   pMemoryRequirements->memoryTypeBits = 1;

   pMemoryRequirements->size = image->size;
   pMemoryRequirements->alignment = image->alignment;
}
void anv_GetImageSparseMemoryRequirements(
    VkDevice                                    device,
    VkImage                                     image,
    uint32_t*                                   pSparseMemoryRequirementCount,
    VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
{
   stub();
}

void anv_GetDeviceMemoryCommitment(
    VkDevice                                    device,
    VkDeviceMemory                              memory,
    VkDeviceSize*                               pCommittedMemoryInBytes)
{
   *pCommittedMemoryInBytes = 0;
}

VkResult anv_BindBufferMemory(
    VkDevice                                    device,
    VkBuffer                                    _buffer,
    VkDeviceMemory                              _memory,
    VkDeviceSize                                memoryOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   if (mem) {
      buffer->bo = &mem->bo;
      buffer->offset = memoryOffset;
   } else {
      buffer->bo = NULL;
      buffer->offset = 0;
   }

   return VK_SUCCESS;
}

VkResult anv_BindImageMemory(
    VkDevice                                    device,
    VkImage                                     _image,
    VkDeviceMemory                              _memory,
    VkDeviceSize                                memoryOffset)
{
   ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
   ANV_FROM_HANDLE(anv_image, image, _image);

   if (mem) {
      image->bo = &mem->bo;
      image->offset = memoryOffset;
   } else {
      image->bo = NULL;
      image->offset = 0;
   }

   return VK_SUCCESS;
}

VkResult anv_QueueBindSparse(
    VkQueue                                     queue,
    uint32_t                                    bindInfoCount,
    const VkBindSparseInfo*                     pBindInfo,
    VkFence                                     fence)
{
   stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
}
VkResult anv_CreateFence(
    VkDevice                                    _device,
    const VkFenceCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFence*                                    pFence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_bo fence_bo;
   struct anv_fence *fence;
   struct anv_batch batch;
   VkResult result;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);

   result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo, 4096);
   if (result != VK_SUCCESS)
      return result;

   /* Fences are small. Just store the CPU data structure in the BO. */
   fence = fence_bo.map;
   fence->bo = fence_bo;

   /* Place the batch after the CPU data but on its own cache line. */
   const uint32_t batch_offset = align_u32(sizeof(*fence), CACHELINE_SIZE);
   batch.next = batch.start = fence->bo.map + batch_offset;
   batch.end = fence->bo.map + fence->bo.size;
   anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
   anv_batch_emit(&batch, GEN7_MI_NOOP, noop);

   if (!device->info.has_llc) {
      assert(((uintptr_t) batch.start & CACHELINE_MASK) == 0);
      assert(batch.next - batch.start <= CACHELINE_SIZE);
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(batch.start);
   }

   fence->exec2_objects[0].handle = fence->bo.gem_handle;
   fence->exec2_objects[0].relocation_count = 0;
   fence->exec2_objects[0].relocs_ptr = 0;
   fence->exec2_objects[0].alignment = 0;
   fence->exec2_objects[0].offset = fence->bo.offset;
   fence->exec2_objects[0].flags = 0;
   fence->exec2_objects[0].rsvd1 = 0;
   fence->exec2_objects[0].rsvd2 = 0;

   fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
   fence->execbuf.buffer_count = 1;
   fence->execbuf.batch_start_offset = batch.start - fence->bo.map;
   fence->execbuf.batch_len = batch.next - batch.start;
   fence->execbuf.cliprects_ptr = 0;
   fence->execbuf.num_cliprects = 0;
   fence->execbuf.DR1 = 0;
   fence->execbuf.DR4 = 0;

   fence->execbuf.flags =
      I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
   fence->execbuf.rsvd1 = device->context_id;
   fence->execbuf.rsvd2 = 0;

   fence->ready = false;

   *pFence = anv_fence_to_handle(fence);

   return VK_SUCCESS;
}
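/* Layout sketch (not from the original file) of the fence BO built above,
 * assuming a 64-byte cache line:
 *
 *    byte 0                              : struct anv_fence (CPU data)
 *    byte align_u32(sizeof(*fence), 64)  : MI_BATCH_BUFFER_END, MI_NOOP
 *
 * Keeping the tiny batch on its own cache line is what makes the single
 * clflush in the !has_llc path above sufficient.
 */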
void anv_DestroyFence(
    VkDevice                                    _device,
    VkFence                                     _fence,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);

   assert(fence->bo.map == fence);
   anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
}

VkResult anv_ResetFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences)
{
   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      fence->ready = false;
   }

   return VK_SUCCESS;
}

VkResult anv_GetFenceStatus(
    VkDevice                                    _device,
    VkFence                                     _fence)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_fence, fence, _fence);
   int64_t t = 0;
   int ret;

   if (fence->ready)
      return VK_SUCCESS;

   ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
   if (ret == 0) {
      fence->ready = true;
      return VK_SUCCESS;
   }

   return VK_NOT_READY;
}
VkResult anv_WaitForFences(
    VkDevice                                    _device,
    uint32_t                                    fenceCount,
    const VkFence*                              pFences,
    VkBool32                                    waitAll,
    uint64_t                                    timeout)
{
   ANV_FROM_HANDLE(anv_device, device, _device);

   /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64 bit timeout and is supposed
    * to block indefinitely for timeouts <= 0. Unfortunately, this was broken
    * for a couple of kernel releases. Since there's no way to know
    * whether or not the kernel we're using is one of the broken ones, the
    * best we can do is to clamp the timeout to INT64_MAX. This limits the
    * maximum timeout from 584 years to 292 years - likely not a big deal.
    */
   if (timeout > INT64_MAX)
      timeout = INT64_MAX;

   int64_t t = timeout;

   /* FIXME: handle !waitAll */

   for (uint32_t i = 0; i < fenceCount; i++) {
      ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
      int ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
      if (ret == -1 && errno == ETIME) {
         return VK_TIMEOUT;
      } else if (ret == -1) {
         /* We don't know the real error. */
         return vk_errorf(VK_ERROR_OUT_OF_DEVICE_MEMORY,
                          "gem wait failed: %m");
      }
   }

   return VK_SUCCESS;
}
// Queue semaphore functions

VkResult anv_CreateSemaphore(
    VkDevice                                    device,
    const VkSemaphoreCreateInfo*                pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkSemaphore*                                pSemaphore)
{
   /* The DRM execbuffer ioctl always executes in order, even between
    * different rings. As such, there's nothing to do for the user-space
    * semaphore.
    */
   *pSemaphore = (VkSemaphore)1;

   return VK_SUCCESS;
}

void anv_DestroySemaphore(
    VkDevice                                    device,
    VkSemaphore                                 semaphore,
    const VkAllocationCallbacks*                pAllocator)
{
}
VkResult anv_CreateEvent(
    VkDevice                                    _device,
    const VkEventCreateInfo*                    pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkEvent*                                    pEvent)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_state state;
   struct anv_event *event;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);

   state = anv_state_pool_alloc(&device->dynamic_state_pool,
                                sizeof(*event), 8);
   event = state.map;
   event->state = state;
   event->semaphore = VK_EVENT_RESET;

   if (!device->info.has_llc) {
      /* Make sure the writes we're flushing have landed. */
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(event);
   }

   *pEvent = anv_event_to_handle(event);

   return VK_SUCCESS;
}

void anv_DestroyEvent(
    VkDevice                                    _device,
    VkEvent                                     _event,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   anv_state_pool_free(&device->dynamic_state_pool, event->state);
}

VkResult anv_GetEventStatus(
    VkDevice                                    _device,
    VkEvent                                     _event)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   if (!device->info.has_llc) {
      /* Invalidate read cache before reading event written by GPU. */
      __builtin_ia32_clflush(event);
      __builtin_ia32_mfence();
   }

   return event->semaphore;
}

VkResult anv_SetEvent(
    VkDevice                                    _device,
    VkEvent                                     _event)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   event->semaphore = VK_EVENT_SET;

   if (!device->info.has_llc) {
      /* Make sure the writes we're flushing have landed. */
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(event);
   }

   return VK_SUCCESS;
}

VkResult anv_ResetEvent(
    VkDevice                                    _device,
    VkEvent                                     _event)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_event, event, _event);

   event->semaphore = VK_EVENT_RESET;

   if (!device->info.has_llc) {
      /* Make sure the writes we're flushing have landed. */
      __builtin_ia32_mfence();
      __builtin_ia32_clflush(event);
   }

   return VK_SUCCESS;
}
// Buffer functions

VkResult anv_CreateBuffer(
    VkDevice                                    _device,
    const VkBufferCreateInfo*                   pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkBuffer*                                   pBuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_buffer *buffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);

   buffer = anv_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (buffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   buffer->size = pCreateInfo->size;
   buffer->usage = pCreateInfo->usage;
   buffer->bo = NULL;
   buffer->offset = 0;

   *pBuffer = anv_buffer_to_handle(buffer);

   return VK_SUCCESS;
}

void anv_DestroyBuffer(
    VkDevice                                    _device,
    VkBuffer                                    _buffer,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   anv_free2(&device->alloc, pAllocator, buffer);
}
void
anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
                              enum isl_format format,
                              uint32_t offset, uint32_t range, uint32_t stride)
{
   isl_buffer_fill_state(&device->isl_dev, state.map,
                         .address = offset,
                         .mocs = device->default_mocs,
                         .size = range,
                         .format = format,
                         .stride = stride);

   if (!device->info.has_llc)
      anv_state_clflush(state);
}

void anv_DestroySampler(
    VkDevice                                    _device,
    VkSampler                                   _sampler,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);

   anv_free2(&device->alloc, pAllocator, sampler);
}
VkResult anv_CreateFramebuffer(
    VkDevice                                    _device,
    const VkFramebufferCreateInfo*              pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkFramebuffer*                              pFramebuffer)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_framebuffer *framebuffer;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);

   size_t size = sizeof(*framebuffer) +
                 sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
   framebuffer = anv_alloc2(&device->alloc, pAllocator, size, 8,
                            VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (framebuffer == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   framebuffer->attachment_count = pCreateInfo->attachmentCount;
   for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
      VkImageView _iview = pCreateInfo->pAttachments[i];
      framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
   }

   framebuffer->width = pCreateInfo->width;
   framebuffer->height = pCreateInfo->height;
   framebuffer->layers = pCreateInfo->layers;

   *pFramebuffer = anv_framebuffer_to_handle(framebuffer);

   return VK_SUCCESS;
}

void anv_DestroyFramebuffer(
    VkDevice                                    _device,
    VkFramebuffer                               _fb,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);

   anv_free2(&device->alloc, pAllocator, fb);
}
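/* Sketch (not from the original file) of the layout the size computation in
 * anv_CreateFramebuffer assumes: the real struct lives in anv_private.h, but
 * it is a fixed header followed by a trailing array of attachment pointers,
 * allocated as a single block. A hypothetical rendition:
 *
 *    struct anv_framebuffer {
 *       uint32_t width;
 *       uint32_t height;
 *       uint32_t layers;
 *       uint32_t attachment_count;
 *       struct anv_image_view *attachments[0];
 *    };
 */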