android-x86/external-mesa.git: src/intel/vulkan/anv_device.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <dlfcn.h>
25 #include <assert.h>
26 #include <stdbool.h>
27 #include <string.h>
28 #include <sys/mman.h>
29 #include <sys/stat.h>
30 #include <unistd.h>
31 #include <fcntl.h>
32
33 #include "anv_private.h"
34 #include "util/strtod.h"
35 #include "util/debug.h"
36
37 #include "genxml/gen7_pack.h"
38
39 struct anv_dispatch_table dtable;
40
41 static void
42 compiler_debug_log(void *data, const char *fmt, ...)
43 { }
44
45 static void
46 compiler_perf_log(void *data, const char *fmt, ...)
47 {
48    va_list args;
49    va_start(args, fmt);
50
51    if (unlikely(INTEL_DEBUG & DEBUG_PERF))
52       vfprintf(stderr, fmt, args);
53
54    va_end(args);
55 }
56
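/* Use dladdr() to find the shared object that contains the given function
 * pointer, then stat() that file and report its mtime.  This gives us a
 * timestamp for the driver binary itself, which anv_device_get_cache_uuid()
 * below folds into the pipeline cache UUID.
 */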
57 static bool
58 anv_get_function_timestamp(void *ptr, uint32_t* timestamp)
59 {
60    Dl_info info;
61    struct stat st;
62    if (!dladdr(ptr, &info) || !info.dli_fname)
63       return false;
64
65    if (stat(info.dli_fname, &st))
66       return false;
67
68    *timestamp = st.st_mtim.tv_sec;
69    return true;
70 }
71
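/* Build the pipeline cache UUID from the driver binary's modification time.
 * If the timestamp cannot be determined, the UUID is left zeroed, we return
 * false, and physical device initialization below fails rather than risking
 * stale pipeline cache hits.
 */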
72 static bool
73 anv_device_get_cache_uuid(void *uuid)
74 {
75    uint32_t timestamp;
76
77    memset(uuid, 0, VK_UUID_SIZE);
78    if (!anv_get_function_timestamp(anv_device_get_cache_uuid, &timestamp))
79       return false;
80
81    snprintf(uuid, VK_UUID_SIZE, "anv-%d", timestamp);
82    return true;
83 }
84
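/* Open the DRM render node at @path and probe it: query the chipset ID and
 * device info, reject kernels that lack features we depend on (gem wait
 * timeouts, execbuf2, and WC mmap on non-LLC parts), derive the pipeline
 * cache UUID, and set up the compiler, WSI, and ISL state for this physical
 * device.
 */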
85 static VkResult
86 anv_physical_device_init(struct anv_physical_device *device,
87                          struct anv_instance *instance,
88                          const char *path)
89 {
90    VkResult result;
91    int fd;
92
93    fd = open(path, O_RDWR | O_CLOEXEC);
94    if (fd < 0)
95       return vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
96
97    device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
98    device->instance = instance;
99
100    assert(strlen(path) < ARRAY_SIZE(device->path));
101    strncpy(device->path, path, ARRAY_SIZE(device->path));
102
103    device->chipset_id = anv_gem_get_param(fd, I915_PARAM_CHIPSET_ID);
104    if (!device->chipset_id) {
105       result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
106       goto fail;
107    }
108
109    device->name = gen_get_device_name(device->chipset_id);
110    if (!gen_get_device_info(device->chipset_id, &device->info)) {
111       result = vk_error(VK_ERROR_INCOMPATIBLE_DRIVER);
112       goto fail;
113    }
114
115    if (device->info.is_haswell) {
116       fprintf(stderr, "WARNING: Haswell Vulkan support is incomplete\n");
117    } else if (device->info.gen == 7 && !device->info.is_baytrail) {
118       fprintf(stderr, "WARNING: Ivy Bridge Vulkan support is incomplete\n");
119    } else if (device->info.gen == 7 && device->info.is_baytrail) {
120       fprintf(stderr, "WARNING: Bay Trail Vulkan support is incomplete\n");
121    } else if (device->info.gen >= 8) {
122       /* Broadwell, Cherryview, Skylake, Broxton, and Kabylake are as fully
123        * supported as anything. */
124    } else {
125       result = vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
126                          "Vulkan not yet supported on %s", device->name);
127       goto fail;
128    }
129
130    device->cmd_parser_version = -1;
131    if (device->info.gen == 7) {
132       device->cmd_parser_version =
133          anv_gem_get_param(fd, I915_PARAM_CMD_PARSER_VERSION);
134       if (device->cmd_parser_version == -1) {
135          result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
136                             "failed to get command parser version");
137          goto fail;
138       }
139    }
140
141    if (anv_gem_get_aperture(fd, &device->aperture_size) == -1) {
142       result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
143                          "failed to get aperture size: %m");
144       goto fail;
145    }
146
147    if (!anv_gem_get_param(fd, I915_PARAM_HAS_WAIT_TIMEOUT)) {
148       result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
149                          "kernel missing gem wait");
150       goto fail;
151    }
152
153    if (!anv_gem_get_param(fd, I915_PARAM_HAS_EXECBUF2)) {
154       result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
155                          "kernel missing execbuf2");
156       goto fail;
157    }
158
159    if (!device->info.has_llc &&
160        anv_gem_get_param(fd, I915_PARAM_MMAP_VERSION) < 1) {
161       result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
162                          "kernel missing wc mmap");
163       goto fail;
164    }
165
166    if (!anv_device_get_cache_uuid(device->uuid)) {
167       result = vk_errorf(VK_ERROR_INITIALIZATION_FAILED,
168                          "cannot generate UUID");
169       goto fail;
170    }
171    bool swizzled = anv_gem_get_bit6_swizzle(fd, I915_TILING_X);
172
173    /* GENs prior to 8 do not support EU/Subslice info */
174    if (device->info.gen >= 8) {
175       device->subslice_total = anv_gem_get_param(fd, I915_PARAM_SUBSLICE_TOTAL);
176       device->eu_total = anv_gem_get_param(fd, I915_PARAM_EU_TOTAL);
177
178       /* Without this information, we cannot get the right Braswell
179        * brand strings, and we have to use conservative numbers for GPGPU on
180        * many platforms, but otherwise, things will just work.
181        */
182       if (device->subslice_total < 1 || device->eu_total < 1) {
183          fprintf(stderr, "WARNING: Kernel 4.1 required to properly"
184                          " query GPU properties.\n");
185       }
186    } else if (device->info.gen == 7) {
187       device->subslice_total = 1 << (device->info.gt - 1);
188    }
189
190    if (device->info.is_cherryview &&
191        device->subslice_total > 0 && device->eu_total > 0) {
192       /* Logical CS threads = EUs per subslice * 7 threads per EU */
193       uint32_t max_cs_threads = device->eu_total / device->subslice_total * 7;
194
195       /* Fuse configurations may give more threads than expected, never less. */
196       if (max_cs_threads > device->info.max_cs_threads)
197          device->info.max_cs_threads = max_cs_threads;
198    }
199
200    brw_process_intel_debug_variable();
201
202    device->compiler = brw_compiler_create(NULL, &device->info);
203    if (device->compiler == NULL) {
204       result = vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
205       goto fail;
206    }
207    device->compiler->shader_debug_log = compiler_debug_log;
208    device->compiler->shader_perf_log = compiler_perf_log;
209
210    result = anv_init_wsi(device);
211    if (result != VK_SUCCESS) {
212       ralloc_free(device->compiler);
213       goto fail;
214    }
215
216    isl_device_init(&device->isl_dev, &device->info, swizzled);
217
218    close(fd);
219    return VK_SUCCESS;
220
221 fail:
222    close(fd);
223    return result;
224 }
225
226 static void
227 anv_physical_device_finish(struct anv_physical_device *device)
228 {
229    anv_finish_wsi(device);
230    ralloc_free(device->compiler);
231 }
232
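/* Instance-level extensions advertised by the driver.  The window-system
 * surface extensions are only included for the platforms enabled at build
 * time.
 */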
233 static const VkExtensionProperties global_extensions[] = {
234    {
235       .extensionName = VK_KHR_SURFACE_EXTENSION_NAME,
236       .specVersion = 25,
237    },
238 #ifdef VK_USE_PLATFORM_XCB_KHR
239    {
240       .extensionName = VK_KHR_XCB_SURFACE_EXTENSION_NAME,
241       .specVersion = 6,
242    },
243 #endif
244 #ifdef VK_USE_PLATFORM_XLIB_KHR
245    {
246       .extensionName = VK_KHR_XLIB_SURFACE_EXTENSION_NAME,
247       .specVersion = 6,
248    },
249 #endif
250 #ifdef VK_USE_PLATFORM_WAYLAND_KHR
251    {
252       .extensionName = VK_KHR_WAYLAND_SURFACE_EXTENSION_NAME,
253       .specVersion = 5,
254    },
255 #endif
256 };
257
258 static const VkExtensionProperties device_extensions[] = {
259    {
260       .extensionName = VK_KHR_SWAPCHAIN_EXTENSION_NAME,
261       .specVersion = 68,
262    },
263 };
264
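/* Default VkAllocationCallbacks used when the application does not supply
 * its own.  These simply wrap malloc()/realloc()/free(); the requested
 * alignment is ignored, which is acceptable for the 8-byte alignments
 * requested throughout this file.
 */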
265 static void *
266 default_alloc_func(void *pUserData, size_t size, size_t align,
267                    VkSystemAllocationScope allocationScope)
268 {
269    return malloc(size);
270 }
271
272 static void *
273 default_realloc_func(void *pUserData, void *pOriginal, size_t size,
274                      size_t align, VkSystemAllocationScope allocationScope)
275 {
276    return realloc(pOriginal, size);
277 }
278
279 static void
280 default_free_func(void *pUserData, void *pMemory)
281 {
282    free(pMemory);
283 }
284
285 static const VkAllocationCallbacks default_alloc = {
286    .pUserData = NULL,
287    .pfnAllocation = default_alloc_func,
288    .pfnReallocation = default_realloc_func,
289    .pfnFree = default_free_func,
290 };
291
292 VkResult anv_CreateInstance(
293     const VkInstanceCreateInfo*                 pCreateInfo,
294     const VkAllocationCallbacks*                pAllocator,
295     VkInstance*                                 pInstance)
296 {
297    struct anv_instance *instance;
298
299    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO);
300
301    uint32_t client_version;
302    if (pCreateInfo->pApplicationInfo &&
303        pCreateInfo->pApplicationInfo->apiVersion != 0) {
304       client_version = pCreateInfo->pApplicationInfo->apiVersion;
305    } else {
306       client_version = VK_MAKE_VERSION(1, 0, 0);
307    }
308
309    if (VK_MAKE_VERSION(1, 0, 0) > client_version ||
310        client_version > VK_MAKE_VERSION(1, 0, 0xfff)) {
311       return vk_errorf(VK_ERROR_INCOMPATIBLE_DRIVER,
312                        "Client requested version %d.%d.%d",
313                        VK_VERSION_MAJOR(client_version),
314                        VK_VERSION_MINOR(client_version),
315                        VK_VERSION_PATCH(client_version));
316    }
317
318    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
319       bool found = false;
320       for (uint32_t j = 0; j < ARRAY_SIZE(global_extensions); j++) {
321          if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
322                     global_extensions[j].extensionName) == 0) {
323             found = true;
324             break;
325          }
326       }
327       if (!found)
328          return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
329    }
330
331    instance = vk_alloc2(&default_alloc, pAllocator, sizeof(*instance), 8,
332                          VK_SYSTEM_ALLOCATION_SCOPE_INSTANCE);
333    if (!instance)
334       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
335
336    instance->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
337
338    if (pAllocator)
339       instance->alloc = *pAllocator;
340    else
341       instance->alloc = default_alloc;
342
343    instance->apiVersion = client_version;
344    instance->physicalDeviceCount = -1;
345
346    _mesa_locale_init();
347
348    VG(VALGRIND_CREATE_MEMPOOL(instance, 0, false));
349
350    *pInstance = anv_instance_to_handle(instance);
351
352    return VK_SUCCESS;
353 }
354
355 void anv_DestroyInstance(
356     VkInstance                                  _instance,
357     const VkAllocationCallbacks*                pAllocator)
358 {
359    ANV_FROM_HANDLE(anv_instance, instance, _instance);
360
361    if (instance->physicalDeviceCount > 0) {
362       /* We support at most one physical device. */
363       assert(instance->physicalDeviceCount == 1);
364       anv_physical_device_finish(&instance->physicalDevice);
365    }
366
367    VG(VALGRIND_DESTROY_MEMPOOL(instance));
368
369    _mesa_locale_fini();
370
371    vk_free(&instance->alloc, instance);
372 }
373
374 VkResult anv_EnumeratePhysicalDevices(
375     VkInstance                                  _instance,
376     uint32_t*                                   pPhysicalDeviceCount,
377     VkPhysicalDevice*                           pPhysicalDevices)
378 {
379    ANV_FROM_HANDLE(anv_instance, instance, _instance);
380    VkResult result;
381
382    if (instance->physicalDeviceCount < 0) {
383       char path[20];
384       for (unsigned i = 0; i < 8; i++) {
385          snprintf(path, sizeof(path), "/dev/dri/renderD%d", 128 + i);
386          result = anv_physical_device_init(&instance->physicalDevice,
387                                            instance, path);
388          if (result != VK_ERROR_INCOMPATIBLE_DRIVER)
389             break;
390       }
391
392       if (result == VK_ERROR_INCOMPATIBLE_DRIVER) {
393          instance->physicalDeviceCount = 0;
394       } else if (result == VK_SUCCESS) {
395          instance->physicalDeviceCount = 1;
396       } else {
397          return result;
398       }
399    }
400
401    /* pPhysicalDeviceCount is an out parameter if pPhysicalDevices is NULL;
402     * otherwise it's an inout parameter.
403     *
404     * The Vulkan spec (git aaed022) says:
405     *
406     *    pPhysicalDeviceCount is a pointer to an unsigned integer variable
407     *    that is initialized with the number of devices the application is
408     *    prepared to receive handles to. pname:pPhysicalDevices is pointer to
409     *    an array of at least this many VkPhysicalDevice handles [...].
410     *
411     *    Upon success, if pPhysicalDevices is NULL, vkEnumeratePhysicalDevices
412     *    overwrites the contents of the variable pointed to by
413     *    pPhysicalDeviceCount with the number of physical devices in the
414     *    instance; otherwise, vkEnumeratePhysicalDevices overwrites
415     *    pPhysicalDeviceCount with the number of physical handles written to
416     *    pPhysicalDevices.
417     */
418    if (!pPhysicalDevices) {
419       *pPhysicalDeviceCount = instance->physicalDeviceCount;
420    } else if (*pPhysicalDeviceCount >= 1) {
421       pPhysicalDevices[0] = anv_physical_device_to_handle(&instance->physicalDevice);
422       *pPhysicalDeviceCount = 1;
423    } else if (*pPhysicalDeviceCount < instance->physicalDeviceCount) {
424       return VK_INCOMPLETE;
425    } else {
426       *pPhysicalDeviceCount = 0;
427    }
428
429    return VK_SUCCESS;
430 }
431
432 void anv_GetPhysicalDeviceFeatures(
433     VkPhysicalDevice                            physicalDevice,
434     VkPhysicalDeviceFeatures*                   pFeatures)
435 {
436    ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
437
438    *pFeatures = (VkPhysicalDeviceFeatures) {
439       .robustBufferAccess                       = true,
440       .fullDrawIndexUint32                      = true,
441       .imageCubeArray                           = false,
442       .independentBlend                         = true,
443       .geometryShader                           = true,
444       .tessellationShader                       = false,
445       .sampleRateShading                        = true,
446       .dualSrcBlend                             = true,
447       .logicOp                                  = true,
448       .multiDrawIndirect                        = false,
449       .drawIndirectFirstInstance                = false,
450       .depthClamp                               = true,
451       .depthBiasClamp                           = false,
452       .fillModeNonSolid                         = true,
453       .depthBounds                              = false,
454       .wideLines                                = true,
455       .largePoints                              = true,
456       .alphaToOne                               = true,
457       .multiViewport                            = true,
458       .samplerAnisotropy                        = true,
459       .textureCompressionETC2                   = pdevice->info.gen >= 8 ||
460                                                   pdevice->info.is_baytrail,
461       .textureCompressionASTC_LDR               = pdevice->info.gen >= 9, /* FINISHME CHV */
462       .textureCompressionBC                     = true,
463       .occlusionQueryPrecise                    = true,
464       .pipelineStatisticsQuery                  = false,
465       .fragmentStoresAndAtomics                 = true,
466       .shaderTessellationAndGeometryPointSize   = true,
467       .shaderImageGatherExtended                = false,
468       .shaderStorageImageExtendedFormats        = false,
469       .shaderStorageImageMultisample            = false,
470       .shaderUniformBufferArrayDynamicIndexing  = true,
471       .shaderSampledImageArrayDynamicIndexing   = true,
472       .shaderStorageBufferArrayDynamicIndexing  = true,
473       .shaderStorageImageArrayDynamicIndexing   = true,
474       .shaderStorageImageReadWithoutFormat      = false,
475       .shaderStorageImageWriteWithoutFormat     = true,
476       .shaderClipDistance                       = true,
477       .shaderCullDistance                       = true,
478       .shaderFloat64                            = false,
479       .shaderInt64                              = false,
480       .shaderInt16                              = false,
482       .variableMultisampleRate                  = false,
483       .inheritedQueries                         = false,
484    };
485
486    /* We can't do image stores in vec4 shaders */
487    pFeatures->vertexPipelineStoresAndAtomics =
488       pdevice->compiler->scalar_stage[MESA_SHADER_VERTEX] &&
489       pdevice->compiler->scalar_stage[MESA_SHADER_GEOMETRY];
490 }
491
492 void anv_GetPhysicalDeviceProperties(
493     VkPhysicalDevice                            physicalDevice,
494     VkPhysicalDeviceProperties*                 pProperties)
495 {
496    ANV_FROM_HANDLE(anv_physical_device, pdevice, physicalDevice);
497    const struct gen_device_info *devinfo = &pdevice->info;
498
499    const float time_stamp_base = devinfo->gen >= 9 ? 83.333 : 80.0;
500
501    /* See assertions made when programming the buffer surface state. */
502    const uint32_t max_raw_buffer_sz = devinfo->gen >= 7 ?
503                                       (1ul << 30) : (1ul << 27);
504
505    VkSampleCountFlags sample_counts =
506       isl_device_get_sample_counts(&pdevice->isl_dev);
507
508    VkPhysicalDeviceLimits limits = {
509       .maxImageDimension1D                      = (1 << 14),
510       .maxImageDimension2D                      = (1 << 14),
511       .maxImageDimension3D                      = (1 << 11),
512       .maxImageDimensionCube                    = (1 << 14),
513       .maxImageArrayLayers                      = (1 << 11),
514       .maxTexelBufferElements                   = 128 * 1024 * 1024,
515       .maxUniformBufferRange                    = (1ul << 27),
516       .maxStorageBufferRange                    = max_raw_buffer_sz,
517       .maxPushConstantsSize                     = MAX_PUSH_CONSTANTS_SIZE,
518       .maxMemoryAllocationCount                 = UINT32_MAX,
519       .maxSamplerAllocationCount                = 64 * 1024,
520       .bufferImageGranularity                   = 64, /* A cache line */
521       .sparseAddressSpaceSize                   = 0,
522       .maxBoundDescriptorSets                   = MAX_SETS,
523       .maxPerStageDescriptorSamplers            = 64,
524       .maxPerStageDescriptorUniformBuffers      = 64,
525       .maxPerStageDescriptorStorageBuffers      = 64,
526       .maxPerStageDescriptorSampledImages       = 64,
527       .maxPerStageDescriptorStorageImages       = 64,
528       .maxPerStageDescriptorInputAttachments    = 64,
529       .maxPerStageResources                     = 128,
530       .maxDescriptorSetSamplers                 = 256,
531       .maxDescriptorSetUniformBuffers           = 256,
532       .maxDescriptorSetUniformBuffersDynamic    = 256,
533       .maxDescriptorSetStorageBuffers           = 256,
534       .maxDescriptorSetStorageBuffersDynamic    = 256,
535       .maxDescriptorSetSampledImages            = 256,
536       .maxDescriptorSetStorageImages            = 256,
537       .maxDescriptorSetInputAttachments         = 256,
538       .maxVertexInputAttributes                 = 32,
539       .maxVertexInputBindings                   = 32,
540       .maxVertexInputAttributeOffset            = 2047,
541       .maxVertexInputBindingStride              = 2048,
542       .maxVertexOutputComponents                = 128,
543       .maxTessellationGenerationLevel           = 0,
544       .maxTessellationPatchSize                 = 0,
545       .maxTessellationControlPerVertexInputComponents = 0,
546       .maxTessellationControlPerVertexOutputComponents = 0,
547       .maxTessellationControlPerPatchOutputComponents = 0,
548       .maxTessellationControlTotalOutputComponents = 0,
549       .maxTessellationEvaluationInputComponents = 0,
550       .maxTessellationEvaluationOutputComponents = 0,
551       .maxGeometryShaderInvocations             = 32,
552       .maxGeometryInputComponents               = 64,
553       .maxGeometryOutputComponents              = 128,
554       .maxGeometryOutputVertices                = 256,
555       .maxGeometryTotalOutputComponents         = 1024,
556       .maxFragmentInputComponents               = 128,
557       .maxFragmentOutputAttachments             = 8,
558       .maxFragmentDualSrcAttachments            = 2,
559       .maxFragmentCombinedOutputResources       = 8,
560       .maxComputeSharedMemorySize               = 32768,
561       .maxComputeWorkGroupCount                 = { 65535, 65535, 65535 },
562       .maxComputeWorkGroupInvocations           = 16 * devinfo->max_cs_threads,
563       .maxComputeWorkGroupSize = {
564          16 * devinfo->max_cs_threads,
565          16 * devinfo->max_cs_threads,
566          16 * devinfo->max_cs_threads,
567       },
568       .subPixelPrecisionBits                    = 4 /* FIXME */,
569       .subTexelPrecisionBits                    = 4 /* FIXME */,
570       .mipmapPrecisionBits                      = 4 /* FIXME */,
571       .maxDrawIndexedIndexValue                 = UINT32_MAX,
572       .maxDrawIndirectCount                     = UINT32_MAX,
573       .maxSamplerLodBias                        = 16,
574       .maxSamplerAnisotropy                     = 16,
575       .maxViewports                             = MAX_VIEWPORTS,
576       .maxViewportDimensions                    = { (1 << 14), (1 << 14) },
577       .viewportBoundsRange                      = { INT16_MIN, INT16_MAX },
578       .viewportSubPixelBits                     = 13, /* We take a float? */
579       .minMemoryMapAlignment                    = 4096, /* A page */
580       .minTexelBufferOffsetAlignment            = 1,
581       .minUniformBufferOffsetAlignment          = 1,
582       .minStorageBufferOffsetAlignment          = 1,
583       .minTexelOffset                           = -8,
584       .maxTexelOffset                           = 7,
585       .minTexelGatherOffset                     = -8,
586       .maxTexelGatherOffset                     = 7,
587       .minInterpolationOffset                   = -0.5,
588       .maxInterpolationOffset                   = 0.4375,
589       .subPixelInterpolationOffsetBits          = 4,
590       .maxFramebufferWidth                      = (1 << 14),
591       .maxFramebufferHeight                     = (1 << 14),
592       .maxFramebufferLayers                     = (1 << 10),
593       .framebufferColorSampleCounts             = sample_counts,
594       .framebufferDepthSampleCounts             = sample_counts,
595       .framebufferStencilSampleCounts           = sample_counts,
596       .framebufferNoAttachmentsSampleCounts     = sample_counts,
597       .maxColorAttachments                      = MAX_RTS,
598       .sampledImageColorSampleCounts            = sample_counts,
599       .sampledImageIntegerSampleCounts          = VK_SAMPLE_COUNT_1_BIT,
600       .sampledImageDepthSampleCounts            = sample_counts,
601       .sampledImageStencilSampleCounts          = sample_counts,
602       .storageImageSampleCounts                 = VK_SAMPLE_COUNT_1_BIT,
603       .maxSampleMaskWords                       = 1,
604       .timestampComputeAndGraphics              = false,
605       .timestampPeriod                          = time_stamp_base,
606       .maxClipDistances                         = 8,
607       .maxCullDistances                         = 8,
608       .maxCombinedClipAndCullDistances          = 8,
609       .discreteQueuePriorities                  = 1,
610       .pointSizeRange                           = { 0.125, 255.875 },
611       .lineWidthRange                           = { 0.0, 7.9921875 },
612       .pointSizeGranularity                     = (1.0 / 8.0),
613       .lineWidthGranularity                     = (1.0 / 128.0),
614       .strictLines                              = false, /* FINISHME */
615       .standardSampleLocations                  = true,
616       .optimalBufferCopyOffsetAlignment         = 128,
617       .optimalBufferCopyRowPitchAlignment       = 128,
618       .nonCoherentAtomSize                      = 64,
619    };
620
621    *pProperties = (VkPhysicalDeviceProperties) {
622       .apiVersion = VK_MAKE_VERSION(1, 0, 5),
623       .driverVersion = 1,
624       .vendorID = 0x8086,
625       .deviceID = pdevice->chipset_id,
626       .deviceType = VK_PHYSICAL_DEVICE_TYPE_INTEGRATED_GPU,
627       .limits = limits,
628       .sparseProperties = {0}, /* Broadwell doesn't do sparse. */
629    };
630
631    strcpy(pProperties->deviceName, pdevice->name);
632    memcpy(pProperties->pipelineCacheUUID, pdevice->uuid, VK_UUID_SIZE);
633 }
634
635 void anv_GetPhysicalDeviceQueueFamilyProperties(
636     VkPhysicalDevice                            physicalDevice,
637     uint32_t*                                   pCount,
638     VkQueueFamilyProperties*                    pQueueFamilyProperties)
639 {
640    if (pQueueFamilyProperties == NULL) {
641       *pCount = 1;
642       return;
643    }
644
645    assert(*pCount >= 1);
646
647    *pQueueFamilyProperties = (VkQueueFamilyProperties) {
648       .queueFlags = VK_QUEUE_GRAPHICS_BIT |
649                     VK_QUEUE_COMPUTE_BIT |
650                     VK_QUEUE_TRANSFER_BIT,
651       .queueCount = 1,
652       .timestampValidBits = 36, /* XXX: Real value here */
653       .minImageTransferGranularity = (VkExtent3D) { 1, 1, 1 },
654    };
655 }
656
657 void anv_GetPhysicalDeviceMemoryProperties(
658     VkPhysicalDevice                            physicalDevice,
659     VkPhysicalDeviceMemoryProperties*           pMemoryProperties)
660 {
661    ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
662    VkDeviceSize heap_size;
663
664    /* Reserve some wiggle room for the driver by exposing only 75% of the
665     * aperture to the heap.
666     */
667    heap_size = 3 * physical_device->aperture_size / 4;
668
669    if (physical_device->info.has_llc) {
670       /* Big core GPUs share LLC with the CPU and thus one memory type can be
671        * both cached and coherent at the same time.
672        */
673       pMemoryProperties->memoryTypeCount = 1;
674       pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
675          .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
676                           VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
677                           VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
678                           VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
679          .heapIndex = 0,
680       };
681    } else {
682       /* The spec requires that we expose a host-visible, coherent memory
683        * type, but Atom GPUs don't share LLC. Thus we offer two memory types
684        * to give the application a choice: cached but not coherent, or
685        * coherent but uncached (write-combined).
686        */
687       pMemoryProperties->memoryTypeCount = 2;
688       pMemoryProperties->memoryTypes[0] = (VkMemoryType) {
689          .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
690                           VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
691                           VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
692          .heapIndex = 0,
693       };
694       pMemoryProperties->memoryTypes[1] = (VkMemoryType) {
695          .propertyFlags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT |
696                           VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
697                           VK_MEMORY_PROPERTY_HOST_CACHED_BIT,
698          .heapIndex = 0,
699       };
700    }
701
702    pMemoryProperties->memoryHeapCount = 1;
703    pMemoryProperties->memoryHeaps[0] = (VkMemoryHeap) {
704       .size = heap_size,
705       .flags = VK_MEMORY_HEAP_DEVICE_LOCAL_BIT,
706    };
707 }
708
709 PFN_vkVoidFunction anv_GetInstanceProcAddr(
710     VkInstance                                  instance,
711     const char*                                 pName)
712 {
713    return anv_lookup_entrypoint(NULL, pName);
714 }
715
716 /* With version 1+ of the loader interface the ICD should expose
717  * vk_icdGetInstanceProcAddr to work around certain LD_PRELOAD issues seen in apps.
718  */
719 PUBLIC
720 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
721     VkInstance                                  instance,
722     const char*                                 pName);
723
724 PUBLIC
725 VKAPI_ATTR PFN_vkVoidFunction VKAPI_CALL vk_icdGetInstanceProcAddr(
726     VkInstance                                  instance,
727     const char*                                 pName)
728 {
729    return anv_GetInstanceProcAddr(instance, pName);
730 }
731
732 PFN_vkVoidFunction anv_GetDeviceProcAddr(
733     VkDevice                                    _device,
734     const char*                                 pName)
735 {
736    ANV_FROM_HANDLE(anv_device, device, _device);
737    return anv_lookup_entrypoint(&device->info, pName);
738 }
739
740 static void
741 anv_queue_init(struct anv_device *device, struct anv_queue *queue)
742 {
743    queue->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
744    queue->device = device;
745    queue->pool = &device->surface_state_pool;
746 }
747
748 static void
749 anv_queue_finish(struct anv_queue *queue)
750 {
751 }
752
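/* Copy @size bytes from @p into freshly allocated state pool memory.  On
 * non-LLC platforms the corresponding cache lines are flushed so the GPU
 * sees the data.
 */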
753 static struct anv_state
754 anv_state_pool_emit_data(struct anv_state_pool *pool, size_t size, size_t align, const void *p)
755 {
756    struct anv_state state;
757
758    state = anv_state_pool_alloc(pool, size, align);
759    memcpy(state.map, p, size);
760
761    if (!pool->block_pool->device->info.has_llc)
762       anv_state_clflush(state);
763
764    return state;
765 }
766
767 struct gen8_border_color {
768    union {
769       float float32[4];
770       uint32_t uint32[4];
771    };
772    /* Pad out to 64 bytes */
773    uint32_t _pad[12];
774 };
775
776 static void
777 anv_device_init_border_colors(struct anv_device *device)
778 {
779    static const struct gen8_border_color border_colors[] = {
780       [VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK] =  { .float32 = { 0.0, 0.0, 0.0, 0.0 } },
781       [VK_BORDER_COLOR_FLOAT_OPAQUE_BLACK] =       { .float32 = { 0.0, 0.0, 0.0, 1.0 } },
782       [VK_BORDER_COLOR_FLOAT_OPAQUE_WHITE] =       { .float32 = { 1.0, 1.0, 1.0, 1.0 } },
783       [VK_BORDER_COLOR_INT_TRANSPARENT_BLACK] =    { .uint32 = { 0, 0, 0, 0 } },
784       [VK_BORDER_COLOR_INT_OPAQUE_BLACK] =         { .uint32 = { 0, 0, 0, 1 } },
785       [VK_BORDER_COLOR_INT_OPAQUE_WHITE] =         { .uint32 = { 1, 1, 1, 1 } },
786    };
787
788    device->border_colors = anv_state_pool_emit_data(&device->dynamic_state_pool,
789                                                     sizeof(border_colors), 64,
790                                                     border_colors);
791 }
792
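/* Submit a small, self-contained batch and wait for it to complete.  The
 * batch contents are copied into a BO from the batch BO pool, submitted to
 * the render ring with execbuf2, and then waited on with an effectively
 * infinite timeout.  anv_DeviceWaitIdle() below is implemented on top of
 * this.
 */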
793 VkResult
794 anv_device_submit_simple_batch(struct anv_device *device,
795                                struct anv_batch *batch)
796 {
797    struct drm_i915_gem_execbuffer2 execbuf;
798    struct drm_i915_gem_exec_object2 exec2_objects[1];
799    struct anv_bo bo, *exec_bos[1];
800    VkResult result = VK_SUCCESS;
801    uint32_t size;
802    int64_t timeout;
803    int ret;
804
805    /* Kernel driver requires 8 byte aligned batch length */
806    size = align_u32(batch->next - batch->start, 8);
807    result = anv_bo_pool_alloc(&device->batch_bo_pool, &bo, size);
808    if (result != VK_SUCCESS)
809       return result;
810
811    memcpy(bo.map, batch->start, size);
812    if (!device->info.has_llc)
813       anv_clflush_range(bo.map, size);
814
815    exec_bos[0] = &bo;
816    exec2_objects[0].handle = bo.gem_handle;
817    exec2_objects[0].relocation_count = 0;
818    exec2_objects[0].relocs_ptr = 0;
819    exec2_objects[0].alignment = 0;
820    exec2_objects[0].offset = bo.offset;
821    exec2_objects[0].flags = 0;
822    exec2_objects[0].rsvd1 = 0;
823    exec2_objects[0].rsvd2 = 0;
824
825    execbuf.buffers_ptr = (uintptr_t) exec2_objects;
826    execbuf.buffer_count = 1;
827    execbuf.batch_start_offset = 0;
828    execbuf.batch_len = size;
829    execbuf.cliprects_ptr = 0;
830    execbuf.num_cliprects = 0;
831    execbuf.DR1 = 0;
832    execbuf.DR4 = 0;
833
834    execbuf.flags =
835       I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
836    execbuf.rsvd1 = device->context_id;
837    execbuf.rsvd2 = 0;
838
839    result = anv_device_execbuf(device, &execbuf, exec_bos);
840    if (result != VK_SUCCESS)
841       goto fail;
842
843    timeout = INT64_MAX;
844    ret = anv_gem_wait(device, bo.gem_handle, &timeout);
845    if (ret != 0) {
846       /* We don't know the real error. */
847       result = vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
848       goto fail;
849    }
850
851  fail:
852    anv_bo_pool_free(&device->batch_bo_pool, &bo);
853
854    return result;
855 }
856
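/* Create a logical device: validate the requested extensions, open our own
 * render node FD, create a hardware context, and initialize the allocators
 * (batch BO pool, state pools and their block pools, scratch pool) and the
 * queue before dispatching to the per-gen device state setup.
 */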
857 VkResult anv_CreateDevice(
858     VkPhysicalDevice                            physicalDevice,
859     const VkDeviceCreateInfo*                   pCreateInfo,
860     const VkAllocationCallbacks*                pAllocator,
861     VkDevice*                                   pDevice)
862 {
863    ANV_FROM_HANDLE(anv_physical_device, physical_device, physicalDevice);
864    VkResult result;
865    struct anv_device *device;
866
867    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO);
868
869    for (uint32_t i = 0; i < pCreateInfo->enabledExtensionCount; i++) {
870       bool found = false;
871       for (uint32_t j = 0; j < ARRAY_SIZE(device_extensions); j++) {
872          if (strcmp(pCreateInfo->ppEnabledExtensionNames[i],
873                     device_extensions[j].extensionName) == 0) {
874             found = true;
875             break;
876          }
877       }
878       if (!found)
879          return vk_error(VK_ERROR_EXTENSION_NOT_PRESENT);
880    }
881
882    device = vk_alloc2(&physical_device->instance->alloc, pAllocator,
883                        sizeof(*device), 8,
884                        VK_SYSTEM_ALLOCATION_SCOPE_DEVICE);
885    if (!device)
886       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
887
888    device->_loader_data.loaderMagic = ICD_LOADER_MAGIC;
889    device->instance = physical_device->instance;
890    device->chipset_id = physical_device->chipset_id;
891
892    if (pAllocator)
893       device->alloc = *pAllocator;
894    else
895       device->alloc = physical_device->instance->alloc;
896
897    /* XXX(chadv): Can we dup() physicalDevice->fd here? */
898    device->fd = open(physical_device->path, O_RDWR | O_CLOEXEC);
899    if (device->fd == -1) {
900       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
901       goto fail_device;
902    }
903
904    device->context_id = anv_gem_create_context(device);
905    if (device->context_id == -1) {
906       result = vk_error(VK_ERROR_INITIALIZATION_FAILED);
907       goto fail_fd;
908    }
909
910    device->info = physical_device->info;
911    device->isl_dev = physical_device->isl_dev;
912
913    /* On Broadwell and later, we can use batch chaining to more efficiently
914     * implement growing command buffers.  Prior to Haswell, the kernel
915     * command parser gets in the way and we have to fall back to growing
916     * the batch.
917     */
918    device->can_chain_batches = device->info.gen >= 8;
919
920    device->robust_buffer_access = pCreateInfo->pEnabledFeatures &&
921       pCreateInfo->pEnabledFeatures->robustBufferAccess;
922
923    pthread_mutex_init(&device->mutex, NULL);
924
925    pthread_condattr_t condattr;
926    pthread_condattr_init(&condattr);
927    pthread_condattr_setclock(&condattr, CLOCK_MONOTONIC);
928    pthread_cond_init(&device->queue_submit, &condattr);
929    pthread_condattr_destroy(&condattr);
930
931    anv_bo_pool_init(&device->batch_bo_pool, device);
932
933    anv_block_pool_init(&device->dynamic_state_block_pool, device, 16384);
934
935    anv_state_pool_init(&device->dynamic_state_pool,
936                        &device->dynamic_state_block_pool);
937
938    anv_block_pool_init(&device->instruction_block_pool, device, 128 * 1024);
939    anv_state_pool_init(&device->instruction_state_pool,
940                        &device->instruction_block_pool);
941
942    anv_block_pool_init(&device->surface_state_block_pool, device, 4096);
943
944    anv_state_pool_init(&device->surface_state_pool,
945                        &device->surface_state_block_pool);
946
947    anv_bo_init_new(&device->workaround_bo, device, 1024);
948
949    anv_scratch_pool_init(device, &device->scratch_pool);
950
951    anv_queue_init(device, &device->queue);
952
953    switch (device->info.gen) {
954    case 7:
955       if (!device->info.is_haswell)
956          result = gen7_init_device_state(device);
957       else
958          result = gen75_init_device_state(device);
959       break;
960    case 8:
961       result = gen8_init_device_state(device);
962       break;
963    case 9:
964       result = gen9_init_device_state(device);
965       break;
966    default:
967       /* Shouldn't get here as we don't create physical devices for any other
968        * gens. */
969       unreachable("unhandled gen");
970    }
971    if (result != VK_SUCCESS)
972       goto fail_fd;
973
974    anv_device_init_blorp(device);
975
976    anv_device_init_border_colors(device);
977
978    *pDevice = anv_device_to_handle(device);
979
980    return VK_SUCCESS;
981
982  fail_fd:
983    close(device->fd);
984  fail_device:
985    vk_free(&device->alloc, device);
986
987    return result;
988 }
989
990 void anv_DestroyDevice(
991     VkDevice                                    _device,
992     const VkAllocationCallbacks*                pAllocator)
993 {
994    ANV_FROM_HANDLE(anv_device, device, _device);
995
996    anv_queue_finish(&device->queue);
997
998    anv_device_finish_blorp(device);
999
1000 #ifdef HAVE_VALGRIND
1001    /* We only need to free these to prevent valgrind errors.  The backing
1002     * BO will go away in a couple of lines so we don't actually leak.
1003     */
1004    anv_state_pool_free(&device->dynamic_state_pool, device->border_colors);
1005 #endif
1006
1007    anv_gem_munmap(device->workaround_bo.map, device->workaround_bo.size);
1008    anv_gem_close(device, device->workaround_bo.gem_handle);
1009
1010    anv_bo_pool_finish(&device->batch_bo_pool);
1011    anv_state_pool_finish(&device->dynamic_state_pool);
1012    anv_block_pool_finish(&device->dynamic_state_block_pool);
1013    anv_state_pool_finish(&device->instruction_state_pool);
1014    anv_block_pool_finish(&device->instruction_block_pool);
1015    anv_state_pool_finish(&device->surface_state_pool);
1016    anv_block_pool_finish(&device->surface_state_block_pool);
1017    anv_scratch_pool_finish(device, &device->scratch_pool);
1018
1019    close(device->fd);
1020
1021    pthread_mutex_destroy(&device->mutex);
1022
1023    vk_free(&device->alloc, device);
1024 }
1025
1026 VkResult anv_EnumerateInstanceExtensionProperties(
1027     const char*                                 pLayerName,
1028     uint32_t*                                   pPropertyCount,
1029     VkExtensionProperties*                      pProperties)
1030 {
1031    if (pProperties == NULL) {
1032       *pPropertyCount = ARRAY_SIZE(global_extensions);
1033       return VK_SUCCESS;
1034    }
1035
1036    *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(global_extensions));
1037    typed_memcpy(pProperties, global_extensions, *pPropertyCount);
1038
1039    if (*pPropertyCount < ARRAY_SIZE(global_extensions))
1040       return VK_INCOMPLETE;
1041
1042    return VK_SUCCESS;
1043 }
1044
1045 VkResult anv_EnumerateDeviceExtensionProperties(
1046     VkPhysicalDevice                            physicalDevice,
1047     const char*                                 pLayerName,
1048     uint32_t*                                   pPropertyCount,
1049     VkExtensionProperties*                      pProperties)
1050 {
1051    if (pProperties == NULL) {
1052       *pPropertyCount = ARRAY_SIZE(device_extensions);
1053       return VK_SUCCESS;
1054    }
1055
1056    *pPropertyCount = MIN2(*pPropertyCount, ARRAY_SIZE(device_extensions));
1057    typed_memcpy(pProperties, device_extensions, *pPropertyCount);
1058
1059    if (*pPropertyCount < ARRAY_SIZE(device_extensions))
1060       return VK_INCOMPLETE;
1061
1062    return VK_SUCCESS;
1063 }
1064
1065 VkResult anv_EnumerateInstanceLayerProperties(
1066     uint32_t*                                   pPropertyCount,
1067     VkLayerProperties*                          pProperties)
1068 {
1069    if (pProperties == NULL) {
1070       *pPropertyCount = 0;
1071       return VK_SUCCESS;
1072    }
1073
1074    /* None supported at this time */
1075    return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1076 }
1077
1078 VkResult anv_EnumerateDeviceLayerProperties(
1079     VkPhysicalDevice                            physicalDevice,
1080     uint32_t*                                   pPropertyCount,
1081     VkLayerProperties*                          pProperties)
1082 {
1083    if (pProperties == NULL) {
1084       *pPropertyCount = 0;
1085       return VK_SUCCESS;
1086    }
1087
1088    /* None supported at this time */
1089    return vk_error(VK_ERROR_LAYER_NOT_PRESENT);
1090 }
1091
1092 void anv_GetDeviceQueue(
1093     VkDevice                                    _device,
1094     uint32_t                                    queueNodeIndex,
1095     uint32_t                                    queueIndex,
1096     VkQueue*                                    pQueue)
1097 {
1098    ANV_FROM_HANDLE(anv_device, device, _device);
1099
1100    assert(queueIndex == 0);
1101
1102    *pQueue = anv_queue_to_handle(&device->queue);
1103 }
1104
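/* Thin wrapper around the i915 execbuffer2 ioctl.  On success, the
 * kernel-assigned offsets are copied back into our anv_bo structs so that
 * later submissions can use them as presumed offsets.
 */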
1105 VkResult
1106 anv_device_execbuf(struct anv_device *device,
1107                    struct drm_i915_gem_execbuffer2 *execbuf,
1108                    struct anv_bo **execbuf_bos)
1109 {
1110    int ret = anv_gem_execbuffer(device, execbuf);
1111    if (ret != 0) {
1112       /* We don't know the real error. */
1113       return vk_errorf(VK_ERROR_DEVICE_LOST, "execbuf2 failed: %m");
1114    }
1115
1116    struct drm_i915_gem_exec_object2 *objects =
1117       (void *)(uintptr_t)execbuf->buffers_ptr;
1118    for (uint32_t k = 0; k < execbuf->buffer_count; k++)
1119       execbuf_bos[k]->offset = objects[k].offset;
1120
1121    return VK_SUCCESS;
1122 }
1123
1124 VkResult anv_QueueSubmit(
1125     VkQueue                                     _queue,
1126     uint32_t                                    submitCount,
1127     const VkSubmitInfo*                         pSubmits,
1128     VkFence                                     _fence)
1129 {
1130    ANV_FROM_HANDLE(anv_queue, queue, _queue);
1131    ANV_FROM_HANDLE(anv_fence, fence, _fence);
1132    struct anv_device *device = queue->device;
1133    VkResult result = VK_SUCCESS;
1134
1135    /* We lock around QueueSubmit for three main reasons:
1136     *
1137     *  1) When a block pool is resized, we create a new gem handle with a
1138     *     different size and, in the case of surface states, possibly a
1139     *     different center offset but we re-use the same anv_bo struct when
1140     *     we do so.  If this happens in the middle of setting up an execbuf,
1141     *     we could end up with our list of BOs out of sync with our list of
1142     *     gem handles.
1143     *
1144     *  2) The algorithm we use for building the list of unique buffers isn't
1145     *     thread-safe.  While the client is supposed to synchronize around
1146     *     QueueSubmit, this would be extremely difficult to debug if it ever
1147     *     came up in the wild due to a broken app.  It's better to play it
1148     *     safe and just lock around QueueSubmit.
1149     *
1150     *  3) The anv_cmd_buffer_execbuf function may perform relocations in
1151     *     userspace.  Because the surface state buffer is shared between
1152     *     batches, we can't afford to have that happen from multiple threads
1153     *     at the same time.  Even though the user is supposed to ensure this
1154     *     doesn't happen, we play it safe as in (2) above.
1155     *
1156     * Since the only other operations that take the device lock, such as
1157     * block pool resizes, happen rarely, the lock is almost never contended
1158     * and taking it is cheap in this case.
1159     */
1160    pthread_mutex_lock(&device->mutex);
1161
1162    for (uint32_t i = 0; i < submitCount; i++) {
1163       for (uint32_t j = 0; j < pSubmits[i].commandBufferCount; j++) {
1164          ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer,
1165                          pSubmits[i].pCommandBuffers[j]);
1166          assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1167
1168          result = anv_cmd_buffer_execbuf(device, cmd_buffer);
1169          if (result != VK_SUCCESS)
1170             goto out;
1171       }
1172    }
1173
1174    if (fence) {
1175       struct anv_bo *fence_bo = &fence->bo;
1176       result = anv_device_execbuf(device, &fence->execbuf, &fence_bo);
1177       if (result != VK_SUCCESS)
1178          goto out;
1179
1180       /* Update the fence and wake up any waiters */
1181       assert(fence->state == ANV_FENCE_STATE_RESET);
1182       fence->state = ANV_FENCE_STATE_SUBMITTED;
1183       pthread_cond_broadcast(&device->queue_submit);
1184    }
1185
1186 out:
1187    pthread_mutex_unlock(&device->mutex);
1188
1189    return result;
1190 }
1191
1192 VkResult anv_QueueWaitIdle(
1193     VkQueue                                     _queue)
1194 {
1195    ANV_FROM_HANDLE(anv_queue, queue, _queue);
1196
1197    return anv_DeviceWaitIdle(anv_device_to_handle(queue->device));
1198 }
1199
1200 VkResult anv_DeviceWaitIdle(
1201     VkDevice                                    _device)
1202 {
1203    ANV_FROM_HANDLE(anv_device, device, _device);
1204    struct anv_batch batch;
1205
1206    uint32_t cmds[8];
1207    batch.start = batch.next = cmds;
1208    batch.end = (void *) cmds + sizeof(cmds);
1209
1210    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
1211    anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
1212
1213    return anv_device_submit_simple_batch(device, &batch);
1214 }
1215
1216 VkResult
1217 anv_bo_init_new(struct anv_bo *bo, struct anv_device *device, uint64_t size)
1218 {
1219    uint32_t gem_handle = anv_gem_create(device, size);
1220    if (!gem_handle)
1221       return vk_error(VK_ERROR_OUT_OF_DEVICE_MEMORY);
1222
1223    anv_bo_init(bo, gem_handle, size);
1224
1225    return VK_SUCCESS;
1226 }
1227
1228 VkResult anv_AllocateMemory(
1229     VkDevice                                    _device,
1230     const VkMemoryAllocateInfo*                 pAllocateInfo,
1231     const VkAllocationCallbacks*                pAllocator,
1232     VkDeviceMemory*                             pMem)
1233 {
1234    ANV_FROM_HANDLE(anv_device, device, _device);
1235    struct anv_device_memory *mem;
1236    VkResult result;
1237
1238    assert(pAllocateInfo->sType == VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO);
1239
1240    if (pAllocateInfo->allocationSize == 0) {
1241       /* Apparently, this is allowed */
1242       *pMem = VK_NULL_HANDLE;
1243       return VK_SUCCESS;
1244    }
1245
1246    /* We support exactly one memory heap. */
1247    assert(pAllocateInfo->memoryTypeIndex == 0 ||
1248           (!device->info.has_llc && pAllocateInfo->memoryTypeIndex < 2));
1249
1250    /* FINISHME: Fail if allocation request exceeds heap size. */
1251
1252    mem = vk_alloc2(&device->alloc, pAllocator, sizeof(*mem), 8,
1253                     VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1254    if (mem == NULL)
1255       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1256
1257    /* The kernel is going to give us whole pages anyway */
1258    uint64_t alloc_size = align_u64(pAllocateInfo->allocationSize, 4096);
1259
1260    result = anv_bo_init_new(&mem->bo, device, alloc_size);
1261    if (result != VK_SUCCESS)
1262       goto fail;
1263
1264    mem->type_index = pAllocateInfo->memoryTypeIndex;
1265
1266    mem->map = NULL;
1267    mem->map_size = 0;
1268
1269    *pMem = anv_device_memory_to_handle(mem);
1270
1271    return VK_SUCCESS;
1272
1273  fail:
1274    vk_free2(&device->alloc, pAllocator, mem);
1275
1276    return result;
1277 }
1278
1279 void anv_FreeMemory(
1280     VkDevice                                    _device,
1281     VkDeviceMemory                              _mem,
1282     const VkAllocationCallbacks*                pAllocator)
1283 {
1284    ANV_FROM_HANDLE(anv_device, device, _device);
1285    ANV_FROM_HANDLE(anv_device_memory, mem, _mem);
1286
1287    if (mem == NULL)
1288       return;
1289
1290    if (mem->map)
1291       anv_UnmapMemory(_device, _mem);
1292
1293    if (mem->bo.map)
1294       anv_gem_munmap(mem->bo.map, mem->bo.size);
1295
1296    if (mem->bo.gem_handle != 0)
1297       anv_gem_close(device, mem->bo.gem_handle);
1298
1299    vk_free2(&device->alloc, pAllocator, mem);
1300 }
1301
1302 VkResult anv_MapMemory(
1303     VkDevice                                    _device,
1304     VkDeviceMemory                              _memory,
1305     VkDeviceSize                                offset,
1306     VkDeviceSize                                size,
1307     VkMemoryMapFlags                            flags,
1308     void**                                      ppData)
1309 {
1310    ANV_FROM_HANDLE(anv_device, device, _device);
1311    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
1312
1313    if (mem == NULL) {
1314       *ppData = NULL;
1315       return VK_SUCCESS;
1316    }
1317
1318    if (size == VK_WHOLE_SIZE)
1319       size = mem->bo.size - offset;
1320
1321    /* From the Vulkan spec version 1.0.32 docs for MapMemory:
1322     *
1323     *  * If size is not equal to VK_WHOLE_SIZE, size must be greater than 0
1325     *  * If size is not equal to VK_WHOLE_SIZE, size must be less than or
1326     *    equal to the size of the memory minus offset
1327     */
1328    assert(size > 0);
1329    assert(offset + size <= mem->bo.size);
1330
1331    /* FIXME: Is this supposed to be thread safe? Since vkUnmapMemory() only
1332     * takes a VkDeviceMemory pointer, it seems like only one map of the memory
1333     * at a time is valid. We could just mmap up front and return an offset
1334     * pointer here, but that may exhaust virtual memory on 32 bit
1335     * userspace. */
1336
1337    uint32_t gem_flags = 0;
1338    if (!device->info.has_llc && mem->type_index == 0)
1339       gem_flags |= I915_MMAP_WC;
1340
1341    /* GEM will fail to map if the offset isn't 4k-aligned.  Round down. */
1342    uint64_t map_offset = offset & ~4095ull;
1343    assert(offset >= map_offset);
1344    uint64_t map_size = (offset + size) - map_offset;
1345
1346    /* Let's map whole pages */
1347    map_size = align_u64(map_size, 4096);
1348
1349    void *map = anv_gem_mmap(device, mem->bo.gem_handle,
1350                             map_offset, map_size, gem_flags);
1351    if (map == MAP_FAILED)
1352       return vk_error(VK_ERROR_MEMORY_MAP_FAILED);
1353
1354    mem->map = map;
1355    mem->map_size = map_size;
1356
1357    *ppData = mem->map + (offset - map_offset);
1358
1359    return VK_SUCCESS;
1360 }
1361
1362 void anv_UnmapMemory(
1363     VkDevice                                    _device,
1364     VkDeviceMemory                              _memory)
1365 {
1366    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
1367
1368    if (mem == NULL)
1369       return;
1370
1371    anv_gem_munmap(mem->map, mem->map_size);
1372
1373    mem->map = NULL;
1374    mem->map_size = 0;
1375 }
1376
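/* Flush the CPU cache lines covering each mapped range.  The start address
 * is rounded down to a cache-line boundary and the end is clamped to the
 * size of the mapping.
 */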
1377 static void
1378 clflush_mapped_ranges(struct anv_device         *device,
1379                       uint32_t                   count,
1380                       const VkMappedMemoryRange *ranges)
1381 {
1382    for (uint32_t i = 0; i < count; i++) {
1383       ANV_FROM_HANDLE(anv_device_memory, mem, ranges[i].memory);
1384       void *p = mem->map + (ranges[i].offset & ~CACHELINE_MASK);
1385       void *end;
1386
1387       if (ranges[i].offset + ranges[i].size > mem->map_size)
1388          end = mem->map + mem->map_size;
1389       else
1390          end = mem->map + ranges[i].offset + ranges[i].size;
1391
1392       while (p < end) {
1393          __builtin_ia32_clflush(p);
1394          p += CACHELINE_SIZE;
1395       }
1396    }
1397 }
1398
1399 VkResult anv_FlushMappedMemoryRanges(
1400     VkDevice                                    _device,
1401     uint32_t                                    memoryRangeCount,
1402     const VkMappedMemoryRange*                  pMemoryRanges)
1403 {
1404    ANV_FROM_HANDLE(anv_device, device, _device);
1405
1406    if (device->info.has_llc)
1407       return VK_SUCCESS;
1408
1409    /* Make sure the writes we're flushing have landed. */
1410    __builtin_ia32_mfence();
1411
1412    clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
1413
1414    return VK_SUCCESS;
1415 }
1416
1417 VkResult anv_InvalidateMappedMemoryRanges(
1418     VkDevice                                    _device,
1419     uint32_t                                    memoryRangeCount,
1420     const VkMappedMemoryRange*                  pMemoryRanges)
1421 {
1422    ANV_FROM_HANDLE(anv_device, device, _device);
1423
1424    if (device->info.has_llc)
1425       return VK_SUCCESS;
1426
1427    clflush_mapped_ranges(device, memoryRangeCount, pMemoryRanges);
1428
1429    /* Make sure no reads get moved up above the invalidate. */
1430    __builtin_ia32_mfence();
1431
1432    return VK_SUCCESS;
1433 }
1434
1435 void anv_GetBufferMemoryRequirements(
1436     VkDevice                                    device,
1437     VkBuffer                                    _buffer,
1438     VkMemoryRequirements*                       pMemoryRequirements)
1439 {
1440    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1441
1442    /* The Vulkan spec (git aaed022) says:
1443     *
1444     *    memoryTypeBits is a bitfield and contains one bit set for every
1445     *    supported memory type for the resource. The bit `1<<i` is set if and
1446     *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1447     *    structure for the physical device is supported.
1448     *
1449     * We support exactly one memory type.
1450     */
1451    pMemoryRequirements->memoryTypeBits = 1;
1452
1453    pMemoryRequirements->size = buffer->size;
1454    pMemoryRequirements->alignment = 16;
1455 }
1456
1457 void anv_GetImageMemoryRequirements(
1458     VkDevice                                    device,
1459     VkImage                                     _image,
1460     VkMemoryRequirements*                       pMemoryRequirements)
1461 {
1462    ANV_FROM_HANDLE(anv_image, image, _image);
1463
1464    /* The Vulkan spec (git aaed022) says:
1465     *
1466     *    memoryTypeBits is a bitfield and contains one bit set for every
1467     *    supported memory type for the resource. The bit `1<<i` is set if and
1468     *    only if the memory type `i` in the VkPhysicalDeviceMemoryProperties
1469     *    structure for the physical device is supported.
1470     *
1471     * We support exactly one memory type.
1472     */
1473    pMemoryRequirements->memoryTypeBits = 1;
1474
1475    pMemoryRequirements->size = image->size;
1476    pMemoryRequirements->alignment = image->alignment;
1477 }
1478
1479 void anv_GetImageSparseMemoryRequirements(
1480     VkDevice                                    device,
1481     VkImage                                     image,
1482     uint32_t*                                   pSparseMemoryRequirementCount,
1483     VkSparseImageMemoryRequirements*            pSparseMemoryRequirements)
1484 {
1485    stub();
1486 }
1487
1488 void anv_GetDeviceMemoryCommitment(
1489     VkDevice                                    device,
1490     VkDeviceMemory                              memory,
1491     VkDeviceSize*                               pCommittedMemoryInBytes)
1492 {
1493    *pCommittedMemoryInBytes = 0;
1494 }
1495
1496 VkResult anv_BindBufferMemory(
1497     VkDevice                                    device,
1498     VkBuffer                                    _buffer,
1499     VkDeviceMemory                              _memory,
1500     VkDeviceSize                                memoryOffset)
1501 {
1502    ANV_FROM_HANDLE(anv_device_memory, mem, _memory);
1503    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1504
1505    if (mem) {
1506       buffer->bo = &mem->bo;
1507       buffer->offset = memoryOffset;
1508    } else {
1509       buffer->bo = NULL;
1510       buffer->offset = 0;
1511    }
1512
1513    return VK_SUCCESS;
1514 }
1515
1516 VkResult anv_QueueBindSparse(
1517     VkQueue                                     queue,
1518     uint32_t                                    bindInfoCount,
1519     const VkBindSparseInfo*                     pBindInfo,
1520     VkFence                                     fence)
1521 {
1522    stub_return(VK_ERROR_INCOMPATIBLE_DRIVER);
1523 }
1524
1525 VkResult anv_CreateFence(
1526     VkDevice                                    _device,
1527     const VkFenceCreateInfo*                    pCreateInfo,
1528     const VkAllocationCallbacks*                pAllocator,
1529     VkFence*                                    pFence)
1530 {
1531    ANV_FROM_HANDLE(anv_device, device, _device);
1532    struct anv_bo fence_bo;
1533    struct anv_fence *fence;
1534    struct anv_batch batch;
1535    VkResult result;
1536
1537    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FENCE_CREATE_INFO);
1538
1539    result = anv_bo_pool_alloc(&device->batch_bo_pool, &fence_bo, 4096);
1540    if (result != VK_SUCCESS)
1541       return result;
1542
1543    /* Fences are small.  Just store the CPU data structure in the BO. */
1544    fence = fence_bo.map;
1545    fence->bo = fence_bo;
1546
1547    /* Place the batch after the CPU data but on its own cache line. */
1548    const uint32_t batch_offset = align_u32(sizeof(*fence), CACHELINE_SIZE);
1549    batch.next = batch.start = fence->bo.map + batch_offset;
1550    batch.end = fence->bo.map + fence->bo.size;
1551    anv_batch_emit(&batch, GEN7_MI_BATCH_BUFFER_END, bbe);
1552    anv_batch_emit(&batch, GEN7_MI_NOOP, noop);
1553
1554    if (!device->info.has_llc) {
1555       assert(((uintptr_t) batch.start & CACHELINE_MASK) == 0);
1556       assert(batch.next - batch.start <= CACHELINE_SIZE);
1557       __builtin_ia32_mfence();
1558       __builtin_ia32_clflush(batch.start);
1559    }
1560
1561    fence->exec2_objects[0].handle = fence->bo.gem_handle;
1562    fence->exec2_objects[0].relocation_count = 0;
1563    fence->exec2_objects[0].relocs_ptr = 0;
1564    fence->exec2_objects[0].alignment = 0;
1565    fence->exec2_objects[0].offset = fence->bo.offset;
1566    fence->exec2_objects[0].flags = 0;
1567    fence->exec2_objects[0].rsvd1 = 0;
1568    fence->exec2_objects[0].rsvd2 = 0;
1569
1570    fence->execbuf.buffers_ptr = (uintptr_t) fence->exec2_objects;
1571    fence->execbuf.buffer_count = 1;
1572    fence->execbuf.batch_start_offset = batch.start - fence->bo.map;
1573    fence->execbuf.batch_len = batch.next - batch.start;
1574    fence->execbuf.cliprects_ptr = 0;
1575    fence->execbuf.num_cliprects = 0;
1576    fence->execbuf.DR1 = 0;
1577    fence->execbuf.DR4 = 0;
1578
1579    fence->execbuf.flags =
1580       I915_EXEC_HANDLE_LUT | I915_EXEC_NO_RELOC | I915_EXEC_RENDER;
1581    fence->execbuf.rsvd1 = device->context_id;
1582    fence->execbuf.rsvd2 = 0;
1583
1584    if (pCreateInfo->flags & VK_FENCE_CREATE_SIGNALED_BIT) {
1585       fence->state = ANV_FENCE_STATE_SIGNALED;
1586    } else {
1587       fence->state = ANV_FENCE_STATE_RESET;
1588    }
1589
1590    *pFence = anv_fence_to_handle(fence);
1591
1592    return VK_SUCCESS;
1593 }
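
/* Resulting layout of the 4096-byte fence BO assembled above (batch_offset
 * is whatever align_u32(sizeof(*fence), CACHELINE_SIZE) comes out to):
 *
 *    +---------------------------+  0
 *    | struct anv_fence (CPU)    |
 *    +---------------------------+  batch_offset (cache-line aligned)
 *    | MI_BATCH_BUFFER_END       |
 *    | MI_NOOP                   |
 *    +---------------------------+
 *    | unused                    |
 *    +---------------------------+  4096
 *
 * Submitting this trivial batch via the execbuf baked into the fence gives
 * the kernel a request tied to the BO, so a later anv_gem_wait() on the BO
 * doubles as the fence-signaled test.
 */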
1594
1595 void anv_DestroyFence(
1596     VkDevice                                    _device,
1597     VkFence                                     _fence,
1598     const VkAllocationCallbacks*                pAllocator)
1599 {
1600    ANV_FROM_HANDLE(anv_device, device, _device);
1601    ANV_FROM_HANDLE(anv_fence, fence, _fence);
1602
1603    if (!fence)
1604       return;
1605
1606    assert(fence->bo.map == fence);
1607    anv_bo_pool_free(&device->batch_bo_pool, &fence->bo);
1608 }
1609
1610 VkResult anv_ResetFences(
1611     VkDevice                                    _device,
1612     uint32_t                                    fenceCount,
1613     const VkFence*                              pFences)
1614 {
1615    for (uint32_t i = 0; i < fenceCount; i++) {
1616       ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1617       fence->state = ANV_FENCE_STATE_RESET;
1618    }
1619
1620    return VK_SUCCESS;
1621 }
1622
1623 VkResult anv_GetFenceStatus(
1624     VkDevice                                    _device,
1625     VkFence                                     _fence)
1626 {
1627    ANV_FROM_HANDLE(anv_device, device, _device);
1628    ANV_FROM_HANDLE(anv_fence, fence, _fence);
1629    int64_t t = 0;
1630    int ret;
1631
1632    switch (fence->state) {
1633    case ANV_FENCE_STATE_RESET:
1634       /* If it hasn't even been sent off to the GPU yet, it's not ready */
1635       return VK_NOT_READY;
1636
1637    case ANV_FENCE_STATE_SIGNALED:
1638       /* It's been signaled, return success */
1639       return VK_SUCCESS;
1640
1641    case ANV_FENCE_STATE_SUBMITTED:
1642       /* It's been submitted to the GPU but we don't know if it's done yet. */
1643       ret = anv_gem_wait(device, fence->bo.gem_handle, &t);
1644       if (ret == 0) {
1645          fence->state = ANV_FENCE_STATE_SIGNALED;
1646          return VK_SUCCESS;
1647       } else {
1648          return VK_NOT_READY;
1649       }
1650    default:
1651       unreachable("Invalid fence status");
1652    }
1653 }
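
/* Fence life cycle assumed by the switch above and by anv_WaitForFences()
 * below:
 *
 *    RESET ----------> SUBMITTED ----------> SIGNALED
 *    (create / reset)  (queue submission)    (GPU retires the fence batch,
 *                                             observed via anv_gem_wait)
 *
 * RESET returns VK_NOT_READY right away because nothing has reached the
 * kernel yet; SUBMITTED polls the BO with a zero timeout and promotes the
 * fence to SIGNALED once the wait succeeds.
 */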
1654
1655 #define NSEC_PER_SEC 1000000000
1656 #define INT_TYPE_MAX(type) ((1ull << (sizeof(type) * 8 - 1)) - 1)
1657
1658 VkResult anv_WaitForFences(
1659     VkDevice                                    _device,
1660     uint32_t                                    fenceCount,
1661     const VkFence*                              pFences,
1662     VkBool32                                    waitAll,
1663     uint64_t                                    _timeout)
1664 {
1665    ANV_FROM_HANDLE(anv_device, device, _device);
1666    int ret;
1667
1668    /* DRM_IOCTL_I915_GEM_WAIT uses a signed 64-bit timeout and is supposed
1669     * to block indefinitely for timeouts <= 0.  Unfortunately, this was
1670     * broken for a couple of kernel releases.  Since there's no way to know
1671     * whether or not the kernel we're using is one of the broken ones, the
1672     * best we can do is to clamp the timeout to INT64_MAX.  This limits the
1673     * maximum timeout from 584 years to 292 years - likely not a big deal.
1674     */
1675    int64_t timeout = MIN2(_timeout, INT64_MAX);
1676
1677    uint32_t pending_fences = fenceCount;
1678    while (pending_fences) {
1679       pending_fences = 0;
1680       bool signaled_fences = false;
1681       for (uint32_t i = 0; i < fenceCount; i++) {
1682          ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1683          switch (fence->state) {
1684          case ANV_FENCE_STATE_RESET:
1685             /* This fence hasn't been submitted yet, we'll catch it the next
1686              * time around.  Yes, this may mean we dead-loop but, short of
1687              * lots of locking and a condition variable, there's not much that
1688              * we can do about that.
1689              */
1690             pending_fences++;
1691             continue;
1692
1693          case ANV_FENCE_STATE_SIGNALED:
1694             /* This fence is not pending.  If waitAll isn't set, we can return
1695              * early.  Otherwise, we have to keep going.
1696              */
1697             if (!waitAll)
1698                return VK_SUCCESS;
1699             continue;
1700
1701          case ANV_FENCE_STATE_SUBMITTED:
1702             /* These are the fences we really care about.  Go ahead and wait
1703              * on them until we hit the timeout.
1704              */
1705             ret = anv_gem_wait(device, fence->bo.gem_handle, &timeout);
1706             if (ret == -1 && errno == ETIME) {
1707                return VK_TIMEOUT;
1708             } else if (ret == -1) {
1709                /* We don't know the real error. */
1710                return vk_errorf(VK_ERROR_DEVICE_LOST, "gem wait failed: %m");
1711             } else {
1712                fence->state = ANV_FENCE_STATE_SIGNALED;
1713                signaled_fences = true;
1714                if (!waitAll)
1715                   return VK_SUCCESS;
1716                continue;
1717             }
1718          }
1719       }
1720
1721       if (pending_fences && !signaled_fences) {
1722          /* If we've hit this then someone decided to vkWaitForFences before
1723           * they've actually submitted any of them to a queue.  This is a
1724           * fairly pessimal case, so it's ok to lock here and use a standard
1725           * pthreads condition variable.
1726           */
1727          pthread_mutex_lock(&device->mutex);
1728
1729          /* It's possible that some of the fences have changed state since the
1730           * last time we checked.  Now that we have the lock, check for
1731           * pending fences again and don't wait if it's changed.
1732           */
1733          uint32_t now_pending_fences = 0;
1734          for (uint32_t i = 0; i < fenceCount; i++) {
1735             ANV_FROM_HANDLE(anv_fence, fence, pFences[i]);
1736             if (fence->state == ANV_FENCE_STATE_RESET)
1737                now_pending_fences++;
1738          }
1739          assert(now_pending_fences <= pending_fences);
1740
1741          if (now_pending_fences == pending_fences) {
1742             struct timespec before;
1743             clock_gettime(CLOCK_MONOTONIC, &before);
1744
1745             uint32_t abs_nsec = before.tv_nsec + timeout % NSEC_PER_SEC;
1746             uint64_t abs_sec = before.tv_sec + (abs_nsec / NSEC_PER_SEC) +
1747                                (timeout / NSEC_PER_SEC);
1748             abs_nsec %= NSEC_PER_SEC;
1749
1750             /* Avoid roll-over in tv_sec on 32-bit systems if the
1751              * user-provided timeout is UINT64_MAX
1752              */
1753             struct timespec abstime;
1754             abstime.tv_nsec = abs_nsec;
1755             abstime.tv_sec = MIN2(abs_sec, INT_TYPE_MAX(abstime.tv_sec));
1756
1757             ret = pthread_cond_timedwait(&device->queue_submit,
1758                                          &device->mutex, &abstime);
1759             assert(ret != EINVAL);
1760
1761             struct timespec after;
1762             clock_gettime(CLOCK_MONOTONIC, &after);
1763             uint64_t time_elapsed =
1764                ((uint64_t)after.tv_sec * NSEC_PER_SEC + after.tv_nsec) -
1765                ((uint64_t)before.tv_sec * NSEC_PER_SEC + before.tv_nsec);
1766
1767             if (time_elapsed >= timeout) {
1768                pthread_mutex_unlock(&device->mutex);
1769                return VK_TIMEOUT;
1770             }
1771
1772             timeout -= time_elapsed;
1773          }
1774
1775          pthread_mutex_unlock(&device->mutex);
1776       }
1777    }
1778
1779    return VK_SUCCESS;
1780 }
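
/* Worked example of the absolute-deadline arithmetic above, assuming
 * timeout = 2,500,000,000 ns (2.5 s) and before = { .tv_sec = 100,
 * .tv_nsec = 900,000,000 }:
 *
 *    abs_nsec = 900,000,000 + 2,500,000,000 % 1,000,000,000
 *             = 1,400,000,000
 *    abs_sec  = 100 + 1,400,000,000 / 1,000,000,000
 *                   + 2,500,000,000 / 1,000,000,000
 *             = 103
 *    abs_nsec %= 1,000,000,000               ->  400,000,000
 *
 * i.e. the deadline is 103.4 s on the CLOCK_MONOTONIC timeline, 2.5 s after
 * `before', with tv_sec additionally clamped so a near-UINT64_MAX timeout
 * cannot overflow a 32-bit time_t.
 */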
1781
1782 // Queue semaphore functions
1783
1784 VkResult anv_CreateSemaphore(
1785     VkDevice                                    device,
1786     const VkSemaphoreCreateInfo*                pCreateInfo,
1787     const VkAllocationCallbacks*                pAllocator,
1788     VkSemaphore*                                pSemaphore)
1789 {
1790    /* The DRM execbuffer ioctl always executes in-order, even between different
1791     * rings. As such, there's nothing to do for the user space semaphore.
1792     */
1793
1794    *pSemaphore = (VkSemaphore)1;
1795
1796    return VK_SUCCESS;
1797 }
1798
1799 void anv_DestroySemaphore(
1800     VkDevice                                    device,
1801     VkSemaphore                                 semaphore,
1802     const VkAllocationCallbacks*                pAllocator)
1803 {
1804 }
1805
1806 // Event functions
1807
1808 VkResult anv_CreateEvent(
1809     VkDevice                                    _device,
1810     const VkEventCreateInfo*                    pCreateInfo,
1811     const VkAllocationCallbacks*                pAllocator,
1812     VkEvent*                                    pEvent)
1813 {
1814    ANV_FROM_HANDLE(anv_device, device, _device);
1815    struct anv_state state;
1816    struct anv_event *event;
1817
1818    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_EVENT_CREATE_INFO);
1819
1820    state = anv_state_pool_alloc(&device->dynamic_state_pool,
1821                                 sizeof(*event), 8);
1822    event = state.map;
1823    event->state = state;
1824    event->semaphore = VK_EVENT_RESET;
1825
1826    if (!device->info.has_llc) {
1827       /* Make sure the writes we're flushing have landed. */
1828       __builtin_ia32_mfence();
1829       __builtin_ia32_clflush(event);
1830    }
1831
1832    *pEvent = anv_event_to_handle(event);
1833
1834    return VK_SUCCESS;
1835 }
1836
1837 void anv_DestroyEvent(
1838     VkDevice                                    _device,
1839     VkEvent                                     _event,
1840     const VkAllocationCallbacks*                pAllocator)
1841 {
1842    ANV_FROM_HANDLE(anv_device, device, _device);
1843    ANV_FROM_HANDLE(anv_event, event, _event);
1844
1845    if (!event)
1846       return;
1847
1848    anv_state_pool_free(&device->dynamic_state_pool, event->state);
1849 }
1850
1851 VkResult anv_GetEventStatus(
1852     VkDevice                                    _device,
1853     VkEvent                                     _event)
1854 {
1855    ANV_FROM_HANDLE(anv_device, device, _device);
1856    ANV_FROM_HANDLE(anv_event, event, _event);
1857
1858    if (!device->info.has_llc) {
1859       /* Invalidate read cache before reading event written by GPU. */
1860       __builtin_ia32_clflush(event);
1861       __builtin_ia32_mfence();
1862
1863    }
1864
1865    return event->semaphore;
1866 }
1867
1868 VkResult anv_SetEvent(
1869     VkDevice                                    _device,
1870     VkEvent                                     _event)
1871 {
1872    ANV_FROM_HANDLE(anv_device, device, _device);
1873    ANV_FROM_HANDLE(anv_event, event, _event);
1874
1875    event->semaphore = VK_EVENT_SET;
1876
1877    if (!device->info.has_llc) {
1878       /* Make sure the writes we're flushing have landed. */
1879       __builtin_ia32_mfence();
1880       __builtin_ia32_clflush(event);
1881    }
1882
1883    return VK_SUCCESS;
1884 }
1885
1886 VkResult anv_ResetEvent(
1887     VkDevice                                    _device,
1888     VkEvent                                     _event)
1889 {
1890    ANV_FROM_HANDLE(anv_device, device, _device);
1891    ANV_FROM_HANDLE(anv_event, event, _event);
1892
1893    event->semaphore = VK_EVENT_RESET;
1894
1895    if (!device->info.has_llc) {
1896       /* Make sure the writes we're flushing have landed. */
1897       __builtin_ia32_mfence();
1898       __builtin_ia32_clflush(event);
1899    }
1900
1901    return VK_SUCCESS;
1902 }
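
/* Note the ordering difference between the event write and read paths on
 * non-LLC parts:
 *
 *    write side (Set/Reset):  store;  mfence;  clflush;
 *       the fence makes sure the store has landed before the cache line is
 *       pushed out for the GPU to observe.
 *
 *    read side (GetStatus):   clflush;  mfence;  load;
 *       the flush discards any stale copy of the line the GPU may have
 *       updated behind our back, and the fence keeps the load from being
 *       reordered ahead of the flush.
 */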
1903
1904 // Buffer functions
1905
1906 VkResult anv_CreateBuffer(
1907     VkDevice                                    _device,
1908     const VkBufferCreateInfo*                   pCreateInfo,
1909     const VkAllocationCallbacks*                pAllocator,
1910     VkBuffer*                                   pBuffer)
1911 {
1912    ANV_FROM_HANDLE(anv_device, device, _device);
1913    struct anv_buffer *buffer;
1914
1915    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO);
1916
1917    buffer = vk_alloc2(&device->alloc, pAllocator, sizeof(*buffer), 8,
1918                        VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1919    if (buffer == NULL)
1920       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1921
1922    buffer->size = pCreateInfo->size;
1923    buffer->usage = pCreateInfo->usage;
1924    buffer->bo = NULL;
1925    buffer->offset = 0;
1926
1927    *pBuffer = anv_buffer_to_handle(buffer);
1928
1929    return VK_SUCCESS;
1930 }
1931
1932 void anv_DestroyBuffer(
1933     VkDevice                                    _device,
1934     VkBuffer                                    _buffer,
1935     const VkAllocationCallbacks*                pAllocator)
1936 {
1937    ANV_FROM_HANDLE(anv_device, device, _device);
1938    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
1939
1940    if (!buffer)
1941       return;
1942
1943    vk_free2(&device->alloc, pAllocator, buffer);
1944 }
1945
1946 void
1947 anv_fill_buffer_surface_state(struct anv_device *device, struct anv_state state,
1948                               enum isl_format format,
1949                               uint32_t offset, uint32_t range, uint32_t stride)
1950 {
1951    isl_buffer_fill_state(&device->isl_dev, state.map,
1952                          .address = offset,
1953                          .mocs = device->default_mocs,
1954                          .size = range,
1955                          .format = format,
1956                          .stride = stride);
1957
1958    if (!device->info.has_llc)
1959       anv_state_clflush(state);
1960 }
1961
1962 void anv_DestroySampler(
1963     VkDevice                                    _device,
1964     VkSampler                                   _sampler,
1965     const VkAllocationCallbacks*                pAllocator)
1966 {
1967    ANV_FROM_HANDLE(anv_device, device, _device);
1968    ANV_FROM_HANDLE(anv_sampler, sampler, _sampler);
1969
1970    if (!sampler)
1971       return;
1972
1973    vk_free2(&device->alloc, pAllocator, sampler);
1974 }
1975
1976 VkResult anv_CreateFramebuffer(
1977     VkDevice                                    _device,
1978     const VkFramebufferCreateInfo*              pCreateInfo,
1979     const VkAllocationCallbacks*                pAllocator,
1980     VkFramebuffer*                              pFramebuffer)
1981 {
1982    ANV_FROM_HANDLE(anv_device, device, _device);
1983    struct anv_framebuffer *framebuffer;
1984
1985    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO);
1986
1987    size_t size = sizeof(*framebuffer) +
1988                  sizeof(struct anv_image_view *) * pCreateInfo->attachmentCount;
1989    framebuffer = vk_alloc2(&device->alloc, pAllocator, size, 8,
1990                             VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1991    if (framebuffer == NULL)
1992       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1993
1994    framebuffer->attachment_count = pCreateInfo->attachmentCount;
1995    for (uint32_t i = 0; i < pCreateInfo->attachmentCount; i++) {
1996       VkImageView _iview = pCreateInfo->pAttachments[i];
1997       framebuffer->attachments[i] = anv_image_view_from_handle(_iview);
1998    }
1999
2000    framebuffer->width = pCreateInfo->width;
2001    framebuffer->height = pCreateInfo->height;
2002    framebuffer->layers = pCreateInfo->layers;
2003
2004    *pFramebuffer = anv_framebuffer_to_handle(framebuffer);
2005
2006    return VK_SUCCESS;
2007 }
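
/* The single vk_alloc2() above covers both the framebuffer struct and the
 * trailing array of attachment pointers it indexes, e.g. with three
 * attachments:
 *
 *    size = sizeof(struct anv_framebuffer)
 *         + 3 * sizeof(struct anv_image_view *)
 *
 * so framebuffer->attachments[0..2] live directly after the struct in the
 * same allocation and are freed together in anv_DestroyFramebuffer().
 */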
2008
2009 void anv_DestroyFramebuffer(
2010     VkDevice                                    _device,
2011     VkFramebuffer                               _fb,
2012     const VkAllocationCallbacks*                pAllocator)
2013 {
2014    ANV_FROM_HANDLE(anv_device, device, _device);
2015    ANV_FROM_HANDLE(anv_framebuffer, fb, _fb);
2016
2017    if (!fb)
2018       return;
2019
2020    vk_free2(&device->alloc, pAllocator, fb);
2021 }