/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "util/mesa-sha1.h"
#include "anv_private.h"
#include "brw_nir.h"
#include "anv_nir.h"
#include "spirv/nir_spirv.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"

VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = anv_alloc2(&device->alloc, pAllocator,
                       sizeof(*module) + pCreateInfo->codeSize, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->nir = NULL;
   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   /* Stash a SHA1 of the source; anv_hash_shader() folds it into the
    * pipeline-cache key below.
    */
   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   anv_free2(&device->alloc, pAllocator, module);
}

#define SPIR_V_MAGIC_NUMBER 0x07230203
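
/* Every SPIR-V binary starts with this magic number as its first 32-bit
 * word; anv_shader_compile_to_nir() asserts on it below before handing the
 * words to spirv_to_nir().
 */
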
/* Eventually, this will become part of anv_CreateShader.  Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_device *device,
                          struct anv_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
   if (strcmp(entrypoint_name, "main") != 0) {
      anv_finishme("Multiple shaders per module not really supported");
   }

   const struct brw_compiler *compiler =
      device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   nir_shader *nir;
   nir_function *entry_point;
   if (module->nir) {
      /* Some things such as our meta clear/blit code will give us a NIR
       * shader directly.  In that case, we just ignore the SPIR-V entirely
       * and just use the NIR shader.
       */
      nir = module->nir;
      nir->options = nir_options;
      nir_validate_shader(nir);

      assert(exec_list_length(&nir->functions) == 1);
      struct exec_node *node = exec_list_get_head(&nir->functions);
      entry_point = exec_node_data(nir_function, node, node);
   } else {
      uint32_t *spirv = (uint32_t *) module->data;
      assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
      assert(module->size % 4 == 0);

      uint32_t num_spec_entries = 0;
      struct nir_spirv_specialization *spec_entries = NULL;
      if (spec_info && spec_info->mapEntryCount > 0) {
         num_spec_entries = spec_info->mapEntryCount;
         spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
         for (uint32_t i = 0; i < num_spec_entries; i++) {
            VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
            const void *data = spec_info->pData + entry.offset;
            assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

            spec_entries[i].id = spec_info->pMapEntries[i].constantID;
            spec_entries[i].data = *(const uint32_t *)data;
         }
      }

      entry_point = spirv_to_nir(spirv, module->size / 4,
                                 spec_entries, num_spec_entries,
                                 stage, entrypoint_name, nir_options);
      nir = entry_point->shader;
      assert(nir->stage == stage);
      nir_validate_shader(nir);

      free(spec_entries);

      if (stage == MESA_SHADER_FRAGMENT) {
         nir_lower_wpos_center(nir);
         nir_validate_shader(nir);
      }

      nir_lower_returns(nir);
      nir_validate_shader(nir);

      nir_inline_functions(nir);
      nir_validate_shader(nir);

      /* Pick off the single entrypoint that we want */
      foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
         if (func != entry_point)
            exec_node_remove(&func->node);
      }
      assert(exec_list_length(&nir->functions) == 1);
      entry_point->name = ralloc_strdup(entry_point, "main");

      nir_remove_dead_variables(nir, nir_var_shader_in);
      nir_remove_dead_variables(nir, nir_var_shader_out);
      nir_remove_dead_variables(nir, nir_var_system_value);
      nir_validate_shader(nir);

      nir_propagate_invariant(nir);
      nir_validate_shader(nir);

      nir_lower_io_to_temporaries(entry_point->shader, entry_point, true, false);

      nir_lower_system_values(nir);
      nir_validate_shader(nir);
   }

   /* Vulkan uses the separate-shader linking model */
   nir->info.separate_shader = true;

   nir = brw_preprocess_nir(compiler, nir);

   nir_shader_gather_info(nir, entry_point->impl);

   nir_variable_mode indirect_mask = 0;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   nir_lower_indirect_derefs(nir, indirect_mask);

   return nir;
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   anv_free2(&device->alloc, pAllocator, pipeline);
}

static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
   /* [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                 = _3DPRIM_PATCHLIST_1 */
};
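
/* VK_PRIMITIVE_TOPOLOGY_PATCH_LIST is intentionally absent above: the gen
 * patch topologies (_3DPRIM_PATCHLIST_1 .. _3DPRIM_PATCHLIST_32) also
 * encode the number of control points per patch, so a fixed table entry
 * can't express it; tessellation support will have to pick the variant.
 */
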
static void
populate_sampler_prog_key(const struct brw_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}

static void
populate_vs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}

static void
populate_gs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_wm_prog_key(const struct brw_device_info *devinfo,
                     const VkGraphicsPipelineCreateInfo *info,
                     const struct anv_graphics_pipeline_create_info *extra,
                     struct brw_wm_prog_key *key)
{
   ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);

   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* TODO: Fill out key->input_slots_valid */

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   if (extra && extra->color_attachment_count >= 0) {
      key->nr_color_regions = extra->color_attachment_count;
   } else {
      key->nr_color_regions =
         render_pass->subpasses[info->subpass].color_count;
   }

   key->replicate_alpha = key->nr_color_regions > 1 &&
                          info->pMultisampleState &&
                          info->pMultisampleState->alphaToCoverageEnable;

   if (info->pMultisampleState && info->pMultisampleState->rasterizationSamples > 1) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code take care of it.
       *
       * For example, minSampleShading = 0.5 at 4 rasterization samples
       * gives 0.5 * 4 = 2 > 1, so attributes must be interpolated
       * per-sample.
       */
      key->persample_interp =
         (info->pMultisampleState->minSampleShading *
          info->pMultisampleState->rasterizationSamples) > 1;
      key->multisample_fbo = true;
   }
}

static void
populate_cs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

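/* Shared front half of compilation for every stage: lower the module to
 * NIR, run the anv-specific lowering passes (push constants, dynamic
 * buffer offsets, pipeline layout), and size the prog_data param array
 * that will describe the push-constant data.
 */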
static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     gl_shader_stage stage,
                     const VkSpecializationInfo *spec_info,
                     struct brw_stage_prog_data *prog_data,
                     struct anv_pipeline_bind_map *map)
{
   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
                                               module, entrypoint, stage,
                                               spec_info);
   if (nir == NULL)
      return NULL;

   anv_nir_lower_push_constants(nir);

   /* Figure out the number of parameters */
   prog_data->nr_params = 0;

   if (nir->num_uniforms > 0) {
      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number
       */
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
   }

   if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;

   if (nir->info.num_images > 0) {
      prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE;
      pipeline->needs_data_cache = true;
   }

   if (stage == MESA_SHADER_COMPUTE)
      ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
         prog_data->nr_params++; /* The CS Thread ID uniform */

   if (nir->info.num_ssbos > 0)
      pipeline->needs_data_cache = true;

   if (prog_data->nr_params > 0) {
      /* XXX: I think we're leaking this */
      prog_data->param = (const union gl_constant_value **)
         malloc(prog_data->nr_params * sizeof(union gl_constant_value *));

      /* We now set the param values to be offsets into a
       * anv_push_constant_data structure.  Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
      struct anv_push_constants *null_data = NULL;
      if (nir->num_uniforms > 0) {
         /* Fill out the push constants section of the param array */
         for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
            prog_data->param[i] = (const union gl_constant_value *)
               &null_data->client_data[i * sizeof(float)];
      }
   }

   /* Set up dynamic offsets */
   anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   if (pipeline->layout)
      anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);

   /* nir_lower_io will only handle the push constants; we need to set this
    * to the full number of possible uniforms.  nr_params counts float-sized
    * slots, hence the multiply by 4 to get bytes.
    */
   nir->num_uniforms = prog_data->nr_params * 4;

   return nir;
}

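/* Seed the binding table layout for a stage: "bias" reserves slots at the
 * front of the table before any ordinary surfaces, e.g. the render-target
 * surfaces prepended in anv_pipeline_compile_fs() below.
 */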
static void
anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
{
   prog_data->binding_table.size_bytes = 0;
   prog_data->binding_table.texture_start = bias;
   prog_data->binding_table.ubo_start = bias;
   prog_data->binding_table.ssbo_start = bias;
   prog_data->binding_table.image_start = bias;
}

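/* Hand the compiled kernel to the pipeline cache when one was supplied, so
 * identical shaders can be shared across pipelines; without a cache, wrap
 * it directly in a free-standing anv_shader_bin.
 */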
static struct anv_shader_bin *
anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
                           struct anv_pipeline_cache *cache,
                           const void *key_data, uint32_t key_size,
                           const void *kernel_data, uint32_t kernel_size,
                           const void *prog_data, uint32_t prog_data_size,
                           const struct anv_pipeline_bind_map *bind_map)
{
   if (cache) {
      return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              prog_data, prog_data_size,
                                              bind_map);
   } else {
      return anv_shader_bin_create(pipeline->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size, bind_map);
   }
}

static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                gl_shader_stage stage,
                                struct anv_shader_bin *shader)
{
   pipeline->shaders[stage] = shader;
   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
}

static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_vs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_vs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_vs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_VERTEX, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      if (module->nir == NULL)
         ralloc_steal(mem_ctx, nir);

      prog_data.inputs_read = nir->info.inputs_read;

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info.outputs_written,
                          nir->info.separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, false, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data, sizeof(prog_data), &map);
      if (bin == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   const struct brw_vs_prog_data *vs_prog_data =
      (const struct brw_vs_prog_data *)anv_shader_bin_get_prog_data(bin);

   if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
      pipeline->vs_simd8 = bin->kernel.offset;
      pipeline->vs_vec4 = NO_KERNEL;
   } else {
      pipeline->vs_simd8 = NO_KERNEL;
      pipeline->vs_vec4 = bin->kernel.offset;
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_gs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_gs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_gs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_GEOMETRY, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      if (module->nir == NULL)
         ralloc_steal(mem_ctx, nir);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info.outputs_written,
                          nir->info.separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data, sizeof(prog_data), &map);
      if (bin == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   pipeline->gs_kernel = bin->kernel.offset;

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        const struct anv_graphics_pipeline_create_info *extra,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_wm_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_wm_prog_key(&pipeline->device->info, info, extra, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_wm_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor + 8,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_FRAGMENT, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      unsigned num_rts = 0;
      struct anv_pipeline_binding rt_bindings[8];
      nir_function_impl *impl = nir_shader_get_entrypoint(nir)->impl;
      nir_foreach_variable_safe(var, &nir->outputs) {
         if (var->data.location < FRAG_RESULT_DATA0)
            continue;

         unsigned rt = var->data.location - FRAG_RESULT_DATA0;
         if (rt >= key.nr_color_regions) {
            /* Out-of-bounds, throw it away */
            var->data.mode = nir_var_local;
            exec_node_remove(&var->node);
            exec_list_push_tail(&impl->locals, &var->node);
            continue;
         }

         /* Give it a new, compacted, location */
         var->data.location = FRAG_RESULT_DATA0 + num_rts;

         unsigned array_len =
            glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
         assert(num_rts + array_len <= 8);

         for (unsigned i = 0; i < array_len; i++) {
            rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
               .binding = 0,
               .index = rt + i,
            };
         }

         num_rts += array_len;
      }

      if (pipeline->use_repclear) {
         assert(num_rts == 1);
         key.nr_color_regions = 1;
      }

      if (num_rts == 0) {
         /* If we have no render targets, we need a null render target */
         rt_bindings[0] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = UINT8_MAX,
         };
         num_rts = 1;
      }
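
      /* The bind map was created with surface_to_descriptor pointing 8
       * entries into the array, so we can now walk it back and prepend the
       * render-target bindings without disturbing the surfaces recorded by
       * anv_pipeline_compile().
       */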
      assert(num_rts <= 8);
      map.surface_to_descriptor -= num_rts;
      map.surface_count += num_rts;
      assert(map.surface_count <= 256);
      memcpy(map.surface_to_descriptor, rt_bindings,
             num_rts * sizeof(*rt_bindings));

      anv_fill_binding_table(&prog_data.base, num_rts);

      void *mem_ctx = ralloc_context(NULL);

      if (module->nir == NULL)
         ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, -1, true, pipeline->use_repclear,
                        &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data, sizeof(prog_data), &map);
      if (bin == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   pipeline->ps_ksp0 = bin->kernel.offset;

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct anv_pipeline_bind_map map;
   struct brw_cs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_cs_prog_key(&pipeline->device->info, &key);

   if (cache) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
                      pipeline->layout, spec_info);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_cs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_COMPUTE, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base, 1);

      void *mem_ctx = ralloc_context(NULL);

      if (module->nir == NULL)
         ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data, sizeof(prog_data), &map);
      if (bin == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   pipeline->cs_simd = bin->kernel.offset;

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);

   return VK_SUCCESS;
}

static void
anv_setup_pipeline_l3_config(struct anv_pipeline *pipeline)
{
   const struct brw_device_info *devinfo = &pipeline->device->info;
   switch (devinfo->gen) {
   case 7:
      if (devinfo->is_haswell)
         gen75_setup_pipeline_l3_config(pipeline);
      else
         gen7_setup_pipeline_l3_config(pipeline);
      break;
   case 8:
      gen8_setup_pipeline_l3_config(pipeline);
      break;
   case 9:
      gen9_setup_pipeline_l3_config(pipeline);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}

static void
anv_compute_urb_partition(struct anv_pipeline *pipeline)
{
   const struct brw_device_info *devinfo = &pipeline->device->info;

   bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
   unsigned vs_size = vs_present ?
      get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
   unsigned vs_entry_size_bytes = vs_size * 64;
   bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
   unsigned gs_size = gs_present ?
      get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
   unsigned gs_entry_size_bytes = gs_size * 64;

   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
    *
    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
    *     Allocation Size is less than 9 512-bit URB entries.
    *
    * Similar text exists for GS.
    */
   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;

   /* URB allocations must be done in 8k chunks. */
   unsigned chunk_size_bytes = 8192;

   /* Determine the size of the URB in chunks. */
   unsigned urb_chunks = pipeline->urb.total_size * 1024 / chunk_size_bytes;
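   /* For example, a 128 kB URB gives 128 * 1024 / 8192 = 16 chunks to split
    * between push constants, the VS, and the GS.
    */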

   /* Reserve space for push constants */
   unsigned push_constant_kb;
   if (pipeline->device->info.gen >= 8)
      push_constant_kb = 32;
   else if (pipeline->device->info.is_haswell)
      push_constant_kb = pipeline->device->info.gt == 3 ? 32 : 16;
   else
      push_constant_kb = 16;

   unsigned push_constant_bytes = push_constant_kb * 1024;
   unsigned push_constant_chunks =
      push_constant_bytes / chunk_size_bytes;
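   /* On gen8+ that works out to 32 * 1024 / 8192 = 4 chunks set aside for
    * push constants.
    */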

   /* Initially, assign each stage the minimum amount of URB space it needs,
    * and make a note of how much additional space it "wants" (the amount of
    * additional space it could actually make use of).
    */

   /* VS has a lower limit on the number of URB entries */
   unsigned vs_chunks =
      ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes;
   unsigned vs_wants =
      ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes - vs_chunks;

   unsigned gs_chunks = 0;
   unsigned gs_wants = 0;
   if (gs_present) {
      /* There are two constraints on the minimum amount of URB space we can
       * allocate:
       *
       * (1) We need room for at least 2 URB entries, since we always operate
       * the GS in DUAL_OBJECT mode.
       *
       * (2) We can't allocate less than nr_gs_entries_granularity.
       */
      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
                        chunk_size_bytes) / chunk_size_bytes;
      gs_wants =
         ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
   }

   /* There should always be enough URB space to satisfy the minimum
    * requirements of each stage.
    */
   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
   assert(total_needs <= urb_chunks);

   /* Mete out remaining space (if any) in proportion to "wants". */
   unsigned total_wants = vs_wants + gs_wants;
   unsigned remaining_space = urb_chunks - total_needs;
   if (remaining_space > total_wants)
      remaining_space = total_wants;
   if (remaining_space > 0) {
      unsigned vs_additional = (unsigned)
         round(vs_wants * (((double) remaining_space) / total_wants));
      vs_chunks += vs_additional;
      remaining_space -= vs_additional;
      gs_chunks += remaining_space;
   }

   /* Sanity check that we haven't over-allocated. */
   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);

   /* Finally, compute the number of entries that can fit in the space
    * allocated to each stage.
    */
   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;

   /* Since we rounded up when computing *_wants, this may be slightly more
    * than the maximum allowed amount, so correct for that.
    */
   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);

   /* Ensure that we program a multiple of the granularity. */
   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);

   /* Finally, sanity check to make sure we have at least the minimum number
    * of entries needed for each stage.
    */
   assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
   if (gs_present)
      assert(nr_gs_entries >= 2);

   /* Lay out the URB in the following order:
    * - push constants
    * - VS
    * - GS
    */
   pipeline->urb.start[MESA_SHADER_VERTEX] = push_constant_chunks;
   pipeline->urb.size[MESA_SHADER_VERTEX] = vs_size;
   pipeline->urb.entries[MESA_SHADER_VERTEX] = nr_vs_entries;

   pipeline->urb.start[MESA_SHADER_GEOMETRY] = push_constant_chunks + vs_chunks;
   pipeline->urb.size[MESA_SHADER_GEOMETRY] = gs_size;
   pipeline->urb.entries[MESA_SHADER_GEOMETRY] = nr_gs_entries;

   pipeline->urb.start[MESA_SHADER_TESS_CTRL] = push_constant_chunks;
   pipeline->urb.size[MESA_SHADER_TESS_CTRL] = 1;
   pipeline->urb.entries[MESA_SHADER_TESS_CTRL] = 0;

   pipeline->urb.start[MESA_SHADER_TESS_EVAL] = push_constant_chunks;
   pipeline->urb.size[MESA_SHADER_TESS_EVAL] = 1;
   pipeline->urb.entries[MESA_SHADER_TESS_EVAL] = 0;
}

/**
 * Copy pipeline state not marked as dynamic.
 * Dynamic state is pipeline state which hasn't been provided at pipeline
 * creation time, but is dynamically provided afterwards using various
 * vkCmdSet* functions.
 *
 * The set of state considered "non_dynamic" is determined by the pieces of
 * state that have their corresponding VkDynamicState enums omitted from
 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
 *
 * @param[out] pipeline    Destination non_dynamic state.
 * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
 */
static void
copy_non_dynamic_state(struct anv_pipeline *pipeline,
                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

   pipeline->dynamic_state = default_dynamic_state;

   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++)
         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
   }
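
   /* "states" is a bitmask indexed by VkDynamicState values, so listing
    * e.g. VK_DYNAMIC_STATE_VIEWPORT (= 0) in pDynamicStates clears bit 0
    * above and skips the corresponding copy below.
    */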
   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pViewportState is [...] NULL if the pipeline
    *    has rasterization disabled.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pViewportState);

      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
      if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
         typed_memcpy(dynamic->viewport.viewports,
                      pCreateInfo->pViewportState->pViewports,
                      pCreateInfo->pViewportState->viewportCount);
      }

      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
      if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
         typed_memcpy(dynamic->scissor.scissors,
                      pCreateInfo->pViewportState->pScissors,
                      pCreateInfo->pViewportState->scissorCount);
      }
   }

   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pColorBlendState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use any color attachments.
    */
   bool uses_color_att = false;
   for (unsigned i = 0; i < subpass->color_count; ++i) {
      if (subpass->color_attachments[i] != VK_ATTACHMENT_UNUSED) {
         uses_color_att = true;
         break;
      }
   }

   if (uses_color_att &&
       !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pColorBlendState);

      if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
         typed_memcpy(dynamic->blend_constants,
                      pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depthstencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depthstencil defaults in
    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
    *
    * Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is created
    *    against does not use a depth/stencil attachment.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
       subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
      assert(pCreateInfo->pDepthStencilState);

      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }
   }

   pipeline->dynamic_state_mask = states;
}

static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present, as explained by the Vulkan (20 Oct 2015, git-aa308cb), Section
    * 4.2 Graphics Pipeline.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   if (renderpass != &anv_meta_dummy_renderpass) {
      assert(info->subpass < renderpass->subpass_count);
      subpass = &renderpass->subpasses[info->subpass];
   }

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pViewportState);
   assert(info->pRasterizationState);

   if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
      assert(info->pDepthStencilState);

   if (subpass && subpass->color_count > 0)
      assert(info->pColorBlendState);

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
}

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
                  struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const struct anv_graphics_pipeline_create_info *extra,
                  const VkAllocationCallbacks *alloc)
{
   VkResult result;

   anv_validate {
      anv_pipeline_validate_create_info(pCreateInfo);
   }

   if (alloc == NULL)
      alloc = &device->alloc;

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
   if (result != VK_SUCCESS)
      return result;

   pipeline->batch.alloc = alloc;
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   copy_non_dynamic_state(pipeline, pCreateInfo);
   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
                                  pCreateInfo->pRasterizationState->depthClampEnable;

   pipeline->use_repclear = extra && extra->use_repclear;

   pipeline->needs_data_cache = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->vs_simd8 = NO_KERNEL;
   pipeline->vs_vec4 = NO_KERNEL;
   pipeline->gs_kernel = NO_KERNEL;
   pipeline->ps_ksp0 = NO_KERNEL;

   pipeline->active_stages = 0;

   const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
   struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
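
   /* VkShaderStageFlagBits assigns each stage a single bit in pipeline
    * order, so ffs(bit) - 1 below maps a stage bit onto the matching
    * gl_shader_stage index.
    */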
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
      pStages[stage] = &pCreateInfo->pStages[i];
      modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
   }

   if (modules[MESA_SHADER_VERTEX]) {
      result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_VERTEX],
                                       pStages[MESA_SHADER_VERTEX]->pName,
                                       pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
      anv_finishme("no tessellation support");

   if (modules[MESA_SHADER_GEOMETRY]) {
      result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_GEOMETRY],
                                       pStages[MESA_SHADER_GEOMETRY]->pName,
                                       pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_FRAGMENT]) {
      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
                                       modules[MESA_SHADER_FRAGMENT],
                                       pStages[MESA_SHADER_FRAGMENT]->pName,
                                       pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
      /* Vertex is only optional if disable_vs is set */
      assert(extra->disable_vs);
   }

   anv_setup_pipeline_l3_config(pipeline);
   anv_compute_urb_partition(pipeline);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;

   uint64_t inputs_read;
   if (extra && extra->disable_vs) {
      /* If the VS is disabled, just assume the user knows what they're
       * doing and apply the layout blindly.  This can only come from
       * meta, so this *should* be safe.
       */
      inputs_read = ~0ull;
   } else {
      inputs_read = get_vs_prog_data(pipeline)->inputs_read;
   }

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &vi_info->pVertexAttributeDescriptions[i];

      if (inputs_read & (1 << (VERT_ATTRIB_GENERIC0 + desc->location)))
         pipeline->vb_used |= 1 << desc->binding;
   }

   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->binding_stride[desc->binding] = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup.
       */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   if (extra && extra->use_rectlist)
      pipeline->topology = _3DPRIM_RECTLIST;

   return VK_SUCCESS;

compile_fail:
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   anv_reloc_list_finish(&pipeline->batch_relocs, alloc);

   return result;
}

VkResult
anv_graphics_pipeline_create(
   VkDevice _device,
   VkPipelineCache _cache,
   const VkGraphicsPipelineCreateInfo *pCreateInfo,
   const struct anv_graphics_pipeline_create_info *extra,
   const VkAllocationCallbacks *pAllocator,
   VkPipeline *pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell)
         return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
      else
         return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
   case 8:
      return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
   case 9:
      return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_CreateGraphicsPipelines(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipelines)
{
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < count; i++) {
      result = anv_graphics_pipeline_create(_device,
                                            pipelineCache,
                                            &pCreateInfos[i],
                                            NULL, pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
         }

         return result;
      }
   }

   return VK_SUCCESS;
}

static VkResult anv_compute_pipeline_create(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkComputePipelineCreateInfo*          pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell)
         return gen75_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
      else
         return gen7_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
   case 8:
      return gen8_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
   case 9:
      return gen9_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_CreateComputePipelines(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkComputePipelineCreateInfo*          pCreateInfos,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipelines)
{
   VkResult result = VK_SUCCESS;

   unsigned i = 0;
   for (; i < count; i++) {
      result = anv_compute_pipeline_create(_device, pipelineCache,
                                           &pCreateInfos[i],
                                           pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++) {
            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);