/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/mesa-sha1.h"
#include "anv_private.h"
#include "brw_nir.h"
#include "anv_nir.h"
#include "spirv/nir_spirv.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"
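
/* A shader module is just the application-provided SPIR-V (or, for meta,
 * a prebuilt NIR shader) plus a SHA-1 of that code; the hash later becomes
 * part of the pipeline-cache lookup key for each compiled stage.
 */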

VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = anv_alloc2(&device->alloc, pAllocator,
                       sizeof(*module) + pCreateInfo->codeSize, 8,
                       VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->nir = NULL;
   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   anv_free2(&device->alloc, pAllocator, module);
}

#define SPIR_V_MAGIC_NUMBER 0x07230203

/* Eventually, this will become part of anv_CreateShader.  Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_device *device,
                          struct anv_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
   if (strcmp(entrypoint_name, "main") != 0) {
      anv_finishme("Multiple shaders per module not really supported");
   }

   const struct brw_compiler *compiler =
      device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   nir_shader *nir;
   nir_function *entry_point;
   if (module->nir) {
      /* Some things such as our meta clear/blit code will give us a NIR
       * shader directly.  In that case, we just ignore the SPIR-V entirely
       * and just use the NIR shader */
      nir = module->nir;
      nir->options = nir_options;
      nir_validate_shader(nir);

      assert(exec_list_length(&nir->functions) == 1);
      struct exec_node *node = exec_list_get_head(&nir->functions);
      entry_point = exec_node_data(nir_function, node, node);
   } else {
      uint32_t *spirv = (uint32_t *) module->data;
      assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
      assert(module->size % 4 == 0);

      uint32_t num_spec_entries = 0;
      struct nir_spirv_specialization *spec_entries = NULL;
      if (spec_info && spec_info->mapEntryCount > 0) {
         num_spec_entries = spec_info->mapEntryCount;
         spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
         for (uint32_t i = 0; i < num_spec_entries; i++) {
            VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
            const void *data = spec_info->pData + entry.offset;
            assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

            spec_entries[i].id = spec_info->pMapEntries[i].constantID;
            spec_entries[i].data = *(const uint32_t *)data;
         }
      }

      entry_point = spirv_to_nir(spirv, module->size / 4,
                                 spec_entries, num_spec_entries,
                                 stage, entrypoint_name, nir_options);
      nir = entry_point->shader;
      assert(nir->stage == stage);
      nir_validate_shader(nir);

      free(spec_entries);

      if (stage == MESA_SHADER_FRAGMENT) {
         nir_lower_wpos_center(nir);
         nir_validate_shader(nir);
      }

      nir_lower_returns(nir);
      nir_validate_shader(nir);

      nir_inline_functions(nir);
      nir_validate_shader(nir);

      /* Pick off the single entrypoint that we want */
      foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
         if (func != entry_point)
            exec_node_remove(&func->node);
      }
      assert(exec_list_length(&nir->functions) == 1);
      entry_point->name = ralloc_strdup(entry_point, "main");

      nir_remove_dead_variables(nir, nir_var_shader_in);
      nir_remove_dead_variables(nir, nir_var_shader_out);
      nir_remove_dead_variables(nir, nir_var_system_value);
      nir_validate_shader(nir);

      nir_lower_io_to_temporaries(entry_point->shader, entry_point, true, false);

      nir_lower_system_values(nir);
      nir_validate_shader(nir);
   }

   /* Vulkan uses the separate-shader linking model */
   nir->info.separate_shader = true;

   nir = brw_preprocess_nir(compiler, nir);

   nir_shader_gather_info(nir, entry_point->impl);

   nir_variable_mode indirect_mask = 0;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
      indirect_mask |= nir_var_shader_in;
   if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
      indirect_mask |= nir_var_local;

   nir_lower_indirect_derefs(nir, indirect_mask);

   return nir;
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
   anv_free2(&device->alloc, pAllocator, pipeline);
}

static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
/*   [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST]                = _3DPRIM_PATCHLIST_1 */
};
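
/* The populate_*_prog_key() helpers below fill in the backend compiler keys.
 * Most Vulkan state is baked into the pipeline object itself, so the keys
 * stay almost entirely zeroed; only sampler swizzles and a handful of
 * fragment-stage bits are set.
 */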

static void
populate_sampler_prog_key(const struct brw_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}

static void
populate_vs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}

static void
populate_gs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_wm_prog_key(const struct brw_device_info *devinfo,
                     const VkGraphicsPipelineCreateInfo *info,
                     const struct anv_graphics_pipeline_create_info *extra,
                     struct brw_wm_prog_key *key)
{
   ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);

   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* TODO: Fill out key->input_slots_valid */

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   if (extra && extra->color_attachment_count >= 0) {
      key->nr_color_regions = extra->color_attachment_count;
   } else {
      key->nr_color_regions =
         render_pass->subpasses[info->subpass].color_count;
   }

   key->replicate_alpha = key->nr_color_regions > 1 &&
                          info->pMultisampleState &&
                          info->pMultisampleState->alphaToCoverageEnable;

   if (info->pMultisampleState && info->pMultisampleState->rasterizationSamples > 1) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code take care of it.
       */
      key->persample_interp =
         (info->pMultisampleState->minSampleShading *
          info->pMultisampleState->rasterizationSamples) > 1;
      key->multisample_fbo = true;
   }
}

static void
populate_cs_prog_key(const struct brw_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}
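
/* Stage-independent compile front-end: translate the module to NIR, lower
 * push constants, size the uniform param array (push constants, dynamic
 * buffer offsets, image params, and the CS thread ID), and apply the
 * descriptor set layout before the stage-specific brw_compile_*() call.
 */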
static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     gl_shader_stage stage,
                     const VkSpecializationInfo *spec_info,
                     struct brw_stage_prog_data *prog_data,
                     struct anv_pipeline_bind_map *map)
{
   nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
                                               module, entrypoint, stage,
                                               spec_info);
   if (nir == NULL)
      return NULL;

   anv_nir_lower_push_constants(nir);

   /* Figure out the number of parameters */
   prog_data->nr_params = 0;

   if (nir->num_uniforms > 0) {
      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number
       */
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
   }

   if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
      prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;

   if (nir->info.num_images > 0) {
      prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE;
      pipeline->needs_data_cache = true;
   }

   if (stage == MESA_SHADER_COMPUTE)
      ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
         prog_data->nr_params++; /* The CS Thread ID uniform */

   if (nir->info.num_ssbos > 0)
      pipeline->needs_data_cache = true;

   if (prog_data->nr_params > 0) {
      /* XXX: I think we're leaking this */
      prog_data->param = (const union gl_constant_value **)
         malloc(prog_data->nr_params * sizeof(union gl_constant_value *));

      /* We now set the param values to be offsets into a
       * anv_push_constant_data structure.  Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
      struct anv_push_constants *null_data = NULL;
      if (nir->num_uniforms > 0) {
         /* Fill out the push constants section of the param array */
         for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
            prog_data->param[i] = (const union gl_constant_value *)
               &null_data->client_data[i * sizeof(float)];
      }
   }

   /* Set up dynamic offsets */
   anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   if (pipeline->layout)
      anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);

   /* nir_lower_io will only handle the push constants; we need to set this
    * to the full number of possible uniforms.
    */
   nir->num_uniforms = prog_data->nr_params * 4;

   return nir;
}
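
/* Binding table entries for surfaces start at "bias" slots from the top:
 * the fragment stage reserves slots for its render targets and the compute
 * stage reserves one slot, so bias is num_rts or 1 respectively.
 */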
static void
anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
{
   prog_data->binding_table.size_bytes = 0;
   prog_data->binding_table.texture_start = bias;
   prog_data->binding_table.ubo_start = bias;
   prog_data->binding_table.ssbo_start = bias;
   prog_data->binding_table.image_start = bias;
}

static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                gl_shader_stage stage,
                                const struct brw_stage_prog_data *prog_data,
                                struct anv_pipeline_bind_map *map)
{
   struct brw_device_info *devinfo = &pipeline->device->info;
   uint32_t max_threads[] = {
      [MESA_SHADER_VERTEX]                  = devinfo->max_vs_threads,
      [MESA_SHADER_TESS_CTRL]               = devinfo->max_hs_threads,
      [MESA_SHADER_TESS_EVAL]               = devinfo->max_ds_threads,
      [MESA_SHADER_GEOMETRY]                = devinfo->max_gs_threads,
      [MESA_SHADER_FRAGMENT]                = devinfo->max_wm_threads,
      [MESA_SHADER_COMPUTE]                 = devinfo->max_cs_threads,
   };

   pipeline->prog_data[stage] = prog_data;
   pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
   pipeline->scratch_start[stage] = pipeline->total_scratch;
   pipeline->total_scratch =
      align_u32(pipeline->total_scratch, 1024) +
      prog_data->total_scratch * max_threads[stage];
   pipeline->bindings[stage] = *map;
}
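
/* Each anv_pipeline_compile_<stage>() below follows the same pattern:
 * hash the prog key, SPIR-V, entrypoint name, and specialization constants,
 * look the SHA-1 up in the pipeline cache, and only on a miss run the
 * NIR -> brw_compile_*() path and upload the result to the cache.
 */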
static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   const struct brw_stage_prog_data *stage_prog_data;
   struct anv_pipeline_bind_map map;
   struct brw_vs_prog_key key;
   uint32_t kernel = NO_KERNEL;
   unsigned char sha1[20];

   populate_vs_prog_key(&pipeline->device->info, &key);

   if (module->size > 0) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
   }

   if (kernel == NO_KERNEL) {
      struct brw_vs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_VERTEX, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      if (module->nir == NULL)
         ralloc_steal(mem_ctx, nir);

      prog_data.inputs_read = nir->info.inputs_read;

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info.outputs_written,
                          nir->info.separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, false, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      stage_prog_data = &prog_data.base.base;
      kernel = anv_pipeline_cache_upload_kernel(cache,
                                                module->size > 0 ? sha1 : NULL,
                                                shader_code, code_size,
                                                &stage_prog_data, sizeof(prog_data),
                                                &map);

      ralloc_free(mem_ctx);
   }

   const struct brw_vs_prog_data *vs_prog_data =
      (const struct brw_vs_prog_data *) stage_prog_data;

   if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
      pipeline->vs_simd8 = kernel;
      pipeline->vs_vec4 = NO_KERNEL;
   } else {
      pipeline->vs_simd8 = NO_KERNEL;
      pipeline->vs_vec4 = kernel;
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX,
                                   stage_prog_data, &map);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   const struct brw_stage_prog_data *stage_prog_data;
   struct anv_pipeline_bind_map map;
   struct brw_gs_prog_key key;
   uint32_t kernel = NO_KERNEL;
   unsigned char sha1[20];

   populate_gs_prog_key(&pipeline->device->info, &key);

   if (module->size > 0) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
   }

   if (kernel == NO_KERNEL) {
      struct brw_gs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_GEOMETRY, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base.base, 0);

      void *mem_ctx = ralloc_context(NULL);

      if (module->nir == NULL)
         ralloc_steal(mem_ctx, nir);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info.outputs_written,
                          nir->info.separate_shader);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      stage_prog_data = &prog_data.base.base;
      kernel = anv_pipeline_cache_upload_kernel(cache,
                                                module->size > 0 ? sha1 : NULL,
                                                shader_code, code_size,
                                                &stage_prog_data, sizeof(prog_data),
                                                &map);

      ralloc_free(mem_ctx);
   }

   pipeline->gs_kernel = kernel;

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY,
                                   stage_prog_data, &map);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        const struct anv_graphics_pipeline_create_info *extra,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   const struct brw_stage_prog_data *stage_prog_data;
   struct anv_pipeline_bind_map map;
   struct brw_wm_prog_key key;
   unsigned char sha1[20];

   populate_wm_prog_key(&pipeline->device->info, info, extra, &key);

   if (module->size > 0) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
      pipeline->ps_ksp0 =
         anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
   }

   if (pipeline->ps_ksp0 == NO_KERNEL) {
      struct brw_wm_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor + 8,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_FRAGMENT, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      unsigned num_rts = 0;
      struct anv_pipeline_binding rt_bindings[8];
      nir_function_impl *impl = nir_shader_get_entrypoint(nir)->impl;
      nir_foreach_variable_safe(var, &nir->outputs) {
         if (var->data.location < FRAG_RESULT_DATA0)
            continue;

         unsigned rt = var->data.location - FRAG_RESULT_DATA0;
         if (rt >= key.nr_color_regions) {
            /* Out-of-bounds, throw it away */
            var->data.mode = nir_var_local;
            exec_node_remove(&var->node);
            exec_list_push_tail(&impl->locals, &var->node);
            continue;
         }

         /* Give it a new, compacted, location */
         var->data.location = FRAG_RESULT_DATA0 + num_rts;

         unsigned array_len =
            glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
         assert(num_rts + array_len <= 8);

         for (unsigned i = 0; i < array_len; i++) {
            rt_bindings[num_rts + i] = (struct anv_pipeline_binding) {
               .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
               .binding = 0,
               .index = rt + i,
            };
         }

         num_rts += array_len;
      }

      if (pipeline->use_repclear) {
         assert(num_rts == 1);
         key.nr_color_regions = 1;
      }

      if (num_rts == 0) {
         /* If we have no render targets, we need a null render target */
         rt_bindings[0] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = UINT8_MAX,
         };
         num_rts = 1;
      }

      assert(num_rts <= 8);
      map.surface_to_descriptor -= num_rts;
      map.surface_count += num_rts;
      assert(map.surface_count <= 256);
      memcpy(map.surface_to_descriptor, rt_bindings,
             num_rts * sizeof(*rt_bindings));

      anv_fill_binding_table(&prog_data.base, num_rts);

      void *mem_ctx = ralloc_context(NULL);

      if (module->nir == NULL)
         ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, -1, true, pipeline->use_repclear,
                        &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      stage_prog_data = &prog_data.base;
      pipeline->ps_ksp0 =
         anv_pipeline_cache_upload_kernel(cache,
                                          module->size > 0 ? sha1 : NULL,
                                          shader_code, code_size,
                                          &stage_prog_data, sizeof(prog_data),
                                          &map);

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT,
                                   stage_prog_data, &map);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   const struct brw_stage_prog_data *stage_prog_data;
   struct anv_pipeline_bind_map map;
   struct brw_cs_prog_key key;
   uint32_t kernel = NO_KERNEL;
   unsigned char sha1[20];

   populate_cs_prog_key(&pipeline->device->info, &key);

   if (module->size > 0) {
      anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint, spec_info);
      kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
   }

   if (module->size == 0 || kernel == NO_KERNEL) {
      struct brw_cs_prog_data prog_data = { 0, };
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      map = (struct anv_pipeline_bind_map) {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
                                             MESA_SHADER_COMPUTE, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL)
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

      anv_fill_binding_table(&prog_data.base, 1);

      void *mem_ctx = ralloc_context(NULL);

      if (module->nir == NULL)
         ralloc_steal(mem_ctx, nir);

      unsigned code_size;
      const unsigned *shader_code =
         brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, &code_size, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      stage_prog_data = &prog_data.base;
      kernel = anv_pipeline_cache_upload_kernel(cache,
                                                module->size > 0 ? sha1 : NULL,
                                                shader_code, code_size,
                                                &stage_prog_data, sizeof(prog_data),
                                                &map);

      ralloc_free(mem_ctx);
   }

   pipeline->cs_simd = kernel;

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE,
                                   stage_prog_data, &map);

   return VK_SUCCESS;
}

static void
anv_setup_pipeline_l3_config(struct anv_pipeline *pipeline)
{
   const struct brw_device_info *devinfo = &pipeline->device->info;
   switch (devinfo->gen) {
   case 7:
      if (devinfo->is_haswell)
         gen75_setup_pipeline_l3_config(pipeline);
      else
         gen7_setup_pipeline_l3_config(pipeline);
      break;
   case 8:
      gen8_setup_pipeline_l3_config(pipeline);
      break;
   case 9:
      gen9_setup_pipeline_l3_config(pipeline);
      break;
   default:
      unreachable("unsupported gen\n");
   }
}
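
/* Split the URB into 8 KB chunks shared by push constants, the VS, and the
 * GS. As a purely illustrative example (the numbers are assumed, not taken
 * from any particular part): with a 256 KB URB and 32 KB reserved for push
 * constants, a VS with 64-byte entries gets its minimum entry count rounded
 * up to whole chunks first, and whatever chunks remain after the GS minimum
 * are then handed out in proportion to each stage's "wants" below.
 */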
static void
anv_compute_urb_partition(struct anv_pipeline *pipeline)
{
   const struct brw_device_info *devinfo = &pipeline->device->info;

   bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
   unsigned vs_size = vs_present ?
      get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
   unsigned vs_entry_size_bytes = vs_size * 64;
   bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
   unsigned gs_size = gs_present ?
      get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
   unsigned gs_entry_size_bytes = gs_size * 64;

   /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
    *
    *     VS Number of URB Entries must be divisible by 8 if the VS URB Entry
    *     Allocation Size is less than 9 512-bit URB entries.
    *
    * Similar text exists for GS.
    */
   unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
   unsigned gs_granularity = (gs_size < 9) ? 8 : 1;

   /* URB allocations must be done in 8k chunks. */
   unsigned chunk_size_bytes = 8192;

   /* Determine the size of the URB in chunks. */
   unsigned urb_chunks = pipeline->urb.total_size * 1024 / chunk_size_bytes;

   /* Reserve space for push constants */
   unsigned push_constant_kb;
   if (pipeline->device->info.gen >= 8)
      push_constant_kb = 32;
   else if (pipeline->device->info.is_haswell)
      push_constant_kb = pipeline->device->info.gt == 3 ? 32 : 16;
   else
      push_constant_kb = 16;

   unsigned push_constant_bytes = push_constant_kb * 1024;
   unsigned push_constant_chunks =
      push_constant_bytes / chunk_size_bytes;

   /* Initially, assign each stage the minimum amount of URB space it needs,
    * and make a note of how much additional space it "wants" (the amount of
    * additional space it could actually make use of).
    */

   /* VS has a lower limit on the number of URB entries */
   unsigned vs_chunks =
      ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes;
   unsigned vs_wants =
      ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
            chunk_size_bytes) / chunk_size_bytes - vs_chunks;

   unsigned gs_chunks = 0;
   unsigned gs_wants = 0;
   if (gs_present) {
      /* There are two constraints on the minimum amount of URB space we can
       * allocate:
       *
       * (1) We need room for at least 2 URB entries, since we always operate
       * the GS in DUAL_OBJECT mode.
       *
       * (2) We can't allocate less than nr_gs_entries_granularity.
       */
      gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
                        chunk_size_bytes) / chunk_size_bytes;
      gs_wants =
         ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
               chunk_size_bytes) / chunk_size_bytes - gs_chunks;
   }

   /* There should always be enough URB space to satisfy the minimum
    * requirements of each stage.
    */
   unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
   assert(total_needs <= urb_chunks);

   /* Mete out remaining space (if any) in proportion to "wants". */
   unsigned total_wants = vs_wants + gs_wants;
   unsigned remaining_space = urb_chunks - total_needs;
   if (remaining_space > total_wants)
      remaining_space = total_wants;
   if (remaining_space > 0) {
      unsigned vs_additional = (unsigned)
         round(vs_wants * (((double) remaining_space) / total_wants));
      vs_chunks += vs_additional;
      remaining_space -= vs_additional;
      gs_chunks += remaining_space;
   }

   /* Sanity check that we haven't over-allocated. */
   assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);

   /* Finally, compute the number of entries that can fit in the space
    * allocated to each stage.
    */
   unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
   unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;

   /* Since we rounded up when computing *_wants, this may be slightly more
    * than the maximum allowed amount, so correct for that.
    */
   nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
   nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);

   /* Ensure that we program a multiple of the granularity. */
   nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
   nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);

   /* Finally, sanity check to make sure we have at least the minimum number
    * of entries needed for each stage.
    */
   assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
   if (gs_present)
      assert(nr_gs_entries >= 2);

   /* Lay out the URB in the following order:
    * - push constants
    * - VS
    * - GS
    */
   pipeline->urb.start[MESA_SHADER_VERTEX] = push_constant_chunks;
   pipeline->urb.size[MESA_SHADER_VERTEX] = vs_size;
   pipeline->urb.entries[MESA_SHADER_VERTEX] = nr_vs_entries;

   pipeline->urb.start[MESA_SHADER_GEOMETRY] = push_constant_chunks + vs_chunks;
   pipeline->urb.size[MESA_SHADER_GEOMETRY] = gs_size;
   pipeline->urb.entries[MESA_SHADER_GEOMETRY] = nr_gs_entries;

   pipeline->urb.start[MESA_SHADER_TESS_CTRL] = push_constant_chunks;
   pipeline->urb.size[MESA_SHADER_TESS_CTRL] = 1;
   pipeline->urb.entries[MESA_SHADER_TESS_CTRL] = 0;

   pipeline->urb.start[MESA_SHADER_TESS_EVAL] = push_constant_chunks;
   pipeline->urb.size[MESA_SHADER_TESS_EVAL] = 1;
   pipeline->urb.entries[MESA_SHADER_TESS_EVAL] = 0;
}
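
/* Capture the baseline dynamic state from the create info. Any state the
 * application lists in pDynamicState is masked out here and will instead be
 * taken from the command buffer at draw time.
 */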
static void
anv_pipeline_init_dynamic_state(struct anv_pipeline *pipeline,
                                const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
   struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];

   pipeline->dynamic_state = default_dynamic_state;

   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++)
         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
   }

   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
   if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
      typed_memcpy(dynamic->viewport.viewports,
                   pCreateInfo->pViewportState->pViewports,
                   pCreateInfo->pViewportState->viewportCount);
   }

   dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
   if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
      typed_memcpy(dynamic->scissor.scissors,
                   pCreateInfo->pViewportState->pScissors,
                   pCreateInfo->pViewportState->scissorCount);
   }

   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS)) {
      assert(pCreateInfo->pColorBlendState);
      typed_memcpy(dynamic->blend_constants,
                   pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depthstencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depthstencil defaults in
    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
    *
    * From the Vulkan spec (20 Oct 2015, git-aa308cb):
    *
    *    pDepthStencilState [...] may only be NULL if renderPass and subpass
    *    specify a subpass that has no depth/stencil attachment.
    */
   if (subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
         assert(pCreateInfo->pDepthStencilState);
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
         assert(pCreateInfo->pDepthStencilState);
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
         assert(pCreateInfo->pDepthStencilState);
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
         assert(pCreateInfo->pDepthStencilState);
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }
   }

   pipeline->dynamic_state_mask = states;
}

static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present, as explained by the Vulkan spec (20 Oct 2015, git-aa308cb),
    * Section 4.2 Graphics Pipeline.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   if (renderpass != &anv_meta_dummy_renderpass) {
      assert(info->subpass < renderpass->subpass_count);
      subpass = &renderpass->subpasses[info->subpass];
   }

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pViewportState);
   assert(info->pRasterizationState);

   if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
      assert(info->pDepthStencilState);

   if (subpass && subpass->color_count > 0)
      assert(info->pColorBlendState);

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
}
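
/* Gen-agnostic pipeline construction: dynamic state, per-stage shader
 * compilation, L3 configuration, URB partitioning, and vertex input /
 * input assembly bookkeeping. The genX pipeline code emits the actual
 * hardware state packets on top of this.
 */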
VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
                  struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const struct anv_graphics_pipeline_create_info *extra,
                  const VkAllocationCallbacks *alloc)
{
   VkResult result;

   anv_validate {
      anv_pipeline_validate_create_info(pCreateInfo);
   }

   if (alloc == NULL)
      alloc = &device->alloc;

   pipeline->device = device;
   pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);

   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
   if (result != VK_SUCCESS)
      return result;

   pipeline->batch.alloc = alloc;
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;

   anv_pipeline_init_dynamic_state(pipeline, pCreateInfo);

   pipeline->use_repclear = extra && extra->use_repclear;

   pipeline->needs_data_cache = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
   memset(pipeline->scratch_start, 0, sizeof(pipeline->scratch_start));
   memset(pipeline->bindings, 0, sizeof(pipeline->bindings));

   pipeline->vs_simd8 = NO_KERNEL;
   pipeline->vs_vec4 = NO_KERNEL;
   pipeline->gs_kernel = NO_KERNEL;
   pipeline->ps_ksp0 = NO_KERNEL;

   pipeline->active_stages = 0;
   pipeline->total_scratch = 0;

   const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
   struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
      pStages[stage] = &pCreateInfo->pStages[i];
      modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
   }

   if (modules[MESA_SHADER_VERTEX]) {
      anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
                              modules[MESA_SHADER_VERTEX],
                              pStages[MESA_SHADER_VERTEX]->pName,
                              pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
   }

   if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
      anv_finishme("no tessellation support");

   if (modules[MESA_SHADER_GEOMETRY]) {
      anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
                              modules[MESA_SHADER_GEOMETRY],
                              pStages[MESA_SHADER_GEOMETRY]->pName,
                              pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
   }

   if (modules[MESA_SHADER_FRAGMENT]) {
      anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
                              modules[MESA_SHADER_FRAGMENT],
                              pStages[MESA_SHADER_FRAGMENT]->pName,
                              pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
   }

   if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
      /* Vertex is only optional if disable_vs is set */
      assert(extra->disable_vs);
   }

   anv_setup_pipeline_l3_config(pipeline);
   anv_compute_urb_partition(pipeline);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;

   uint64_t inputs_read;
   if (extra && extra->disable_vs) {
      /* If the VS is disabled, just assume the user knows what they're
       * doing and apply the layout blindly.  This can only come from
       * meta, so this *should* be safe.
       */
      inputs_read = ~0ull;
   } else {
      inputs_read = get_vs_prog_data(pipeline)->inputs_read;
   }

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &vi_info->pVertexAttributeDescriptions[i];

      if (inputs_read & (1 << (VERT_ATTRIB_GENERIC0 + desc->location)))
         pipeline->vb_used |= 1 << desc->binding;
   }

   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->binding_stride[desc->binding] = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding. Set up a map of which bindings step per instance, for
       * reference by vertex element setup. */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;
   pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   if (extra && extra->use_rectlist)
      pipeline->topology = _3DPRIM_RECTLIST;

   while (anv_block_pool_size(&device->scratch_block_pool) <
          pipeline->total_scratch)
      anv_block_pool_alloc(&device->scratch_block_pool);

   return VK_SUCCESS;
}
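
/* The entrypoints below only dispatch to the per-gen pipeline creation
 * functions, falling back to the device's default pipeline cache when the
 * application does not supply one.
 */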
VkResult
anv_graphics_pipeline_create(
   VkDevice _device,
   VkPipelineCache _cache,
   const VkGraphicsPipelineCreateInfo *pCreateInfo,
   const struct anv_graphics_pipeline_create_info *extra,
   const VkAllocationCallbacks *pAllocator,
   VkPipeline *pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (cache == NULL)
      cache = &device->default_pipeline_cache;

   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell)
         return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
      else
         return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
   case 8:
      return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
   case 9:
      return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_CreateGraphicsPipelines(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkGraphicsPipelineCreateInfo*         pCreateInfos,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipelines)
{
   VkResult result = VK_SUCCESS;
   unsigned i = 0;
   for (; i < count; i++) {
      result = anv_graphics_pipeline_create(_device, pipelineCache,
                                            &pCreateInfos[i],
                                            NULL, pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++)
            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
         return result;
      }
   }

   return VK_SUCCESS;
}

static VkResult anv_compute_pipeline_create(
    VkDevice                                    _device,
    VkPipelineCache                             _cache,
    const VkComputePipelineCreateInfo*          pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipeline)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);

   if (cache == NULL)
      cache = &device->default_pipeline_cache;

   switch (device->info.gen) {
   case 7:
      if (device->info.is_haswell)
         return gen75_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
      else
         return gen7_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
   case 8:
      return gen8_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
   case 9:
      return gen9_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
   default:
      unreachable("unsupported gen\n");
   }
}

VkResult anv_CreateComputePipelines(
    VkDevice                                    _device,
    VkPipelineCache                             pipelineCache,
    uint32_t                                    count,
    const VkComputePipelineCreateInfo*          pCreateInfos,
    const VkAllocationCallbacks*                pAllocator,
    VkPipeline*                                 pPipelines)
{
   VkResult result = VK_SUCCESS;
   unsigned i = 0;
   for (; i < count; i++) {
      result = anv_compute_pipeline_create(_device, pipelineCache,
                                           &pCreateInfos[i],
                                           pAllocator, &pPipelines[i]);
      if (result != VK_SUCCESS) {
         for (unsigned j = 0; j < i; j++)
            anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
         return result;
      }
   }

   return VK_SUCCESS;
}