2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
30 #include "util/mesa-sha1.h"
31 #include "anv_private.h"
32 #include "brw_nir.h"
33 #include "anv_nir.h"
34 #include "spirv/nir_spirv.h"
36 /* Needed for SWIZZLE macros */
37 #include "program/prog_instruction.h"
41 VkResult anv_CreateShaderModule(
42 VkDevice _device,
43 const VkShaderModuleCreateInfo* pCreateInfo,
44 const VkAllocationCallbacks* pAllocator,
45 VkShaderModule* pShaderModule)
47 ANV_FROM_HANDLE(anv_device, device, _device);
48 struct anv_shader_module *module;
50 assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
51 assert(pCreateInfo->flags == 0);
53 module = anv_alloc2(&device->alloc, pAllocator,
54 sizeof(*module) + pCreateInfo->codeSize, 8,
55 VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
56 if (module == NULL)
57 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
60 module->size = pCreateInfo->codeSize;
61 memcpy(module->data, pCreateInfo->pCode, module->size);
63 _mesa_sha1_compute(module->data, module->size, module->sha1);
65 *pShaderModule = anv_shader_module_to_handle(module);
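/* The SHA-1 computed here identifies the module contents. The per-stage
 * pipeline-cache keys used below are built by anv_hash_shader() from the
 * module together with the stage key, entrypoint name, pipeline layout, and
 * specialization info (see anv_pipeline_compile_vs and friends).
 */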
70 void anv_DestroyShaderModule(
71 VkDevice _device,
72 VkShaderModule _module,
73 const VkAllocationCallbacks* pAllocator)
75 ANV_FROM_HANDLE(anv_device, device, _device);
76 ANV_FROM_HANDLE(anv_shader_module, module, _module);
78 anv_free2(&device->alloc, pAllocator, module);
81 #define SPIR_V_MAGIC_NUMBER 0x07230203
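/* 0x07230203 is the magic number that starts every SPIR-V binary (the first
 * of its 32-bit words); anv_shader_compile_to_nir() asserts it below before
 * handing the words to spirv_to_nir().
 */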
83 /* Eventually, this will become part of anv_CreateShader. Unfortunately,
84 * we can't do that yet because we don't have the ability to copy nir.
85 */
86 static nir_shader *
87 anv_shader_compile_to_nir(struct anv_device *device,
88 struct anv_shader_module *module,
89 const char *entrypoint_name,
90 gl_shader_stage stage,
91 const VkSpecializationInfo *spec_info)
93 if (strcmp(entrypoint_name, "main") != 0) {
94 anv_finishme("Multiple shaders per module not really supported");
97 const struct brw_compiler *compiler =
98 device->instance->physicalDevice.compiler;
99 const nir_shader_compiler_options *nir_options =
100 compiler->glsl_compiler_options[stage].NirOptions;
102 nir_shader *nir;
103 nir_function *entry_point;
104 if (module->nir) {
105 /* Some things such as our meta clear/blit code will give us a NIR
106 * shader directly. In that case, we ignore the SPIR-V entirely and use
107 * the NIR shader as-is. */
108 nir = module->nir;
109 nir->options = nir_options;
110 nir_validate_shader(nir);
112 assert(exec_list_length(&nir->functions) == 1);
113 struct exec_node *node = exec_list_get_head(&nir->functions);
114 entry_point = exec_node_data(nir_function, node, node);
115 } else {
116 uint32_t *spirv = (uint32_t *) module->data;
117 assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
118 assert(module->size % 4 == 0);
120 uint32_t num_spec_entries = 0;
121 struct nir_spirv_specialization *spec_entries = NULL;
122 if (spec_info && spec_info->mapEntryCount > 0) {
123 num_spec_entries = spec_info->mapEntryCount;
124 spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
125 for (uint32_t i = 0; i < num_spec_entries; i++) {
126 VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
127 const void *data = spec_info->pData + entry.offset;
128 assert(data + entry.size <= spec_info->pData + spec_info->dataSize);
130 spec_entries[i].id = spec_info->pMapEntries[i].constantID;
131 spec_entries[i].data = *(const uint32_t *)data;
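/* Illustrative example (not part of this file): an application specializing
 * one 32-bit constant with constant_id = 7 to the value 4 would pass roughly
 *
 *    uint32_t value = 4;
 *    VkSpecializationMapEntry entry = { .constantID = 7, .offset = 0, .size = 4 };
 *    VkSpecializationInfo info = { .mapEntryCount = 1, .pMapEntries = &entry,
 *                                  .dataSize = sizeof(value), .pData = &value };
 *
 * which the loop above turns into a single nir_spirv_specialization with
 * .id = 7 and .data = 4 for spirv_to_nir() below.
 */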
135 entry_point = spirv_to_nir(spirv, module->size / 4,
136 spec_entries, num_spec_entries,
137 stage, entrypoint_name, nir_options);
138 nir = entry_point->shader;
139 assert(nir->stage == stage);
140 nir_validate_shader(nir);
141 free(spec_entries);
142 }
144 if (stage == MESA_SHADER_FRAGMENT) {
145 nir_lower_wpos_center(nir);
146 nir_validate_shader(nir);
149 nir_lower_returns(nir);
150 nir_validate_shader(nir);
152 nir_inline_functions(nir);
153 nir_validate_shader(nir);
155 /* Pick off the single entrypoint that we want */
156 foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
157 if (func != entry_point)
158 exec_node_remove(&func->node);
160 assert(exec_list_length(&nir->functions) == 1);
161 entry_point->name = ralloc_strdup(entry_point, "main");
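/* Only the requested entrypoint survives; it keeps the fixed name "main"
 * since that is the only entrypoint name the rest of this file deals with
 * (see the strcmp/anv_finishme at the top of this function).
 */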
163 nir_remove_dead_variables(nir, nir_var_shader_in);
164 nir_remove_dead_variables(nir, nir_var_shader_out);
165 nir_remove_dead_variables(nir, nir_var_system_value);
166 nir_validate_shader(nir);
168 nir_propagate_invariant(nir);
169 nir_validate_shader(nir);
171 nir_lower_io_to_temporaries(entry_point->shader, entry_point, true, false);
173 nir_lower_system_values(nir);
174 nir_validate_shader(nir);
177 /* Vulkan uses the separate-shader linking model */
178 nir->info.separate_shader = true;
180 nir = brw_preprocess_nir(compiler, nir);
182 nir_shader_gather_info(nir, entry_point->impl);
184 nir_variable_mode indirect_mask = 0;
185 if (compiler->glsl_compiler_options[stage].EmitNoIndirectInput)
186 indirect_mask |= nir_var_shader_in;
187 if (compiler->glsl_compiler_options[stage].EmitNoIndirectTemp)
188 indirect_mask |= nir_var_local;
190 nir_lower_indirect_derefs(nir, indirect_mask);
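/* At this point the shader has been specialized, inlined down to a single
 * entrypoint, run through the backend's generic cleanup (brw_preprocess_nir),
 * and had indirect input/temporary access lowered wherever the backend asks
 * for it (EmitNoIndirectInput/EmitNoIndirectTemp). The resulting nir_shader
 * is what anv_pipeline_compile() hands to brw_compile_{vs,gs,fs,cs}.
 */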
195 void anv_DestroyPipeline(
196 VkDevice _device,
197 VkPipeline _pipeline,
198 const VkAllocationCallbacks* pAllocator)
200 ANV_FROM_HANDLE(anv_device, device, _device);
201 ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);
203 anv_reloc_list_finish(&pipeline->batch_relocs,
204 pAllocator ? pAllocator : &device->alloc);
205 if (pipeline->blend_state.map)
206 anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);
207 anv_free2(&device->alloc, pAllocator, pipeline);
210 static const uint32_t vk_to_gen_primitive_type[] = {
211 [VK_PRIMITIVE_TOPOLOGY_POINT_LIST] = _3DPRIM_POINTLIST,
212 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST] = _3DPRIM_LINELIST,
213 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP] = _3DPRIM_LINESTRIP,
214 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST] = _3DPRIM_TRILIST,
215 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP] = _3DPRIM_TRISTRIP,
216 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN] = _3DPRIM_TRIFAN,
217 [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY] = _3DPRIM_LINELIST_ADJ,
218 [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY] = _3DPRIM_LINESTRIP_ADJ,
219 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY] = _3DPRIM_TRILIST_ADJ,
220 [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
221 /* [VK_PRIMITIVE_TOPOLOGY_PATCH_LIST] = _3DPRIM_PATCHLIST_1 */
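/* PATCH_LIST stays commented out: _3DPRIM_PATCHLIST_1 is only the
 * one-control-point variant, and the real translation would need the
 * patchControlPoints from the tessellation state, which isn't supported yet
 * (see the anv_finishme("no tessellation support") in anv_pipeline_init).
 */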
225 populate_sampler_prog_key(const struct brw_device_info *devinfo,
226 struct brw_sampler_prog_key_data *key)
228 /* XXX: Handle texture swizzle on HSW- */
229 for (int i = 0; i < MAX_SAMPLERS; i++) {
230 /* Assume color sampler, no swizzling. (Works for BDW+) */
231 key->swizzles[i] = SWIZZLE_XYZW;
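/* SWIZZLE_XYZW is the identity swizzle. On BDW+ any required channel
 * swizzling can be done in the surface state, so the identity key is always
 * correct there; on HSW and earlier the per-sampler swizzle would have to be
 * baked into the key here, which is what the XXX above refers to.
 */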
236 populate_vs_prog_key(const struct brw_device_info *devinfo,
237 struct brw_vs_prog_key *key)
239 memset(key, 0, sizeof(*key));
241 populate_sampler_prog_key(devinfo, &key->tex);
243 /* XXX: Handle vertex input work-arounds */
245 /* XXX: Handle sampler_prog_key */
249 populate_gs_prog_key(const struct brw_device_info *devinfo,
250 struct brw_gs_prog_key *key)
252 memset(key, 0, sizeof(*key));
254 populate_sampler_prog_key(devinfo, &key->tex);
258 populate_wm_prog_key(const struct brw_device_info *devinfo,
259 const VkGraphicsPipelineCreateInfo *info,
260 const struct anv_graphics_pipeline_create_info *extra,
261 struct brw_wm_prog_key *key)
263 ANV_FROM_HANDLE(anv_render_pass, render_pass, info->renderPass);
265 memset(key, 0, sizeof(*key));
267 populate_sampler_prog_key(devinfo, &key->tex);
269 /* TODO: Fill out key->input_slots_valid */
271 /* Vulkan doesn't specify a default */
272 key->high_quality_derivatives = false;
274 /* XXX Vulkan doesn't appear to specify */
275 key->clamp_fragment_color = false;
277 if (extra && extra->color_attachment_count >= 0) {
278 key->nr_color_regions = extra->color_attachment_count;
280 key->nr_color_regions =
281 render_pass->subpasses[info->subpass].color_count;
284 key->replicate_alpha = key->nr_color_regions > 1 &&
285 info->pMultisampleState &&
286 info->pMultisampleState->alphaToCoverageEnable;
288 if (info->pMultisampleState && info->pMultisampleState->rasterizationSamples > 1) {
289 /* We should probably pull this out of the shader, but it's fairly
290 * harmless to compute it and then let dead-code take care of it.
292 key->persample_interp =
293 (info->pMultisampleState->minSampleShading *
294 info->pMultisampleState->rasterizationSamples) > 1;
295 key->multisample_fbo = true;
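/* Worked example: rasterizationSamples = 4 with minSampleShading = 0.5 gives
 * 0.5 * 4 = 2 > 1, so persample_interp is set; with minSampleShading = 0.25
 * the product is 1, which is not > 1, and per-sample interpolation stays off.
 */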
300 populate_cs_prog_key(const struct brw_device_info *devinfo,
301 struct brw_cs_prog_key *key)
303 memset(key, 0, sizeof(*key));
305 populate_sampler_prog_key(devinfo, &key->tex);
308 static nir_shader *
309 anv_pipeline_compile(struct anv_pipeline *pipeline,
310 struct anv_shader_module *module,
311 const char *entrypoint,
312 gl_shader_stage stage,
313 const VkSpecializationInfo *spec_info,
314 struct brw_stage_prog_data *prog_data,
315 struct anv_pipeline_bind_map *map)
317 nir_shader *nir = anv_shader_compile_to_nir(pipeline->device,
318 module, entrypoint, stage,
319 spec_info);
320 if (nir == NULL)
321 return NULL;
323 anv_nir_lower_push_constants(nir);
325 /* Figure out the number of parameters */
326 prog_data->nr_params = 0;
328 if (nir->num_uniforms > 0) {
329 /* If the shader uses any push constants at all, we'll just give
330 * them the maximum possible number
332 prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
335 if (pipeline->layout && pipeline->layout->stage[stage].has_dynamic_offsets)
336 prog_data->nr_params += MAX_DYNAMIC_BUFFERS * 2;
338 if (nir->info.num_images > 0) {
339 prog_data->nr_params += nir->info.num_images * BRW_IMAGE_PARAM_SIZE;
340 pipeline->needs_data_cache = true;
343 if (stage == MESA_SHADER_COMPUTE)
344 ((struct brw_cs_prog_data *)prog_data)->thread_local_id_index =
345 prog_data->nr_params++; /* The CS Thread ID uniform */
347 if (nir->info.num_ssbos > 0)
348 pipeline->needs_data_cache = true;
350 if (prog_data->nr_params > 0) {
351 /* XXX: I think we're leaking this */
352 prog_data->param = (const union gl_constant_value **)
353 malloc(prog_data->nr_params * sizeof(union gl_constant_value *));
355 /* We now set the param values to be offsets into an
356 * anv_push_constants structure. Since the compiler doesn't
357 * actually dereference any of the gl_constant_value pointers in the
358 * params array, it doesn't really matter what we put here.
360 struct anv_push_constants *null_data = NULL;
361 if (nir->num_uniforms > 0) {
362 /* Fill out the push constants section of the param array */
363 for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++)
364 prog_data->param[i] = (const union gl_constant_value *)
365 &null_data->client_data[i * sizeof(float)];
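/* Since null_data is a NULL pointer, &null_data->client_data[i * sizeof(float)]
 * evaluates to the byte offset of that push-constant slot within
 * struct anv_push_constants (the usual offsetof trick); as the comment above
 * says, the pointer is only a carrier for the offset and is never dereferenced.
 */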
369 /* Set up dynamic offsets */
370 anv_nir_apply_dynamic_offsets(pipeline, nir, prog_data);
372 /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
373 if (pipeline->layout)
374 anv_nir_apply_pipeline_layout(pipeline, nir, prog_data, map);
376 /* nir_lower_io will only handle the push constants; we need to set this
377 * to the full number of possible uniforms.
379 nir->num_uniforms = prog_data->nr_params * 4;
385 anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
387 prog_data->binding_table.size_bytes = 0;
388 prog_data->binding_table.texture_start = bias;
389 prog_data->binding_table.ubo_start = bias;
390 prog_data->binding_table.ssbo_start = bias;
391 prog_data->binding_table.image_start = bias;
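/* The bias reserves the first N binding-table slots for surfaces that don't
 * come from descriptors: the fragment shader passes the number of
 * render-target entries it prepends (see anv_pipeline_compile_fs), the
 * compute shader reserves one slot (bias of 1), and the other stages pass 0.
 */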
395 anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
396 gl_shader_stage stage,
397 const struct brw_stage_prog_data *prog_data,
398 struct anv_pipeline_bind_map *map)
400 pipeline->prog_data[stage] = prog_data;
401 pipeline->active_stages |= mesa_to_vk_shader_stage(stage);
402 pipeline->bindings[stage] = *map;
406 anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
407 struct anv_pipeline_cache *cache,
408 const VkGraphicsPipelineCreateInfo *info,
409 struct anv_shader_module *module,
410 const char *entrypoint,
411 const VkSpecializationInfo *spec_info)
413 const struct brw_compiler *compiler =
414 pipeline->device->instance->physicalDevice.compiler;
415 const struct brw_stage_prog_data *stage_prog_data;
416 struct anv_pipeline_bind_map map;
417 struct brw_vs_prog_key key;
418 uint32_t kernel = NO_KERNEL;
419 unsigned char sha1[20];
421 populate_vs_prog_key(&pipeline->device->info, &key);
423 if (module->size > 0) {
424 anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
425 pipeline->layout, spec_info);
426 kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
429 if (kernel == NO_KERNEL) {
430 struct brw_vs_prog_data prog_data = { 0, };
431 struct anv_pipeline_binding surface_to_descriptor[256];
432 struct anv_pipeline_binding sampler_to_descriptor[256];
434 map = (struct anv_pipeline_bind_map) {
435 .surface_to_descriptor = surface_to_descriptor,
436 .sampler_to_descriptor = sampler_to_descriptor
439 nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
440 MESA_SHADER_VERTEX, spec_info,
441 &prog_data.base.base, &map);
442 if (nir == NULL)
443 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
445 anv_fill_binding_table(&prog_data.base.base, 0);
447 void *mem_ctx = ralloc_context(NULL);
449 if (module->nir == NULL)
450 ralloc_steal(mem_ctx, nir);
452 prog_data.inputs_read = nir->info.inputs_read;
454 brw_compute_vue_map(&pipeline->device->info,
455 &prog_data.base.vue_map,
456 nir->info.outputs_written,
457 nir->info.separate_shader);
460 const unsigned *shader_code =
461 brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
462 NULL, false, -1, &code_size, NULL);
463 if (shader_code == NULL) {
464 ralloc_free(mem_ctx);
465 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
468 stage_prog_data = &prog_data.base.base;
469 kernel = anv_pipeline_cache_upload_kernel(cache,
470 module->size > 0 ? sha1 : NULL,
471 shader_code, code_size,
472 &stage_prog_data, sizeof(prog_data),
474 ralloc_free(mem_ctx);
477 const struct brw_vs_prog_data *vs_prog_data =
478 (const struct brw_vs_prog_data *) stage_prog_data;
480 if (vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8) {
481 pipeline->vs_simd8 = kernel;
482 pipeline->vs_vec4 = NO_KERNEL;
484 pipeline->vs_simd8 = NO_KERNEL;
485 pipeline->vs_vec4 = kernel;
488 anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX,
489 stage_prog_data, &map);
495 anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
496 struct anv_pipeline_cache *cache,
497 const VkGraphicsPipelineCreateInfo *info,
498 struct anv_shader_module *module,
499 const char *entrypoint,
500 const VkSpecializationInfo *spec_info)
502 const struct brw_compiler *compiler =
503 pipeline->device->instance->physicalDevice.compiler;
504 const struct brw_stage_prog_data *stage_prog_data;
505 struct anv_pipeline_bind_map map;
506 struct brw_gs_prog_key key;
507 uint32_t kernel = NO_KERNEL;
508 unsigned char sha1[20];
510 populate_gs_prog_key(&pipeline->device->info, &key);
512 if (module->size > 0) {
513 anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
514 pipeline->layout, spec_info);
515 kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
518 if (kernel == NO_KERNEL) {
519 struct brw_gs_prog_data prog_data = { 0, };
520 struct anv_pipeline_binding surface_to_descriptor[256];
521 struct anv_pipeline_binding sampler_to_descriptor[256];
523 map = (struct anv_pipeline_bind_map) {
524 .surface_to_descriptor = surface_to_descriptor,
525 .sampler_to_descriptor = sampler_to_descriptor
528 nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
529 MESA_SHADER_GEOMETRY, spec_info,
530 &prog_data.base.base, &map);
531 if (nir == NULL)
532 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
534 anv_fill_binding_table(&prog_data.base.base, 0);
536 void *mem_ctx = ralloc_context(NULL);
538 if (module->nir == NULL)
539 ralloc_steal(mem_ctx, nir);
541 brw_compute_vue_map(&pipeline->device->info,
542 &prog_data.base.vue_map,
543 nir->info.outputs_written,
544 nir->info.separate_shader);
547 const unsigned *shader_code =
548 brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
549 NULL, -1, &code_size, NULL);
550 if (shader_code == NULL) {
551 ralloc_free(mem_ctx);
552 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
556 stage_prog_data = &prog_data.base.base;
557 kernel = anv_pipeline_cache_upload_kernel(cache,
558 module->size > 0 ? sha1 : NULL,
559 shader_code, code_size,
560 &stage_prog_data, sizeof(prog_data),
563 ralloc_free(mem_ctx);
566 pipeline->gs_kernel = kernel;
568 anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY,
569 stage_prog_data, &map);
575 anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
576 struct anv_pipeline_cache *cache,
577 const VkGraphicsPipelineCreateInfo *info,
578 const struct anv_graphics_pipeline_create_info *extra,
579 struct anv_shader_module *module,
580 const char *entrypoint,
581 const VkSpecializationInfo *spec_info)
583 const struct brw_compiler *compiler =
584 pipeline->device->instance->physicalDevice.compiler;
585 const struct brw_stage_prog_data *stage_prog_data;
586 struct anv_pipeline_bind_map map;
587 struct brw_wm_prog_key key;
588 unsigned char sha1[20];
590 populate_wm_prog_key(&pipeline->device->info, info, extra, &key);
592 if (module->size > 0) {
593 anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
594 pipeline->layout, spec_info);
595 pipeline->ps_ksp0 =
596 anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
599 if (pipeline->ps_ksp0 == NO_KERNEL) {
600 struct brw_wm_prog_data prog_data = { 0, };
601 struct anv_pipeline_binding surface_to_descriptor[256];
602 struct anv_pipeline_binding sampler_to_descriptor[256];
604 map = (struct anv_pipeline_bind_map) {
605 .surface_to_descriptor = surface_to_descriptor + 8,
606 .sampler_to_descriptor = sampler_to_descriptor
609 nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
610 MESA_SHADER_FRAGMENT, spec_info,
611 &prog_data.base, &map);
612 if (nir == NULL)
613 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
615 unsigned num_rts = 0;
616 struct anv_pipeline_binding rt_bindings[8];
617 nir_function_impl *impl = nir_shader_get_entrypoint(nir)->impl;
618 nir_foreach_variable_safe(var, &nir->outputs) {
619 if (var->data.location < FRAG_RESULT_DATA0)
622 unsigned rt = var->data.location - FRAG_RESULT_DATA0;
623 if (rt >= key.nr_color_regions) {
624 /* Out-of-bounds, throw it away */
625 var->data.mode = nir_var_local;
626 exec_node_remove(&var->node);
627 exec_list_push_tail(&impl->locals, &var->node);
631 /* Give it a new, compacted, location */
632 var->data.location = FRAG_RESULT_DATA0 + num_rts;
634 unsigned array_len =
635 glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
636 assert(num_rts + array_len <= 8);
638 for (unsigned i = 0; i < array_len; i++) {
639 rt_bindings[num_rts] = (struct anv_pipeline_binding) {
640 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
646 num_rts += array_len;
649 if (pipeline->use_repclear) {
650 assert(num_rts == 1);
651 key.nr_color_regions = 1;
655 /* If we have no render targets, we need a null render target */
656 rt_bindings[0] = (struct anv_pipeline_binding) {
657 .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
664 assert(num_rts <= 8);
665 map.surface_to_descriptor -= num_rts;
666 map.surface_count += num_rts;
667 assert(map.surface_count <= 256);
668 memcpy(map.surface_to_descriptor, rt_bindings,
669 num_rts * sizeof(*rt_bindings));
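/* map.surface_to_descriptor originally pointed 8 entries into the scratch
 * array (the "+ 8" when map was initialized above), reserving room in front
 * for up to 8 render-target bindings. Now that the real count is known, the
 * pointer is walked back by num_rts and the rt_bindings are copied into that
 * reserved space, ahead of the descriptor-backed surfaces.
 */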
671 anv_fill_binding_table(&prog_data.base, num_rts);
673 void *mem_ctx = ralloc_context(NULL);
675 if (module->nir == NULL)
676 ralloc_steal(mem_ctx, nir);
679 const unsigned *shader_code =
680 brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
681 NULL, -1, -1, true, pipeline->use_repclear,
683 if (shader_code == NULL) {
684 ralloc_free(mem_ctx);
685 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
688 stage_prog_data = &prog_data.base;
689 pipeline->ps_ksp0 =
690 anv_pipeline_cache_upload_kernel(cache,
691 module->size > 0 ? sha1 : NULL,
692 shader_code, code_size,
693 &stage_prog_data, sizeof(prog_data),
696 ralloc_free(mem_ctx);
699 anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT,
700 stage_prog_data, &map);
706 anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
707 struct anv_pipeline_cache *cache,
708 const VkComputePipelineCreateInfo *info,
709 struct anv_shader_module *module,
710 const char *entrypoint,
711 const VkSpecializationInfo *spec_info)
713 const struct brw_compiler *compiler =
714 pipeline->device->instance->physicalDevice.compiler;
715 const struct brw_stage_prog_data *stage_prog_data;
716 struct anv_pipeline_bind_map map;
717 struct brw_cs_prog_key key;
718 uint32_t kernel = NO_KERNEL;
719 unsigned char sha1[20];
721 populate_cs_prog_key(&pipeline->device->info, &key);
723 if (module->size > 0) {
724 anv_hash_shader(sha1, &key, sizeof(key), module, entrypoint,
725 pipeline->layout, spec_info);
726 kernel = anv_pipeline_cache_search(cache, sha1, &stage_prog_data, &map);
729 if (module->size == 0 || kernel == NO_KERNEL) {
730 struct brw_cs_prog_data prog_data = { 0, };
731 struct anv_pipeline_binding surface_to_descriptor[256];
732 struct anv_pipeline_binding sampler_to_descriptor[256];
734 map = (struct anv_pipeline_bind_map) {
735 .surface_to_descriptor = surface_to_descriptor,
736 .sampler_to_descriptor = sampler_to_descriptor
739 nir_shader *nir = anv_pipeline_compile(pipeline, module, entrypoint,
740 MESA_SHADER_COMPUTE, spec_info,
741 &prog_data.base, &map);
742 if (nir == NULL)
743 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
745 anv_fill_binding_table(&prog_data.base, 1);
747 void *mem_ctx = ralloc_context(NULL);
749 if (module->nir == NULL)
750 ralloc_steal(mem_ctx, nir);
753 const unsigned *shader_code =
754 brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
755 -1, &code_size, NULL);
756 if (shader_code == NULL) {
757 ralloc_free(mem_ctx);
758 return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
761 stage_prog_data = &prog_data.base;
762 kernel = anv_pipeline_cache_upload_kernel(cache,
763 module->size > 0 ? sha1 : NULL,
764 shader_code, code_size,
765 &stage_prog_data, sizeof(prog_data),
768 ralloc_free(mem_ctx);
771 pipeline->cs_simd = kernel;
773 anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE,
774 stage_prog_data, &map);
781 anv_setup_pipeline_l3_config(struct anv_pipeline *pipeline)
783 const struct brw_device_info *devinfo = &pipeline->device->info;
784 switch (devinfo->gen) {
786 if (devinfo->is_haswell)
787 gen75_setup_pipeline_l3_config(pipeline);
789 gen7_setup_pipeline_l3_config(pipeline);
792 gen8_setup_pipeline_l3_config(pipeline);
795 gen9_setup_pipeline_l3_config(pipeline);
798 unreachable("unsupported gen\n");
803 anv_compute_urb_partition(struct anv_pipeline *pipeline)
805 const struct brw_device_info *devinfo = &pipeline->device->info;
807 bool vs_present = pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT;
808 unsigned vs_size = vs_present ?
809 get_vs_prog_data(pipeline)->base.urb_entry_size : 1;
810 unsigned vs_entry_size_bytes = vs_size * 64;
811 bool gs_present = pipeline->active_stages & VK_SHADER_STAGE_GEOMETRY_BIT;
812 unsigned gs_size = gs_present ?
813 get_gs_prog_data(pipeline)->base.urb_entry_size : 1;
814 unsigned gs_entry_size_bytes = gs_size * 64;
816 /* From p35 of the Ivy Bridge PRM (section 1.7.1: 3DSTATE_URB_GS):
818 * VS Number of URB Entries must be divisible by 8 if the VS URB Entry
819 * Allocation Size is less than 9 512-bit URB entries.
821 * Similar text exists for GS.
823 unsigned vs_granularity = (vs_size < 9) ? 8 : 1;
824 unsigned gs_granularity = (gs_size < 9) ? 8 : 1;
826 /* URB allocations must be done in 8k chunks. */
827 unsigned chunk_size_bytes = 8192;
829 /* Determine the size of the URB in chunks. */
830 unsigned urb_chunks = pipeline->urb.total_size * 1024 / chunk_size_bytes;
832 /* Reserve space for push constants */
833 unsigned push_constant_kb;
834 if (pipeline->device->info.gen >= 8)
835 push_constant_kb = 32;
836 else if (pipeline->device->info.is_haswell)
837 push_constant_kb = pipeline->device->info.gt == 3 ? 32 : 16;
839 push_constant_kb = 16;
841 unsigned push_constant_bytes = push_constant_kb * 1024;
842 unsigned push_constant_chunks =
843 push_constant_bytes / chunk_size_bytes;
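/* Worked example with hypothetical numbers: a 128 KB URB with 32 KB of push
 * constants gives urb_chunks = 128 * 1024 / 8192 = 16 and
 * push_constant_chunks = 32 * 1024 / 8192 = 4, leaving 12 chunks for the VS
 * and GS allocations below.
 */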
845 /* Initially, assign each stage the minimum amount of URB space it needs,
846 * and make a note of how much additional space it "wants" (the amount of
847 * additional space it could actually make use of).
850 /* VS has a lower limit on the number of URB entries */
852 ALIGN(devinfo->urb.min_vs_entries * vs_entry_size_bytes,
853 chunk_size_bytes) / chunk_size_bytes;
855 ALIGN(devinfo->urb.max_vs_entries * vs_entry_size_bytes,
856 chunk_size_bytes) / chunk_size_bytes - vs_chunks;
858 unsigned gs_chunks = 0;
859 unsigned gs_wants = 0;
861 /* There are two constraints on the minimum amount of URB space we can
862 * allocate:
864 * (1) We need room for at least 2 URB entries, since we always operate
865 * the GS in DUAL_OBJECT mode.
867 * (2) We can't allocate less than nr_gs_entries_granularity.
869 gs_chunks = ALIGN(MAX2(gs_granularity, 2) * gs_entry_size_bytes,
870 chunk_size_bytes) / chunk_size_bytes;
872 ALIGN(devinfo->urb.max_gs_entries * gs_entry_size_bytes,
873 chunk_size_bytes) / chunk_size_bytes - gs_chunks;
876 /* There should always be enough URB space to satisfy the minimum
877 * requirements of each stage.
879 unsigned total_needs = push_constant_chunks + vs_chunks + gs_chunks;
880 assert(total_needs <= urb_chunks);
882 /* Mete out remaining space (if any) in proportion to "wants". */
883 unsigned total_wants = vs_wants + gs_wants;
884 unsigned remaining_space = urb_chunks - total_needs;
885 if (remaining_space > total_wants)
886 remaining_space = total_wants;
887 if (remaining_space > 0) {
888 unsigned vs_additional = (unsigned)
889 round(vs_wants * (((double) remaining_space) / total_wants));
890 vs_chunks += vs_additional;
891 remaining_space -= vs_additional;
892 gs_chunks += remaining_space;
895 /* Sanity check that we haven't over-allocated. */
896 assert(push_constant_chunks + vs_chunks + gs_chunks <= urb_chunks);
898 /* Finally, compute the number of entries that can fit in the space
899 * allocated to each stage.
901 unsigned nr_vs_entries = vs_chunks * chunk_size_bytes / vs_entry_size_bytes;
902 unsigned nr_gs_entries = gs_chunks * chunk_size_bytes / gs_entry_size_bytes;
904 /* Since we rounded up when computing *_wants, this may be slightly more
905 * than the maximum allowed amount, so correct for that.
907 nr_vs_entries = MIN2(nr_vs_entries, devinfo->urb.max_vs_entries);
908 nr_gs_entries = MIN2(nr_gs_entries, devinfo->urb.max_gs_entries);
910 /* Ensure that we program a multiple of the granularity. */
911 nr_vs_entries = ROUND_DOWN_TO(nr_vs_entries, vs_granularity);
912 nr_gs_entries = ROUND_DOWN_TO(nr_gs_entries, gs_granularity);
914 /* Finally, sanity check to make sure we have at least the minimum number
915 * of entries needed for each stage.
917 assert(nr_vs_entries >= devinfo->urb.min_vs_entries);
919 assert(nr_gs_entries >= 2);
921 /* Lay out the URB in the following order:
922 * - push constants
923 * - VS
924 * - GS
925 */
926 pipeline->urb.start[MESA_SHADER_VERTEX] = push_constant_chunks;
927 pipeline->urb.size[MESA_SHADER_VERTEX] = vs_size;
928 pipeline->urb.entries[MESA_SHADER_VERTEX] = nr_vs_entries;
930 pipeline->urb.start[MESA_SHADER_GEOMETRY] = push_constant_chunks + vs_chunks;
931 pipeline->urb.size[MESA_SHADER_GEOMETRY] = gs_size;
932 pipeline->urb.entries[MESA_SHADER_GEOMETRY] = nr_gs_entries;
934 pipeline->urb.start[MESA_SHADER_TESS_CTRL] = push_constant_chunks;
935 pipeline->urb.size[MESA_SHADER_TESS_CTRL] = 1;
936 pipeline->urb.entries[MESA_SHADER_TESS_CTRL] = 0;
938 pipeline->urb.start[MESA_SHADER_TESS_EVAL] = push_constant_chunks;
939 pipeline->urb.size[MESA_SHADER_TESS_EVAL] = 1;
940 pipeline->urb.entries[MESA_SHADER_TESS_EVAL] = 0;
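/* The tessellation stages get a nominal size of 1 and zero entries because
 * tessellation isn't supported yet (see the anv_finishme in
 * anv_pipeline_init); their URB ranges still have to be programmed alongside
 * VS and GS.
 */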
944 * Copy pipeline state not marked as dynamic.
945 * Dynamic state is pipeline state which hasn't been provided at pipeline
946 * creation time, but is dynamically provided afterwards using various
947 * vkCmdSet* functions.
949 * The set of state considered "non_dynamic" is determined by the pieces of
950 * state that have their corresponding VkDynamicState enums omitted from
951 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
953 * @param[out] pipeline Destination non_dynamic state.
954 * @param[in] pCreateInfo Source of non_dynamic state to be copied.
957 copy_non_dynamic_state(struct anv_pipeline *pipeline,
958 const VkGraphicsPipelineCreateInfo *pCreateInfo)
960 anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
961 ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
962 struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
964 pipeline->dynamic_state = default_dynamic_state;
966 if (pCreateInfo->pDynamicState) {
967 /* Remove all of the states that are marked as dynamic */
968 uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
969 for (uint32_t s = 0; s < count; s++)
970 states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
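/* Each VkDynamicState enum value is used directly as a bit position, so
 * listing e.g. VK_DYNAMIC_STATE_VIEWPORT in pDynamicStates clears
 * (1 << VK_DYNAMIC_STATE_VIEWPORT) from `states` and the viewport copy below
 * is skipped; whatever bits remain set are copied into
 * pipeline->dynamic_state and recorded in dynamic_state_mask at the end.
 */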
973 struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;
975 /* Section 9.2 of the Vulkan 1.0.15 spec says:
977 * pViewportState is [...] NULL if the pipeline
978 * has rasterization disabled.
980 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
981 assert(pCreateInfo->pViewportState);
983 dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
984 if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
985 typed_memcpy(dynamic->viewport.viewports,
986 pCreateInfo->pViewportState->pViewports,
987 pCreateInfo->pViewportState->viewportCount);
990 dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
991 if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
992 typed_memcpy(dynamic->scissor.scissors,
993 pCreateInfo->pViewportState->pScissors,
994 pCreateInfo->pViewportState->scissorCount);
998 if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
999 assert(pCreateInfo->pRasterizationState);
1000 dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
1003 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
1004 assert(pCreateInfo->pRasterizationState);
1005 dynamic->depth_bias.bias =
1006 pCreateInfo->pRasterizationState->depthBiasConstantFactor;
1007 dynamic->depth_bias.clamp =
1008 pCreateInfo->pRasterizationState->depthBiasClamp;
1009 dynamic->depth_bias.slope =
1010 pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
1013 /* Section 9.2 of the Vulkan 1.0.15 spec says:
1015 * pColorBlendState is [...] NULL if the pipeline has rasterization
1016 * disabled or if the subpass of the render pass the pipeline is
1017 * created against does not use any color attachments.
1019 bool uses_color_att = false;
1020 for (unsigned i = 0; i < subpass->color_count; ++i) {
1021 if (subpass->color_attachments[i] != VK_ATTACHMENT_UNUSED) {
1022 uses_color_att = true;
1027 if (uses_color_att &&
1028 !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
1029 assert(pCreateInfo->pColorBlendState);
1031 if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
1032 typed_memcpy(dynamic->blend_constants,
1033 pCreateInfo->pColorBlendState->blendConstants, 4);
1036 /* If there is no depthstencil attachment, then don't read
1037 * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
1038 * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
1039 * no need to override the depthstencil defaults in
1040 * anv_pipeline::dynamic_state when there is no depthstencil attachment.
1042 * Section 9.2 of the Vulkan 1.0.15 spec says:
1044 * pDepthStencilState is [...] NULL if the pipeline has rasterization
1045 * disabled or if the subpass of the render pass the pipeline is created
1046 * against does not use a depth/stencil attachment.
1048 if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
1049 subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED) {
1050 assert(pCreateInfo->pDepthStencilState);
1052 if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
1053 dynamic->depth_bounds.min =
1054 pCreateInfo->pDepthStencilState->minDepthBounds;
1055 dynamic->depth_bounds.max =
1056 pCreateInfo->pDepthStencilState->maxDepthBounds;
1059 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
1060 dynamic->stencil_compare_mask.front =
1061 pCreateInfo->pDepthStencilState->front.compareMask;
1062 dynamic->stencil_compare_mask.back =
1063 pCreateInfo->pDepthStencilState->back.compareMask;
1066 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
1067 dynamic->stencil_write_mask.front =
1068 pCreateInfo->pDepthStencilState->front.writeMask;
1069 dynamic->stencil_write_mask.back =
1070 pCreateInfo->pDepthStencilState->back.writeMask;
1073 if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
1074 dynamic->stencil_reference.front =
1075 pCreateInfo->pDepthStencilState->front.reference;
1076 dynamic->stencil_reference.back =
1077 pCreateInfo->pDepthStencilState->back.reference;
1081 pipeline->dynamic_state_mask = states;
1085 anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
1087 struct anv_render_pass *renderpass = NULL;
1088 struct anv_subpass *subpass = NULL;
1090 /* Assert that all required members of VkGraphicsPipelineCreateInfo are
1091 * present, as explained by the Vulkan spec (20 Oct 2015, git-aa308cb), Section
1092 * 4.2 Graphics Pipeline.
1094 assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1096 renderpass = anv_render_pass_from_handle(info->renderPass);
1099 if (renderpass != &anv_meta_dummy_renderpass) {
1100 assert(info->subpass < renderpass->subpass_count);
1101 subpass = &renderpass->subpasses[info->subpass];
1104 assert(info->stageCount >= 1);
1105 assert(info->pVertexInputState);
1106 assert(info->pInputAssemblyState);
1107 assert(info->pViewportState);
1108 assert(info->pRasterizationState);
1110 if (subpass && subpass->depth_stencil_attachment != VK_ATTACHMENT_UNUSED)
1111 assert(info->pDepthStencilState);
1113 if (subpass && subpass->color_count > 0)
1114 assert(info->pColorBlendState);
1116 for (uint32_t i = 0; i < info->stageCount; ++i) {
1117 switch (info->pStages[i].stage) {
1118 case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
1119 case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
1120 assert(info->pTessellationState);
1129 anv_pipeline_init(struct anv_pipeline *pipeline,
1130 struct anv_device *device,
1131 struct anv_pipeline_cache *cache,
1132 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1133 const struct anv_graphics_pipeline_create_info *extra,
1134 const VkAllocationCallbacks *alloc)
1139 anv_pipeline_validate_create_info(pCreateInfo);
1142 if (alloc == NULL)
1143 alloc = &device->alloc;
1145 pipeline->device = device;
1146 pipeline->layout = anv_pipeline_layout_from_handle(pCreateInfo->layout);
1148 result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
1149 if (result != VK_SUCCESS)
1150 return result;
1152 pipeline->batch.alloc = alloc;
1153 pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1154 pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1155 pipeline->batch.relocs = &pipeline->batch_relocs;
1157 copy_non_dynamic_state(pipeline, pCreateInfo);
1158 pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
1159 pCreateInfo->pRasterizationState->depthClampEnable;
1161 pipeline->use_repclear = extra && extra->use_repclear;
1163 pipeline->needs_data_cache = false;
1165 /* When we free the pipeline, we detect stages based on the NULL status
1166 * of various prog_data pointers. Make them NULL by default.
1168 memset(pipeline->prog_data, 0, sizeof(pipeline->prog_data));
1169 memset(pipeline->bindings, 0, sizeof(pipeline->bindings));
1171 pipeline->vs_simd8 = NO_KERNEL;
1172 pipeline->vs_vec4 = NO_KERNEL;
1173 pipeline->gs_kernel = NO_KERNEL;
1174 pipeline->ps_ksp0 = NO_KERNEL;
1176 pipeline->active_stages = 0;
1178 const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = { 0, };
1179 struct anv_shader_module *modules[MESA_SHADER_STAGES] = { 0, };
1180 for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
1181 gl_shader_stage stage = ffs(pCreateInfo->pStages[i].stage) - 1;
1182 pStages[stage] = &pCreateInfo->pStages[i];
1183 modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
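/* This relies on VkShaderStageFlagBits being single bits in the same order
 * as gl_shader_stage: ffs(VK_SHADER_STAGE_VERTEX_BIT) - 1 == 0 ==
 * MESA_SHADER_VERTEX, ffs(VK_SHADER_STAGE_FRAGMENT_BIT) - 1 == 4 ==
 * MESA_SHADER_FRAGMENT, and so on.
 */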
1186 if (modules[MESA_SHADER_VERTEX]) {
1187 anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
1188 modules[MESA_SHADER_VERTEX],
1189 pStages[MESA_SHADER_VERTEX]->pName,
1190 pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
1193 if (modules[MESA_SHADER_TESS_CTRL] || modules[MESA_SHADER_TESS_EVAL])
1194 anv_finishme("no tessellation support");
1196 if (modules[MESA_SHADER_GEOMETRY]) {
1197 anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
1198 modules[MESA_SHADER_GEOMETRY],
1199 pStages[MESA_SHADER_GEOMETRY]->pName,
1200 pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
1203 if (modules[MESA_SHADER_FRAGMENT]) {
1204 anv_pipeline_compile_fs(pipeline, cache, pCreateInfo, extra,
1205 modules[MESA_SHADER_FRAGMENT],
1206 pStages[MESA_SHADER_FRAGMENT]->pName,
1207 pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
1210 if (!(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT)) {
1211 /* Vertex is only optional if disable_vs is set */
1212 assert(extra->disable_vs);
1215 anv_setup_pipeline_l3_config(pipeline);
1216 anv_compute_urb_partition(pipeline);
1218 const VkPipelineVertexInputStateCreateInfo *vi_info =
1219 pCreateInfo->pVertexInputState;
1221 uint64_t inputs_read;
1222 if (extra && extra->disable_vs) {
1223 /* If the VS is disabled, just assume the user knows what they're
1224 * doing and apply the layout blindly. This can only come from
1225 * meta, so this *should* be safe.
1227 inputs_read = ~0ull;
1229 inputs_read = get_vs_prog_data(pipeline)->inputs_read;
1232 pipeline->vb_used = 0;
1233 for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
1234 const VkVertexInputAttributeDescription *desc =
1235 &vi_info->pVertexAttributeDescriptions[i];
1237 if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
1238 pipeline->vb_used |= 1 << desc->binding;
1241 for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
1242 const VkVertexInputBindingDescription *desc =
1243 &vi_info->pVertexBindingDescriptions[i];
1245 pipeline->binding_stride[desc->binding] = desc->stride;
1247 /* Step rate is programmed per vertex element (attribute), not
1248 * binding. Set up a map of which bindings step per instance, for
1249 * reference by vertex element setup. */
1250 switch (desc->inputRate) {
1252 case VK_VERTEX_INPUT_RATE_VERTEX:
1253 pipeline->instancing_enable[desc->binding] = false;
1255 case VK_VERTEX_INPUT_RATE_INSTANCE:
1256 pipeline->instancing_enable[desc->binding] = true;
1261 const VkPipelineInputAssemblyStateCreateInfo *ia_info =
1262 pCreateInfo->pInputAssemblyState;
1263 pipeline->primitive_restart = ia_info->primitiveRestartEnable;
1264 pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];
1266 if (extra && extra->use_rectlist)
1267 pipeline->topology = _3DPRIM_RECTLIST;
1272 VkResult
1273 anv_graphics_pipeline_create(
1274 VkDevice _device,
1275 VkPipelineCache _cache,
1276 const VkGraphicsPipelineCreateInfo *pCreateInfo,
1277 const struct anv_graphics_pipeline_create_info *extra,
1278 const VkAllocationCallbacks *pAllocator,
1279 VkPipeline *pPipeline)
1281 ANV_FROM_HANDLE(anv_device, device, _device);
1282 ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
1285 cache = &device->default_pipeline_cache;
1287 switch (device->info.gen) {
1289 if (device->info.is_haswell)
1290 return gen75_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
1292 return gen7_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
1294 return gen8_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
1296 return gen9_graphics_pipeline_create(_device, cache, pCreateInfo, extra, pAllocator, pPipeline);
1298 unreachable("unsupported gen\n");
1302 VkResult anv_CreateGraphicsPipelines(
1303 VkDevice _device,
1304 VkPipelineCache pipelineCache,
1305 uint32_t count,
1306 const VkGraphicsPipelineCreateInfo* pCreateInfos,
1307 const VkAllocationCallbacks* pAllocator,
1308 VkPipeline* pPipelines)
1310 VkResult result = VK_SUCCESS;
1313 for (; i < count; i++) {
1314 result = anv_graphics_pipeline_create(_device,
1317 NULL, pAllocator, &pPipelines[i]);
1318 if (result != VK_SUCCESS) {
1319 for (unsigned j = 0; j < i; j++) {
1320 anv_DestroyPipeline(_device, pPipelines[j], pAllocator);
1330 static VkResult anv_compute_pipeline_create(
1331 VkDevice _device,
1332 VkPipelineCache _cache,
1333 const VkComputePipelineCreateInfo* pCreateInfo,
1334 const VkAllocationCallbacks* pAllocator,
1335 VkPipeline* pPipeline)
1337 ANV_FROM_HANDLE(anv_device, device, _device);
1338 ANV_FROM_HANDLE(anv_pipeline_cache, cache, _cache);
1341 cache = &device->default_pipeline_cache;
1343 switch (device->info.gen) {
1345 if (device->info.is_haswell)
1346 return gen75_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
1348 return gen7_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
1350 return gen8_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
1352 return gen9_compute_pipeline_create(_device, cache, pCreateInfo, pAllocator, pPipeline);
1354 unreachable("unsupported gen\n");
1358 VkResult anv_CreateComputePipelines(
1359 VkDevice _device,
1360 VkPipelineCache pipelineCache,
1361 uint32_t count,
1362 const VkComputePipelineCreateInfo* pCreateInfos,
1363 const VkAllocationCallbacks* pAllocator,
1364 VkPipeline* pPipelines)
1366 VkResult result = VK_SUCCESS;
1369 for (; i < count; i++) {
1370 result = anv_compute_pipeline_create(_device, pipelineCache,
1372 pAllocator, &pPipelines[i]);
1373 if (result != VK_SUCCESS) {
1374 for (unsigned j = 0; j < i; j++) {
1375 anv_DestroyPipeline(_device, pPipelines[j], pAllocator);