/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <stdlib.h>

#include "util/mesa-sha1.h"
#include "common/gen_l3_config.h"
#include "anv_private.h"
#include "compiler/brw_nir.h"
#include "anv_nir.h"
#include "spirv/nir_spirv.h"

/* Needed for SWIZZLE macros */
#include "program/prog_instruction.h"

VkResult anv_CreateShaderModule(
    VkDevice                                    _device,
    const VkShaderModuleCreateInfo*             pCreateInfo,
    const VkAllocationCallbacks*                pAllocator,
    VkShaderModule*                             pShaderModule)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   struct anv_shader_module *module;

   assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO);
   assert(pCreateInfo->flags == 0);

   module = vk_alloc2(&device->alloc, pAllocator,
                      sizeof(*module) + pCreateInfo->codeSize, 8,
                      VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
   if (module == NULL)
      return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);

   module->size = pCreateInfo->codeSize;
   memcpy(module->data, pCreateInfo->pCode, module->size);

   _mesa_sha1_compute(module->data, module->size, module->sha1);

   *pShaderModule = anv_shader_module_to_handle(module);

   return VK_SUCCESS;
}

void anv_DestroyShaderModule(
    VkDevice                                    _device,
    VkShaderModule                              _module,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_shader_module, module, _module);

   if (!module)
      return;

   vk_free2(&device->alloc, pAllocator, module);
}

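/* The first word of every valid SPIR-V binary is this magic number; see
 * section 3.1 "Magic Number" of the SPIR-V specification.
 */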
#define SPIR_V_MAGIC_NUMBER 0x07230203

static const uint64_t stage_to_debug[] = {
   [MESA_SHADER_VERTEX] = DEBUG_VS,
   [MESA_SHADER_TESS_CTRL] = DEBUG_TCS,
   [MESA_SHADER_TESS_EVAL] = DEBUG_TES,
   [MESA_SHADER_GEOMETRY] = DEBUG_GS,
   [MESA_SHADER_FRAGMENT] = DEBUG_WM,
   [MESA_SHADER_COMPUTE] = DEBUG_CS,
};

/* Eventually, this will become part of anv_CreateShader.  Unfortunately,
 * we can't do that yet because we don't have the ability to copy nir.
 */
static nir_shader *
anv_shader_compile_to_nir(struct anv_pipeline *pipeline,
                          void *mem_ctx,
                          struct anv_shader_module *module,
                          const char *entrypoint_name,
                          gl_shader_stage stage,
                          const VkSpecializationInfo *spec_info)
{
   const struct anv_device *device = pipeline->device;

   const struct brw_compiler *compiler =
      device->instance->physicalDevice.compiler;
   const nir_shader_compiler_options *nir_options =
      compiler->glsl_compiler_options[stage].NirOptions;

   uint32_t *spirv = (uint32_t *) module->data;
   assert(spirv[0] == SPIR_V_MAGIC_NUMBER);
   assert(module->size % 4 == 0);

   uint32_t num_spec_entries = 0;
   struct nir_spirv_specialization *spec_entries = NULL;
   if (spec_info && spec_info->mapEntryCount > 0) {
      num_spec_entries = spec_info->mapEntryCount;
      spec_entries = malloc(num_spec_entries * sizeof(*spec_entries));
      for (uint32_t i = 0; i < num_spec_entries; i++) {
         VkSpecializationMapEntry entry = spec_info->pMapEntries[i];
         const void *data = spec_info->pData + entry.offset;
         assert(data + entry.size <= spec_info->pData + spec_info->dataSize);

         spec_entries[i].id = spec_info->pMapEntries[i].constantID;
         /* Use the per-entry size, not the size of the whole data blob, to
          * decide whether this is a 64-bit constant.
          */
         if (entry.size == 8)
            spec_entries[i].data64 = *(const uint64_t *)data;
         else
            spec_entries[i].data32 = *(const uint32_t *)data;
      }
   }

   struct spirv_to_nir_options spirv_options = {
      .lower_workgroup_access_to_offsets = true,
      .caps = {
         .float64 = device->instance->physicalDevice.info.gen >= 8,
         .int64 = device->instance->physicalDevice.info.gen >= 8,
         .tessellation = true,
         .device_group = true,
         .draw_parameters = true,
         .image_write_without_format = true,
         .multiview = true,
         .variable_pointers = true,
         .storage_16bit = device->instance->physicalDevice.info.gen >= 8,
         .int16 = device->instance->physicalDevice.info.gen >= 8,
         .shader_viewport_index_layer = true,
         .subgroup_arithmetic = true,
         .subgroup_basic = true,
         .subgroup_ballot = true,
         .subgroup_quad = true,
         .subgroup_shuffle = true,
         .subgroup_vote = true,
         .stencil_export = device->instance->physicalDevice.info.gen >= 9,
      },
   };

   nir_function *entry_point =
      spirv_to_nir(spirv, module->size / 4,
                   spec_entries, num_spec_entries,
                   stage, entrypoint_name, &spirv_options, nir_options);
   nir_shader *nir = entry_point->shader;
   assert(nir->info.stage == stage);
   nir_validate_shader(nir);
   ralloc_steal(mem_ctx, nir);

   free(spec_entries);

   if (unlikely(INTEL_DEBUG & stage_to_debug[stage])) {
      fprintf(stderr, "NIR (from SPIR-V) for %s shader:\n",
              gl_shader_stage_name(stage));
      nir_print_shader(nir, stderr);
   }

   /* We have to lower away local constant initializers right before we
    * inline functions.  That way they get properly initialized at the top
    * of the function and not at the top of its caller.
    */
   NIR_PASS_V(nir, nir_lower_constant_initializers, nir_var_local);
   NIR_PASS_V(nir, nir_lower_returns);
   NIR_PASS_V(nir, nir_inline_functions);
   NIR_PASS_V(nir, nir_copy_prop);

   /* Pick off the single entrypoint that we want */
   foreach_list_typed_safe(nir_function, func, node, &nir->functions) {
      if (func != entry_point)
         exec_node_remove(&func->node);
   }
   assert(exec_list_length(&nir->functions) == 1);
   entry_point->name = ralloc_strdup(entry_point, "main");

   NIR_PASS_V(nir, nir_lower_deref_instrs, ~0);

   /* Now that we've deleted all but the main function, we can go ahead and
    * lower the rest of the constant initializers.  We do this here so that
    * nir_remove_dead_variables and split_per_member_structs below see the
    * corresponding stores.
    */
   NIR_PASS_V(nir, nir_lower_constant_initializers, ~0);

   /* Split member structs.  We do this before lower_io_to_temporaries so
    * that it doesn't lower system values to temporaries by accident.
    */
   NIR_PASS_V(nir, nir_split_var_copies);
   NIR_PASS_V(nir, nir_split_per_member_structs);

   NIR_PASS_V(nir, nir_remove_dead_variables,
              nir_var_shader_in | nir_var_shader_out | nir_var_system_value);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, nir_lower_wpos_center, pipeline->sample_shading_enable);

   NIR_PASS_V(nir, nir_propagate_invariant);
   NIR_PASS_V(nir, nir_lower_io_to_temporaries,
              entry_point->impl, true, false);

   /* Vulkan uses the separate-shader linking model */
   nir->info.separate_shader = true;

   nir = brw_preprocess_nir(compiler, nir);

   if (stage == MESA_SHADER_FRAGMENT)
      NIR_PASS_V(nir, anv_nir_lower_input_attachments);

   return nir;
}

void anv_DestroyPipeline(
    VkDevice                                    _device,
    VkPipeline                                  _pipeline,
    const VkAllocationCallbacks*                pAllocator)
{
   ANV_FROM_HANDLE(anv_device, device, _device);
   ANV_FROM_HANDLE(anv_pipeline, pipeline, _pipeline);

   if (!pipeline)
      return;

   anv_reloc_list_finish(&pipeline->batch_relocs,
                         pAllocator ? pAllocator : &device->alloc);
   if (pipeline->blend_state.map)
      anv_state_pool_free(&device->dynamic_state_pool, pipeline->blend_state);

   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   vk_free2(&device->alloc, pAllocator, pipeline);
}

static const uint32_t vk_to_gen_primitive_type[] = {
   [VK_PRIMITIVE_TOPOLOGY_POINT_LIST]                    = _3DPRIM_POINTLIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST]                     = _3DPRIM_LINELIST,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP]                    = _3DPRIM_LINESTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST]                 = _3DPRIM_TRILIST,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP]                = _3DPRIM_TRISTRIP,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_FAN]                  = _3DPRIM_TRIFAN,
   [VK_PRIMITIVE_TOPOLOGY_LINE_LIST_WITH_ADJACENCY]      = _3DPRIM_LINELIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_LINE_STRIP_WITH_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST_WITH_ADJACENCY]  = _3DPRIM_TRILIST_ADJ,
   [VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP_WITH_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
};

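/* Note that VK_PRIMITIVE_TOPOLOGY_PATCH_LIST has no entry in the table
 * above; patch topologies are translated with _3DPRIM_PATCHLIST(n) in
 * anv_pipeline_init, based on patchControlPoints.
 */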
static void
populate_sampler_prog_key(const struct gen_device_info *devinfo,
                          struct brw_sampler_prog_key_data *key)
{
   /* Almost all multisampled textures are compressed.  The only time when we
    * don't compress a multisampled texture is for 16x MSAA with a surface
    * width greater than 8k, which is a bit of an edge case.  Since the
    * sampler just ignores the MCS parameter to ld2ms when MCS is disabled,
    * it's safe to tell the compiler to always assume compression.
    */
   key->compressed_multisample_layout_mask = ~0;

   /* SkyLake added support for 16x MSAA.  With this came a new message for
    * reading from a 16x MSAA surface with compression.  The new message was
    * needed because now the MCS data is 64 bits instead of 32 or lower as is
    * the case for 8x, 4x, and 2x.  The key->msaa_16 bit-field controls which
    * message we use.  Fortunately, the 16x message works for 8x, 4x, and 2x,
    * so we can just use it unconditionally.  This may not be quite as
    * efficient but it saves us from recompiling.
    */
   if (devinfo->gen >= 9)
      key->msaa_16 = ~0;

   /* XXX: Handle texture swizzle on HSW- */
   for (int i = 0; i < MAX_SAMPLERS; i++) {
      /* Assume color sampler, no swizzling. (Works for BDW+) */
      key->swizzles[i] = SWIZZLE_XYZW;
   }
}

static void
populate_vs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_vs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* XXX: Handle vertex input work-arounds */

   /* XXX: Handle sampler_prog_key */
}

static void
populate_gs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_gs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

static void
populate_wm_prog_key(const struct anv_pipeline *pipeline,
                     const VkGraphicsPipelineCreateInfo *info,
                     struct brw_wm_prog_key *key)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;

   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);

   /* TODO: we could set this to 0 based on the information in nir_shader,
    * but this function is called before spirv_to_nir.
    */
   const struct brw_vue_map *vue_map =
      &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;
   key->input_slots_valid = vue_map->slots_valid;

   /* Vulkan doesn't specify a default */
   key->high_quality_derivatives = false;

   /* XXX Vulkan doesn't appear to specify */
   key->clamp_fragment_color = false;

   key->nr_color_regions = pipeline->subpass->color_count;

   key->replicate_alpha = key->nr_color_regions > 1 &&
                          info->pMultisampleState &&
                          info->pMultisampleState->alphaToCoverageEnable;

   if (info->pMultisampleState) {
      /* We should probably pull this out of the shader, but it's fairly
       * harmless to compute it and then let dead-code take care of it.
       */
      if (info->pMultisampleState->rasterizationSamples > 1) {
         key->persample_interp =
            (info->pMultisampleState->minSampleShading *
             info->pMultisampleState->rasterizationSamples) > 1;
         key->multisample_fbo = true;
      }

      key->frag_coord_adds_sample_pos =
         info->pMultisampleState->sampleShadingEnable;
   }
}

static void
populate_cs_prog_key(const struct gen_device_info *devinfo,
                     struct brw_cs_prog_key *key)
{
   memset(key, 0, sizeof(*key));

   populate_sampler_prog_key(devinfo, &key->tex);
}

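/* Build the pipeline-cache key for one shader stage.  Everything that can
 * affect the compiled binary gets folded into the SHA-1: the subpass view
 * mask (for non-compute stages), the pipeline layout, the module hash, the
 * entrypoint name, the stage, the specialization constants, and the
 * stage-specific prog key.
 */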
static void
anv_pipeline_hash_shader(struct anv_pipeline *pipeline,
                         struct anv_pipeline_layout *layout,
                         struct anv_shader_module *module,
                         const char *entrypoint,
                         gl_shader_stage stage,
                         const VkSpecializationInfo *spec_info,
                         const void *key, size_t key_size,
                         unsigned char *sha1_out)
{
   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);
   if (stage != MESA_SHADER_COMPUTE) {
      _mesa_sha1_update(&ctx, &pipeline->subpass->view_mask,
                        sizeof(pipeline->subpass->view_mask));
   }
   if (layout)
      _mesa_sha1_update(&ctx, layout->sha1, sizeof(layout->sha1));
   _mesa_sha1_update(&ctx, module->sha1, sizeof(module->sha1));
   _mesa_sha1_update(&ctx, entrypoint, strlen(entrypoint));
   _mesa_sha1_update(&ctx, &stage, sizeof(stage));
   if (spec_info) {
      _mesa_sha1_update(&ctx, spec_info->pMapEntries,
                        spec_info->mapEntryCount *
                        sizeof(*spec_info->pMapEntries));
      _mesa_sha1_update(&ctx, spec_info->pData, spec_info->dataSize);
   }
   _mesa_sha1_update(&ctx, key, key_size);
   _mesa_sha1_final(&ctx, sha1_out);
}

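/* Shared front half of compilation for every stage: run spirv_to_nir, apply
 * the anv-specific lowering passes, set up the push-constant params, and
 * apply the pipeline layout.  Returns NULL on failure, in which case the
 * caller still owns (and must free) mem_ctx.
 */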
static nir_shader *
anv_pipeline_compile(struct anv_pipeline *pipeline,
                     void *mem_ctx,
                     struct anv_pipeline_layout *layout,
                     struct anv_shader_module *module,
                     const char *entrypoint,
                     gl_shader_stage stage,
                     const VkSpecializationInfo *spec_info,
                     struct brw_stage_prog_data *prog_data,
                     struct anv_pipeline_bind_map *map)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;

   nir_shader *nir = anv_shader_compile_to_nir(pipeline, mem_ctx,
                                               module, entrypoint, stage,
                                               spec_info);
   if (nir == NULL)
      return NULL;

   NIR_PASS_V(nir, anv_nir_lower_ycbcr_textures, layout);

   NIR_PASS_V(nir, anv_nir_lower_push_constants);

   if (stage != MESA_SHADER_COMPUTE)
      NIR_PASS_V(nir, anv_nir_lower_multiview, pipeline->subpass->view_mask);

   if (stage == MESA_SHADER_COMPUTE)
      prog_data->total_shared = nir->num_shared;

   nir_shader_gather_info(nir, nir_shader_get_entrypoint(nir));

   if (nir->num_uniforms > 0) {
      assert(prog_data->nr_params == 0);

      /* If the shader uses any push constants at all, we'll just give
       * them the maximum possible number.
       */
      assert(nir->num_uniforms <= MAX_PUSH_CONSTANTS_SIZE);
      nir->num_uniforms = MAX_PUSH_CONSTANTS_SIZE;
      prog_data->nr_params += MAX_PUSH_CONSTANTS_SIZE / sizeof(float);
      prog_data->param = ralloc_array(mem_ctx, uint32_t, prog_data->nr_params);

      /* We now set the param values to be offsets into a
       * anv_push_constant_data structure.  Since the compiler doesn't
       * actually dereference any of the gl_constant_value pointers in the
       * params array, it doesn't really matter what we put here.
       */
      struct anv_push_constants *null_data = NULL;
      /* Fill out the push constants section of the param array */
      for (unsigned i = 0; i < MAX_PUSH_CONSTANTS_SIZE / sizeof(float); i++) {
         prog_data->param[i] = ANV_PARAM_PUSH(
            (uintptr_t)&null_data->client_data[i * sizeof(float)]);
      }
   }

   if (nir->info.num_ssbos > 0 || nir->info.num_images > 0)
      pipeline->needs_data_cache = true;

   /* Apply the actual pipeline layout to UBOs, SSBOs, and textures */
   if (layout)
      anv_nir_apply_pipeline_layout(pipeline, layout, nir, prog_data, map);

   if (stage != MESA_SHADER_COMPUTE)
      brw_nir_analyze_ubo_ranges(compiler, nir, prog_data->ubo_ranges);

   assert(nir->num_uniforms == prog_data->nr_params * 4);

   return nir;
}

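/* Reserve the bottom `bias` binding-table slots; every application-visible
 * surface group (textures, UBOs, SSBOs, images) then starts after them.
 * The fragment shader uses this to put its compacted render-target entries
 * first.
 */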
static void
anv_fill_binding_table(struct brw_stage_prog_data *prog_data, unsigned bias)
{
   prog_data->binding_table.size_bytes = 0;
   prog_data->binding_table.texture_start = bias;
   prog_data->binding_table.gather_texture_start = bias;
   prog_data->binding_table.ubo_start = bias;
   prog_data->binding_table.ssbo_start = bias;
   prog_data->binding_table.image_start = bias;
}

static struct anv_shader_bin *
anv_pipeline_upload_kernel(struct anv_pipeline *pipeline,
                           struct anv_pipeline_cache *cache,
                           const void *key_data, uint32_t key_size,
                           const void *kernel_data, uint32_t kernel_size,
                           const struct brw_stage_prog_data *prog_data,
                           uint32_t prog_data_size,
                           const struct anv_pipeline_bind_map *bind_map)
{
   if (cache) {
      return anv_pipeline_cache_upload_kernel(cache, key_data, key_size,
                                              kernel_data, kernel_size,
                                              prog_data, prog_data_size,
                                              bind_map);
   } else {
      return anv_shader_bin_create(pipeline->device, key_data, key_size,
                                   kernel_data, kernel_size,
                                   prog_data, prog_data_size,
                                   prog_data->param, bind_map);
   }
}

static void
anv_pipeline_add_compiled_stage(struct anv_pipeline *pipeline,
                                gl_shader_stage stage,
                                struct anv_shader_bin *shader)
{
   pipeline->shaders[stage] = shader;
}

static VkResult
anv_pipeline_compile_vs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_vs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_vs_prog_key(&pipeline->device->info, &key);

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, layout, module, entrypoint,
                               MESA_SHADER_VERTEX, spec_info,
                               &key, sizeof(key), sha1);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_vs_prog_data prog_data = {};
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      struct anv_pipeline_bind_map map = {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout,
                                             module, entrypoint,
                                             MESA_SHADER_VERTEX, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      anv_fill_binding_table(&prog_data.base.base, 0);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info.outputs_written,
                          nir->info.separate_shader);

      const unsigned *shader_code =
         brw_compile_vs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      unsigned code_size = prog_data.base.base.program_size;
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_VERTEX, bin);

   return VK_SUCCESS;
}

static void
merge_tess_info(struct shader_info *tes_info,
                const struct shader_info *tcs_info)
{
   /* The Vulkan 1.0.38 spec, section 21.1 Tessellator says:
    *
    *    "PointMode. Controls generation of points rather than triangles
    *     or lines. This functionality defaults to disabled, and is
    *     enabled if either shader stage includes the execution mode."
    *
    * and about Triangles, Quads, IsoLines, VertexOrderCw, VertexOrderCcw,
    * PointMode, SpacingEqual, SpacingFractionalEven, SpacingFractionalOdd,
    * and OutputVertices, it says:
    *
    *    "One mode must be set in at least one of the tessellation
    *     shader stages."
    *
    * So, the fields can be set in either the TCS or TES, but they must
    * agree if set in both.  Our backend looks at TES, so bitwise-or in
    * the values from the TCS.
    */
   assert(tcs_info->tess.tcs_vertices_out == 0 ||
          tes_info->tess.tcs_vertices_out == 0 ||
          tcs_info->tess.tcs_vertices_out == tes_info->tess.tcs_vertices_out);
   tes_info->tess.tcs_vertices_out |= tcs_info->tess.tcs_vertices_out;

   assert(tcs_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tes_info->tess.spacing == TESS_SPACING_UNSPECIFIED ||
          tcs_info->tess.spacing == tes_info->tess.spacing);
   tes_info->tess.spacing |= tcs_info->tess.spacing;

   assert(tcs_info->tess.primitive_mode == 0 ||
          tes_info->tess.primitive_mode == 0 ||
          tcs_info->tess.primitive_mode == tes_info->tess.primitive_mode);
   tes_info->tess.primitive_mode |= tcs_info->tess.primitive_mode;
   tes_info->tess.ccw |= tcs_info->tess.ccw;
   tes_info->tess.point_mode |= tcs_info->tess.point_mode;
}

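/* The TCS and TES are compiled as a pair.  Each stage's 40-byte cache key
 * embeds the other stage's 20-byte SHA-1 (the memcpy calls below) because
 * the final prog keys are filled in with information gathered from both
 * shaders.
 */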
static VkResult
anv_pipeline_compile_tcs_tes(struct anv_pipeline *pipeline,
                             struct anv_pipeline_cache *cache,
                             const VkGraphicsPipelineCreateInfo *info,
                             struct anv_shader_module *tcs_module,
                             const char *tcs_entrypoint,
                             const VkSpecializationInfo *tcs_spec_info,
                             struct anv_shader_module *tes_module,
                             const char *tes_entrypoint,
                             const VkSpecializationInfo *tes_spec_info)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_tcs_prog_key tcs_key = {};
   struct brw_tes_prog_key tes_key = {};
   struct anv_shader_bin *tcs_bin = NULL;
   struct anv_shader_bin *tes_bin = NULL;
   unsigned char tcs_sha1[40];
   unsigned char tes_sha1[40];

   populate_sampler_prog_key(&pipeline->device->info, &tcs_key.tex);
   populate_sampler_prog_key(&pipeline->device->info, &tes_key.tex);
   tcs_key.input_vertices = info->pTessellationState->patchControlPoints;

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, layout, tcs_module, tcs_entrypoint,
                               MESA_SHADER_TESS_CTRL, tcs_spec_info,
                               &tcs_key, sizeof(tcs_key), tcs_sha1);
      anv_pipeline_hash_shader(pipeline, layout, tes_module, tes_entrypoint,
                               MESA_SHADER_TESS_EVAL, tes_spec_info,
                               &tes_key, sizeof(tes_key), tes_sha1);
      memcpy(&tcs_sha1[20], tes_sha1, 20);
      memcpy(&tes_sha1[20], tcs_sha1, 20);
      tcs_bin = anv_pipeline_cache_search(cache, tcs_sha1, sizeof(tcs_sha1));
      tes_bin = anv_pipeline_cache_search(cache, tes_sha1, sizeof(tes_sha1));
   }

   if (tcs_bin == NULL || tes_bin == NULL) {
      struct brw_tcs_prog_data tcs_prog_data = {};
      struct brw_tes_prog_data tes_prog_data = {};
      struct anv_pipeline_binding tcs_surface_to_descriptor[256];
      struct anv_pipeline_binding tcs_sampler_to_descriptor[256];
      struct anv_pipeline_binding tes_surface_to_descriptor[256];
      struct anv_pipeline_binding tes_sampler_to_descriptor[256];

      struct anv_pipeline_bind_map tcs_map = {
         .surface_to_descriptor = tcs_surface_to_descriptor,
         .sampler_to_descriptor = tcs_sampler_to_descriptor
      };
      struct anv_pipeline_bind_map tes_map = {
         .surface_to_descriptor = tes_surface_to_descriptor,
         .sampler_to_descriptor = tes_sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *tcs_nir =
         anv_pipeline_compile(pipeline, mem_ctx, layout,
                              tcs_module, tcs_entrypoint,
                              MESA_SHADER_TESS_CTRL, tcs_spec_info,
                              &tcs_prog_data.base.base, &tcs_map);
      nir_shader *tes_nir =
         anv_pipeline_compile(pipeline, mem_ctx, layout,
                              tes_module, tes_entrypoint,
                              MESA_SHADER_TESS_EVAL, tes_spec_info,
                              &tes_prog_data.base.base, &tes_map);
      if (tcs_nir == NULL || tes_nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      nir_lower_tes_patch_vertices(tes_nir,
                                   tcs_nir->info.tess.tcs_vertices_out);

      /* Copy TCS info into the TES info */
      merge_tess_info(&tes_nir->info, &tcs_nir->info);

      anv_fill_binding_table(&tcs_prog_data.base.base, 0);
      anv_fill_binding_table(&tes_prog_data.base.base, 0);

      /* Whacking the key after cache lookup is a bit sketchy, but all of
       * this comes from the SPIR-V, which is part of the hash used for the
       * pipeline cache.  So it should be safe.
       */
      tcs_key.tes_primitive_mode = tes_nir->info.tess.primitive_mode;
      tcs_key.outputs_written = tcs_nir->info.outputs_written;
      tcs_key.patch_outputs_written = tcs_nir->info.patch_outputs_written;
      tcs_key.quads_workaround =
         devinfo->gen < 9 &&
         tes_nir->info.tess.primitive_mode == 7 /* GL_QUADS */ &&
         tes_nir->info.tess.spacing == TESS_SPACING_EQUAL;

      tes_key.inputs_read = tcs_key.outputs_written;
      tes_key.patch_inputs_read = tcs_key.patch_outputs_written;

      const int shader_time_index = -1;
      const unsigned *shader_code;

      shader_code =
         brw_compile_tcs(compiler, NULL, mem_ctx, &tcs_key, &tcs_prog_data,
                         tcs_nir, shader_time_index, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      unsigned code_size = tcs_prog_data.base.base.program_size;
      tcs_bin = anv_pipeline_upload_kernel(pipeline, cache,
                                           tcs_sha1, sizeof(tcs_sha1),
                                           shader_code, code_size,
                                           &tcs_prog_data.base.base,
                                           sizeof(tcs_prog_data),
                                           &tcs_map);
      if (!tcs_bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      shader_code =
         brw_compile_tes(compiler, NULL, mem_ctx, &tes_key,
                         &tcs_prog_data.base.vue_map, &tes_prog_data, tes_nir,
                         NULL, shader_time_index, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      code_size = tes_prog_data.base.base.program_size;
      tes_bin = anv_pipeline_upload_kernel(pipeline, cache,
                                           tes_sha1, sizeof(tes_sha1),
                                           shader_code, code_size,
                                           &tes_prog_data.base.base,
                                           sizeof(tes_prog_data),
                                           &tes_map);
      if (!tes_bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_gs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_gs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_gs_prog_key(&pipeline->device->info, &key);

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, layout, module, entrypoint,
                               MESA_SHADER_GEOMETRY, spec_info,
                               &key, sizeof(key), sha1);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_gs_prog_data prog_data = {};
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      struct anv_pipeline_bind_map map = {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout,
                                             module, entrypoint,
                                             MESA_SHADER_GEOMETRY, spec_info,
                                             &prog_data.base.base, &map);
      if (nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      anv_fill_binding_table(&prog_data.base.base, 0);

      brw_compute_vue_map(&pipeline->device->info,
                          &prog_data.base.vue_map,
                          nir->info.outputs_written,
                          nir->info.separate_shader);

      const unsigned *shader_code =
         brw_compile_gs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      const unsigned code_size = prog_data.base.base.program_size;
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_GEOMETRY, bin);

   return VK_SUCCESS;
}

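/* Fragment shaders get some extra processing: color outputs are remapped
 * onto a compacted set of render-target binding-table entries, and unused
 * or out-of-bounds outputs are demoted to locals and discarded.  This is
 * why the surface map below starts at an offset of 8 (max_rt) and is
 * shifted back once the final render-target count is known.
 */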
static VkResult
anv_pipeline_compile_fs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkGraphicsPipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_wm_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_wm_prog_key(pipeline, info, &key);

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, layout, module, entrypoint,
                               MESA_SHADER_FRAGMENT, spec_info,
                               &key, sizeof(key), sha1);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_wm_prog_data prog_data = {};
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      struct anv_pipeline_bind_map map = {
         .surface_to_descriptor = surface_to_descriptor + 8,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout,
                                             module, entrypoint,
                                             MESA_SHADER_FRAGMENT, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      unsigned num_rts = 0;
      const int max_rt = FRAG_RESULT_DATA7 - FRAG_RESULT_DATA0 + 1;
      struct anv_pipeline_binding rt_bindings[max_rt];
      nir_function_impl *impl = nir_shader_get_entrypoint(nir);
      int rt_to_bindings[max_rt];
      memset(rt_to_bindings, -1, sizeof(rt_to_bindings));
      bool rt_used[max_rt];
      memset(rt_used, 0, sizeof(rt_used));

      /* Flag used render targets */
      nir_foreach_variable_safe(var, &nir->outputs) {
         if (var->data.location < FRAG_RESULT_DATA0)
            continue;

         const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
         /* Out-of-bounds */
         if (rt >= key.nr_color_regions)
            continue;

         const unsigned array_len =
            glsl_type_is_array(var->type) ? glsl_get_length(var->type) : 1;
         assert(rt + array_len <= max_rt);

         for (unsigned i = 0; i < array_len; i++)
            rt_used[rt + i] = true;
      }

      /* Set new, compacted, location */
      for (unsigned i = 0; i < max_rt; i++) {
         if (!rt_used[i])
            continue;

         rt_to_bindings[i] = num_rts;
         rt_bindings[rt_to_bindings[i]] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = i,
         };
         num_rts++;
      }

      nir_foreach_variable_safe(var, &nir->outputs) {
         if (var->data.location < FRAG_RESULT_DATA0)
            continue;

         const unsigned rt = var->data.location - FRAG_RESULT_DATA0;
         if (rt >= key.nr_color_regions) {
            /* Out-of-bounds, throw it away */
            var->data.mode = nir_var_local;
            exec_node_remove(&var->node);
            exec_list_push_tail(&impl->locals, &var->node);
            continue;
         }

         /* Give it the new location */
         assert(rt_to_bindings[rt] != -1);
         var->data.location = rt_to_bindings[rt] + FRAG_RESULT_DATA0;
      }

      if (num_rts == 0) {
         /* If we have no render targets, we need a null render target */
         rt_bindings[0] = (struct anv_pipeline_binding) {
            .set = ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS,
            .binding = 0,
            .index = UINT32_MAX,
         };
         num_rts = 1;
      }

      assert(num_rts <= max_rt);
      map.surface_to_descriptor -= num_rts;
      map.surface_count += num_rts;
      assert(map.surface_count <= 256);
      memcpy(map.surface_to_descriptor, rt_bindings,
             num_rts * sizeof(*rt_bindings));

      anv_fill_binding_table(&prog_data.base, num_rts);

      const unsigned *shader_code =
         brw_compile_fs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        NULL, -1, -1, true, false, NULL, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      unsigned code_size = prog_data.base.program_size;
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_FRAGMENT, bin);

   return VK_SUCCESS;
}

static VkResult
anv_pipeline_compile_cs(struct anv_pipeline *pipeline,
                        struct anv_pipeline_cache *cache,
                        const VkComputePipelineCreateInfo *info,
                        struct anv_shader_module *module,
                        const char *entrypoint,
                        const VkSpecializationInfo *spec_info)
{
   const struct brw_compiler *compiler =
      pipeline->device->instance->physicalDevice.compiler;
   struct brw_cs_prog_key key;
   struct anv_shader_bin *bin = NULL;
   unsigned char sha1[20];

   populate_cs_prog_key(&pipeline->device->info, &key);

   ANV_FROM_HANDLE(anv_pipeline_layout, layout, info->layout);

   if (cache) {
      anv_pipeline_hash_shader(pipeline, layout, module, entrypoint,
                               MESA_SHADER_COMPUTE, spec_info,
                               &key, sizeof(key), sha1);
      bin = anv_pipeline_cache_search(cache, sha1, 20);
   }

   if (bin == NULL) {
      struct brw_cs_prog_data prog_data = {};
      struct anv_pipeline_binding surface_to_descriptor[256];
      struct anv_pipeline_binding sampler_to_descriptor[256];

      struct anv_pipeline_bind_map map = {
         .surface_to_descriptor = surface_to_descriptor,
         .sampler_to_descriptor = sampler_to_descriptor
      };

      void *mem_ctx = ralloc_context(NULL);

      nir_shader *nir = anv_pipeline_compile(pipeline, mem_ctx, layout,
                                             module, entrypoint,
                                             MESA_SHADER_COMPUTE, spec_info,
                                             &prog_data.base, &map);
      if (nir == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      NIR_PASS_V(nir, anv_nir_add_base_work_group_id, &prog_data);

      anv_fill_binding_table(&prog_data.base, 1);

      const unsigned *shader_code =
         brw_compile_cs(compiler, NULL, mem_ctx, &key, &prog_data, nir,
                        -1, NULL);
      if (shader_code == NULL) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      const unsigned code_size = prog_data.base.program_size;
      bin = anv_pipeline_upload_kernel(pipeline, cache, sha1, 20,
                                       shader_code, code_size,
                                       &prog_data.base, sizeof(prog_data),
                                       &map);
      if (!bin) {
         ralloc_free(mem_ctx);
         return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
      }

      ralloc_free(mem_ctx);
   }

   anv_pipeline_add_compiled_stage(pipeline, MESA_SHADER_COMPUTE, bin);

   return VK_SUCCESS;
}

/**
 * Copy pipeline state not marked as dynamic.
 * Dynamic state is pipeline state which hasn't been provided at pipeline
 * creation time, but is dynamically provided afterwards using various
 * vkCmdSet* functions.
 *
 * The set of state considered "non_dynamic" is determined by the pieces of
 * state that have their corresponding VkDynamicState enums omitted from
 * VkPipelineDynamicStateCreateInfo::pDynamicStates.
 *
 * @param[out] pipeline    Destination of non_dynamic state to be copied.
 * @param[in]  pCreateInfo Source of non_dynamic state to be copied.
 */
static void
copy_non_dynamic_state(struct anv_pipeline *pipeline,
                       const VkGraphicsPipelineCreateInfo *pCreateInfo)
{
   anv_cmd_dirty_mask_t states = ANV_CMD_DIRTY_DYNAMIC_ALL;
   struct anv_subpass *subpass = pipeline->subpass;

   pipeline->dynamic_state = default_dynamic_state;

   if (pCreateInfo->pDynamicState) {
      /* Remove all of the states that are marked as dynamic */
      uint32_t count = pCreateInfo->pDynamicState->dynamicStateCount;
      for (uint32_t s = 0; s < count; s++)
         states &= ~(1 << pCreateInfo->pDynamicState->pDynamicStates[s]);
   }

   struct anv_dynamic_state *dynamic = &pipeline->dynamic_state;

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pViewportState is [...] NULL if the pipeline
    *    has rasterization disabled.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pViewportState);

      dynamic->viewport.count = pCreateInfo->pViewportState->viewportCount;
      if (states & (1 << VK_DYNAMIC_STATE_VIEWPORT)) {
         typed_memcpy(dynamic->viewport.viewports,
                      pCreateInfo->pViewportState->pViewports,
                      pCreateInfo->pViewportState->viewportCount);
      }

      dynamic->scissor.count = pCreateInfo->pViewportState->scissorCount;
      if (states & (1 << VK_DYNAMIC_STATE_SCISSOR)) {
         typed_memcpy(dynamic->scissor.scissors,
                      pCreateInfo->pViewportState->pScissors,
                      pCreateInfo->pViewportState->scissorCount);
      }
   }

   if (states & (1 << VK_DYNAMIC_STATE_LINE_WIDTH)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->line_width = pCreateInfo->pRasterizationState->lineWidth;
   }

   if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BIAS)) {
      assert(pCreateInfo->pRasterizationState);
      dynamic->depth_bias.bias =
         pCreateInfo->pRasterizationState->depthBiasConstantFactor;
      dynamic->depth_bias.clamp =
         pCreateInfo->pRasterizationState->depthBiasClamp;
      dynamic->depth_bias.slope =
         pCreateInfo->pRasterizationState->depthBiasSlopeFactor;
   }

   /* Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pColorBlendState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use any color attachments.
    */
   bool uses_color_att = false;
   for (unsigned i = 0; i < subpass->color_count; ++i) {
      if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED) {
         uses_color_att = true;
         break;
      }
   }

   if (uses_color_att &&
       !pCreateInfo->pRasterizationState->rasterizerDiscardEnable) {
      assert(pCreateInfo->pColorBlendState);

      if (states & (1 << VK_DYNAMIC_STATE_BLEND_CONSTANTS))
         typed_memcpy(dynamic->blend_constants,
                      pCreateInfo->pColorBlendState->blendConstants, 4);
   }

   /* If there is no depthstencil attachment, then don't read
    * pDepthStencilState. The Vulkan spec states that pDepthStencilState may
    * be NULL in this case. Even if pDepthStencilState is non-NULL, there is
    * no need to override the depthstencil defaults in
    * anv_pipeline::dynamic_state when there is no depthstencil attachment.
    *
    * Section 9.2 of the Vulkan 1.0.15 spec says:
    *
    *    pDepthStencilState is [...] NULL if the pipeline has rasterization
    *    disabled or if the subpass of the render pass the pipeline is
    *    created against does not use a depth/stencil attachment.
    */
   if (!pCreateInfo->pRasterizationState->rasterizerDiscardEnable &&
       subpass->depth_stencil_attachment.attachment != VK_ATTACHMENT_UNUSED) {
      assert(pCreateInfo->pDepthStencilState);

      if (states & (1 << VK_DYNAMIC_STATE_DEPTH_BOUNDS)) {
         dynamic->depth_bounds.min =
            pCreateInfo->pDepthStencilState->minDepthBounds;
         dynamic->depth_bounds.max =
            pCreateInfo->pDepthStencilState->maxDepthBounds;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_COMPARE_MASK)) {
         dynamic->stencil_compare_mask.front =
            pCreateInfo->pDepthStencilState->front.compareMask;
         dynamic->stencil_compare_mask.back =
            pCreateInfo->pDepthStencilState->back.compareMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_WRITE_MASK)) {
         dynamic->stencil_write_mask.front =
            pCreateInfo->pDepthStencilState->front.writeMask;
         dynamic->stencil_write_mask.back =
            pCreateInfo->pDepthStencilState->back.writeMask;
      }

      if (states & (1 << VK_DYNAMIC_STATE_STENCIL_REFERENCE)) {
         dynamic->stencil_reference.front =
            pCreateInfo->pDepthStencilState->front.reference;
         dynamic->stencil_reference.back =
            pCreateInfo->pDepthStencilState->back.reference;
      }
   }

   pipeline->dynamic_state_mask = states;
}

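/* Sanity-check the required members of VkGraphicsPipelineCreateInfo.  This
 * is assert()-only validation, so it compiles to nothing in NDEBUG builds.
 */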
static void
anv_pipeline_validate_create_info(const VkGraphicsPipelineCreateInfo *info)
{
   struct anv_render_pass *renderpass = NULL;
   struct anv_subpass *subpass = NULL;

   /* Assert that all required members of VkGraphicsPipelineCreateInfo are
    * present.  See the Vulkan 1.0.28 spec, Section 9.2 Graphics Pipelines.
    */
   assert(info->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);

   renderpass = anv_render_pass_from_handle(info->renderPass);
   assert(renderpass);

   assert(info->subpass < renderpass->subpass_count);
   subpass = &renderpass->subpasses[info->subpass];

   assert(info->stageCount >= 1);
   assert(info->pVertexInputState);
   assert(info->pInputAssemblyState);
   assert(info->pRasterizationState);
   if (!info->pRasterizationState->rasterizerDiscardEnable) {
      assert(info->pViewportState);
      assert(info->pMultisampleState);

      if (subpass && subpass->depth_stencil_attachment.attachment !=
                     VK_ATTACHMENT_UNUSED)
         assert(info->pDepthStencilState);

      if (subpass && subpass->color_count > 0) {
         bool all_color_unused = true;
         for (int i = 0; i < subpass->color_count; i++) {
            if (subpass->color_attachments[i].attachment != VK_ATTACHMENT_UNUSED)
               all_color_unused = false;
         }
         /* pColorBlendState is ignored if the pipeline has rasterization
          * disabled or if the subpass of the render pass the pipeline is
          * created against does not use any color attachments.
          */
         assert(info->pColorBlendState || all_color_unused);
      }
   }

   for (uint32_t i = 0; i < info->stageCount; ++i) {
      switch (info->pStages[i].stage) {
      case VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT:
      case VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT:
         assert(info->pTessellationState);
         break;
      default:
         break;
      }
   }
}

/**
 * Calculate the desired L3 partitioning based on the current state of the
 * pipeline.  For now this simply returns the conservative defaults calculated
 * by get_default_l3_weights(), but we could probably do better by gathering
 * more statistics from the pipeline state (e.g. guess of expected URB usage
 * and bound surfaces), or by using feed-back from performance counters.
 */
void
anv_pipeline_setup_l3_config(struct anv_pipeline *pipeline, bool needs_slm)
{
   const struct gen_device_info *devinfo = &pipeline->device->info;

   const struct gen_l3_weights w =
      gen_get_default_l3_weights(devinfo, pipeline->needs_data_cache, needs_slm);

   pipeline->urb.l3_config = gen_get_l3_config(devinfo, w);
   pipeline->urb.total_size =
      gen_get_l3_config_urb_size(devinfo, pipeline->urb.l3_config);
}

VkResult
anv_pipeline_init(struct anv_pipeline *pipeline,
                  struct anv_device *device,
                  struct anv_pipeline_cache *cache,
                  const VkGraphicsPipelineCreateInfo *pCreateInfo,
                  const VkAllocationCallbacks *alloc)
{
   VkResult result;

   anv_pipeline_validate_create_info(pCreateInfo);

   if (alloc == NULL)
      alloc = &device->alloc;

   pipeline->device = device;

   ANV_FROM_HANDLE(anv_render_pass, render_pass, pCreateInfo->renderPass);
   assert(pCreateInfo->subpass < render_pass->subpass_count);
   pipeline->subpass = &render_pass->subpasses[pCreateInfo->subpass];

   result = anv_reloc_list_init(&pipeline->batch_relocs, alloc);
   if (result != VK_SUCCESS)
      return result;

   pipeline->batch.alloc = alloc;
   pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
   pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
   pipeline->batch.relocs = &pipeline->batch_relocs;
   pipeline->batch.status = VK_SUCCESS;

   copy_non_dynamic_state(pipeline, pCreateInfo);
   pipeline->depth_clamp_enable = pCreateInfo->pRasterizationState &&
                                  pCreateInfo->pRasterizationState->depthClampEnable;

   pipeline->sample_shading_enable = pCreateInfo->pMultisampleState &&
                                     pCreateInfo->pMultisampleState->sampleShadingEnable;

   pipeline->needs_data_cache = false;

   /* When we free the pipeline, we detect stages based on the NULL status
    * of various prog_data pointers.  Make them NULL by default.
    */
   memset(pipeline->shaders, 0, sizeof(pipeline->shaders));

   pipeline->active_stages = 0;

   const VkPipelineShaderStageCreateInfo *pStages[MESA_SHADER_STAGES] = {};
   struct anv_shader_module *modules[MESA_SHADER_STAGES] = {};
   for (uint32_t i = 0; i < pCreateInfo->stageCount; i++) {
      VkShaderStageFlagBits vk_stage = pCreateInfo->pStages[i].stage;
      gl_shader_stage stage = vk_to_mesa_shader_stage(vk_stage);
      pStages[stage] = &pCreateInfo->pStages[i];
      modules[stage] = anv_shader_module_from_handle(pStages[stage]->module);
      pipeline->active_stages |= vk_stage;
   }

   if (pipeline->active_stages & VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT)
      pipeline->active_stages |= VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT;

   assert(pipeline->active_stages & VK_SHADER_STAGE_VERTEX_BIT);

   if (modules[MESA_SHADER_VERTEX]) {
      result = anv_pipeline_compile_vs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_VERTEX],
                                       pStages[MESA_SHADER_VERTEX]->pName,
                                       pStages[MESA_SHADER_VERTEX]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_TESS_EVAL]) {
      result = anv_pipeline_compile_tcs_tes(pipeline, cache, pCreateInfo,
                                            modules[MESA_SHADER_TESS_CTRL],
                                            pStages[MESA_SHADER_TESS_CTRL]->pName,
                                            pStages[MESA_SHADER_TESS_CTRL]->pSpecializationInfo,
                                            modules[MESA_SHADER_TESS_EVAL],
                                            pStages[MESA_SHADER_TESS_EVAL]->pName,
                                            pStages[MESA_SHADER_TESS_EVAL]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_GEOMETRY]) {
      result = anv_pipeline_compile_gs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_GEOMETRY],
                                       pStages[MESA_SHADER_GEOMETRY]->pName,
                                       pStages[MESA_SHADER_GEOMETRY]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   if (modules[MESA_SHADER_FRAGMENT]) {
      result = anv_pipeline_compile_fs(pipeline, cache, pCreateInfo,
                                       modules[MESA_SHADER_FRAGMENT],
                                       pStages[MESA_SHADER_FRAGMENT]->pName,
                                       pStages[MESA_SHADER_FRAGMENT]->pSpecializationInfo);
      if (result != VK_SUCCESS)
         goto compile_fail;
   }

   assert(pipeline->shaders[MESA_SHADER_VERTEX]);

   anv_pipeline_setup_l3_config(pipeline, false);

   const VkPipelineVertexInputStateCreateInfo *vi_info =
      pCreateInfo->pVertexInputState;

   const uint64_t inputs_read = get_vs_prog_data(pipeline)->inputs_read;

   pipeline->vb_used = 0;
   for (uint32_t i = 0; i < vi_info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &vi_info->pVertexAttributeDescriptions[i];

      if (inputs_read & (1ull << (VERT_ATTRIB_GENERIC0 + desc->location)))
         pipeline->vb_used |= 1 << desc->binding;
   }

   for (uint32_t i = 0; i < vi_info->vertexBindingDescriptionCount; i++) {
      const VkVertexInputBindingDescription *desc =
         &vi_info->pVertexBindingDescriptions[i];

      pipeline->binding_stride[desc->binding] = desc->stride;

      /* Step rate is programmed per vertex element (attribute), not
       * binding.  Set up a map of which bindings step per instance, for
       * reference by vertex element setup.
       */
      switch (desc->inputRate) {
      default:
      case VK_VERTEX_INPUT_RATE_VERTEX:
         pipeline->instancing_enable[desc->binding] = false;
         break;
      case VK_VERTEX_INPUT_RATE_INSTANCE:
         pipeline->instancing_enable[desc->binding] = true;
         break;
      }
   }

   const VkPipelineInputAssemblyStateCreateInfo *ia_info =
      pCreateInfo->pInputAssemblyState;
   const VkPipelineTessellationStateCreateInfo *tess_info =
      pCreateInfo->pTessellationState;
   pipeline->primitive_restart = ia_info->primitiveRestartEnable;

   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL))
      pipeline->topology = _3DPRIM_PATCHLIST(tess_info->patchControlPoints);
   else
      pipeline->topology = vk_to_gen_primitive_type[ia_info->topology];

   return VK_SUCCESS;

compile_fail:
   for (unsigned s = 0; s < MESA_SHADER_STAGES; s++) {
      if (pipeline->shaders[s])
         anv_shader_bin_unref(device, pipeline->shaders[s]);
   }

   anv_reloc_list_finish(&pipeline->batch_relocs, alloc);

   return result;
}