src/intel/vulkan/genX_pipeline.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

#include "common/gen_l3_config.h"
#include "common/gen_sample_positions.h"
#include "vk_util.h"
#include "vk_format_info.h"

static uint32_t
vertex_element_comp_control(enum isl_format format, unsigned comp)
{
   uint8_t bits;
   switch (comp) {
   case 0: bits = isl_format_layouts[format].channels.r.bits; break;
   case 1: bits = isl_format_layouts[format].channels.g.bits; break;
   case 2: bits = isl_format_layouts[format].channels.b.bits; break;
   case 3: bits = isl_format_layouts[format].channels.a.bits; break;
   default: unreachable("Invalid component");
   }

   /*
    * Take into account hardware restrictions when dealing with 64-bit floats.
    *
    * From the Broadwell spec, command reference structures, page 586:
    *  "When SourceElementFormat is set to one of the *64*_PASSTHRU formats,
    *   64-bit components are stored in the URB without any conversion. In
    *   this case, vertex elements must be written as 128 or 256 bits, with
    *   VFCOMP_STORE_0 being used to pad the output as required. E.g., if
    *   R64_PASSTHRU is used to copy a 64-bit Red component into the URB,
    *   Component 1 must be specified as VFCOMP_STORE_0 (with Components 2,3
    *   set to VFCOMP_NOSTORE) in order to output a 128-bit vertex element, or
    *   Components 1-3 must be specified as VFCOMP_STORE_0 in order to output
    *   a 256-bit vertex element. Likewise, use of R64G64B64_PASSTHRU requires
    *   Component 3 to be specified as VFCOMP_STORE_0 in order to output a
    *   256-bit vertex element."
    */
   if (bits) {
      return VFCOMP_STORE_SRC;
   } else if (comp >= 2 &&
              !isl_format_layouts[format].channels.b.bits &&
              isl_format_layouts[format].channels.r.type == ISL_RAW) {
      /* When emitting 64-bit attributes, we need to write either 128 or 256
       * bit chunks, using VFCOMP_NOSTORE when not writing the chunk, and
       * VFCOMP_STORE_0 to pad the written chunk.
       */
      return VFCOMP_NOSTORE;
   } else if (comp < 3 ||
              isl_format_layouts[format].channels.r.type == ISL_RAW) {
      /* Note we need to pad with value 0, not 1, due to hardware restrictions
       * (see comment above).
       */
      return VFCOMP_STORE_0;
   } else if (isl_format_layouts[format].channels.r.type == ISL_UINT ||
              isl_format_layouts[format].channels.r.type == ISL_SINT) {
      assert(comp == 3);
      return VFCOMP_STORE_1_INT;
   } else {
      assert(comp == 3);
      return VFCOMP_STORE_1_FP;
   }
}
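
/* Illustrative sketch (not part of the original file): applying the function
 * above to ISL_FORMAT_R64_PASSTHRU (R: 64 bits, G/B/A: 0 bits, raw type)
 * produces exactly the layout the Broadwell spec quote asks for:
 *
 *    comp 0 -> VFCOMP_STORE_SRC   (bits != 0)
 *    comp 1 -> VFCOMP_STORE_0     (comp < 3, pads the element to 128 bits)
 *    comp 2 -> VFCOMP_NOSTORE     (comp >= 2, no B bits, raw type)
 *    comp 3 -> VFCOMP_NOSTORE
 *
 * i.e. a 128-bit vertex element with the upper 64 bits zero-padded.
 */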

static void
emit_vertex_input(struct anv_pipeline *pipeline,
                  const VkPipelineVertexInputStateCreateInfo *info)
{
   const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);

   /* Pull inputs_read out of the VS prog data */
   const uint64_t inputs_read = vs_prog_data->inputs_read;
   const uint64_t double_inputs_read = vs_prog_data->double_inputs_read;
   assert((inputs_read & ((1 << VERT_ATTRIB_GENERIC0) - 1)) == 0);
   const uint32_t elements = inputs_read >> VERT_ATTRIB_GENERIC0;
   const uint32_t elements_double = double_inputs_read >> VERT_ATTRIB_GENERIC0;
   const bool needs_svgs_elem = vs_prog_data->uses_vertexid ||
                                vs_prog_data->uses_instanceid ||
                                vs_prog_data->uses_firstvertex ||
                                vs_prog_data->uses_baseinstance;

   uint32_t elem_count = __builtin_popcount(elements) -
      __builtin_popcount(elements_double) / 2;
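
   /* Illustrative sketch (not part of the original file): a dual-slot 64-bit
    * attribute such as a dvec4 occupies two input slots and sets both slot
    * bits in both masks.  For one dvec4 at GENERIC0 plus one vec4 at GENERIC2:
    *
    *    elements        = 0b0111   popcount = 3
    *    elements_double = 0b0011   popcount / 2 = 1
    *    elem_count      = 3 - 1 = 2 vertex elements
    *
    * so each dual-slot attribute counts as a single VERTEX_ELEMENT.
    */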

   const uint32_t total_elems =
      elem_count + needs_svgs_elem + vs_prog_data->uses_drawid;
   if (total_elems == 0)
      return;

   uint32_t *p;

   const uint32_t num_dwords = 1 + total_elems * 2;
   p = anv_batch_emitn(&pipeline->batch, num_dwords,
                       GENX(3DSTATE_VERTEX_ELEMENTS));
   if (!p)
      return;

   for (uint32_t i = 0; i < total_elems; i++) {
      /* The SKL docs for VERTEX_ELEMENT_STATE say:
       *
       *    "All elements must be valid from Element[0] to the last valid
       *    element. (I.e. if Element[2] is valid then Element[1] and
       *    Element[0] must also be valid)."
       *
       * The SKL docs for 3D_Vertex_Component_Control say:
       *
       *    "Don't store this component. (Not valid for Component 0, but can
       *    be used for Component 1-3)."
       *
       * So we can't just leave a vertex element blank and hope for the best.
       * We have to tell the VF hardware to put something in it; so we just
       * store a bunch of zeros.
       *
       * TODO: Compact vertex elements so we never end up with holes.
       */
      struct GENX(VERTEX_ELEMENT_STATE) element = {
         .Valid = true,
         .Component0Control = VFCOMP_STORE_0,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + i * 2], &element);
   }

   for (uint32_t i = 0; i < info->vertexAttributeDescriptionCount; i++) {
      const VkVertexInputAttributeDescription *desc =
         &info->pVertexAttributeDescriptions[i];
      enum isl_format format = anv_get_isl_format(&pipeline->device->info,
                                                  desc->format,
                                                  VK_IMAGE_ASPECT_COLOR_BIT,
                                                  VK_IMAGE_TILING_LINEAR);

      assert(desc->binding < MAX_VBS);

      if ((elements & (1 << desc->location)) == 0)
         continue; /* Binding unused */

      uint32_t slot =
         __builtin_popcount(elements & ((1 << desc->location) - 1)) -
         DIV_ROUND_UP(__builtin_popcount(elements_double &
                                         ((1 << desc->location) - 1)), 2);
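
      /* Illustrative sketch (not part of the original file): the slot is the
       * attribute's position after collapsing dual-slot attributes.  With the
       * dvec4-at-location-0, vec4-at-location-2 layout from above, the vec4
       * at location 2 lands in slot:
       *
       *    popcount(0b0111 & 0b0011) - DIV_ROUND_UP(popcount(0b0011 & 0b0011), 2)
       *       = 2 - 1 = 1
       */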

      struct GENX(VERTEX_ELEMENT_STATE) element = {
         .VertexBufferIndex = desc->binding,
         .Valid = true,
         .SourceElementFormat = format,
         .EdgeFlagEnable = false,
         .SourceElementOffset = desc->offset,
         .Component0Control = vertex_element_comp_control(format, 0),
         .Component1Control = vertex_element_comp_control(format, 1),
         .Component2Control = vertex_element_comp_control(format, 2),
         .Component3Control = vertex_element_comp_control(format, 3),
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + slot * 2], &element);

#if GEN_GEN >= 8
      /* On Broadwell and later, we have a separate VF_INSTANCING packet
       * that controls instancing.  On Haswell and prior, that's part of
       * VERTEX_BUFFER_STATE which we emit later.
       */
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.InstancingEnable = pipeline->vb[desc->binding].instanced;
         vfi.VertexElementIndex = slot;
         vfi.InstanceDataStepRate =
            pipeline->vb[desc->binding].instance_divisor;
      }
#endif
   }

   const uint32_t id_slot = elem_count;
   if (needs_svgs_elem) {
      /* From the Broadwell PRM for the 3D_Vertex_Component_Control enum:
       *    "Within a VERTEX_ELEMENT_STATE structure, if a Component
       *    Control field is set to something other than VFCOMP_STORE_SRC,
       *    no higher-numbered Component Control fields may be set to
       *    VFCOMP_STORE_SRC"
       *
       * This means that if we have BaseInstance, we need BaseVertex as
       * well.  Just do all or nothing.
       */
      uint32_t base_ctrl = (vs_prog_data->uses_firstvertex ||
                            vs_prog_data->uses_baseinstance) ?
                           VFCOMP_STORE_SRC : VFCOMP_STORE_0;

      struct GENX(VERTEX_ELEMENT_STATE) element = {
         .VertexBufferIndex = ANV_SVGS_VB_INDEX,
         .Valid = true,
         .SourceElementFormat = ISL_FORMAT_R32G32_UINT,
         .Component0Control = base_ctrl,
         .Component1Control = base_ctrl,
#if GEN_GEN >= 8
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
#else
         .Component2Control = VFCOMP_STORE_VID,
         .Component3Control = VFCOMP_STORE_IID,
#endif
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(NULL, &p[1 + id_slot * 2], &element);
   }

#if GEN_GEN >= 8
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_SGVS), sgvs) {
      sgvs.VertexIDEnable              = vs_prog_data->uses_vertexid;
      sgvs.VertexIDComponentNumber     = 2;
      sgvs.VertexIDElementOffset       = id_slot;
      sgvs.InstanceIDEnable            = vs_prog_data->uses_instanceid;
      sgvs.InstanceIDComponentNumber   = 3;
      sgvs.InstanceIDElementOffset     = id_slot;
   }
#endif

   const uint32_t drawid_slot = elem_count + needs_svgs_elem;
   if (vs_prog_data->uses_drawid) {
      struct GENX(VERTEX_ELEMENT_STATE) element = {
         .VertexBufferIndex = ANV_DRAWID_VB_INDEX,
         .Valid = true,
         .SourceElementFormat = ISL_FORMAT_R32_UINT,
         .Component0Control = VFCOMP_STORE_SRC,
         .Component1Control = VFCOMP_STORE_0,
         .Component2Control = VFCOMP_STORE_0,
         .Component3Control = VFCOMP_STORE_0,
      };
      GENX(VERTEX_ELEMENT_STATE_pack)(NULL,
                                      &p[1 + drawid_slot * 2],
                                      &element);

#if GEN_GEN >= 8
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_INSTANCING), vfi) {
         vfi.VertexElementIndex = drawid_slot;
      }
#endif
   }
}

void
genX(emit_urb_setup)(struct anv_device *device, struct anv_batch *batch,
                     const struct gen_l3_config *l3_config,
                     VkShaderStageFlags active_stages,
                     const unsigned entry_size[4])
{
   const struct gen_device_info *devinfo = &device->info;
#if GEN_IS_HASWELL
   const unsigned push_constant_kb = devinfo->gt == 3 ? 32 : 16;
#else
   const unsigned push_constant_kb = GEN_GEN >= 8 ? 32 : 16;
#endif

   const unsigned urb_size_kb = gen_get_l3_config_urb_size(devinfo, l3_config);

   unsigned entries[4];
   unsigned start[4];
   gen_get_urb_config(devinfo,
                      1024 * push_constant_kb, 1024 * urb_size_kb,
                      active_stages &
                         VK_SHADER_STAGE_TESSELLATION_EVALUATION_BIT,
                      active_stages & VK_SHADER_STAGE_GEOMETRY_BIT,
                      entry_size, entries, start);

#if GEN_GEN == 7 && !GEN_IS_HASWELL
   /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
    *
    *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth stall
    *    needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
    *    3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
    *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL
    *    needs to be sent before any combination of VS associated 3DSTATE."
    */
   anv_batch_emit(batch, GEN7_PIPE_CONTROL, pc) {
      pc.DepthStallEnable  = true;
      pc.PostSyncOperation = WriteImmediateData;
      pc.Address           = (struct anv_address) { &device->workaround_bo, 0 };
   }
#endif

   for (int i = 0; i <= MESA_SHADER_GEOMETRY; i++) {
      anv_batch_emit(batch, GENX(3DSTATE_URB_VS), urb) {
         urb._3DCommandSubOpcode      += i;
         urb.VSURBStartingAddress      = start[i];
         urb.VSURBEntryAllocationSize  = entry_size[i] - 1;
         urb.VSNumberofURBEntries      = entries[i];
      }
   }
}

static void
emit_urb_setup(struct anv_pipeline *pipeline)
{
   unsigned entry_size[4];
   for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
      const struct brw_vue_prog_data *prog_data =
         !anv_pipeline_has_stage(pipeline, i) ? NULL :
         (const struct brw_vue_prog_data *) pipeline->shaders[i]->prog_data;

      entry_size[i] = prog_data ? prog_data->urb_entry_size : 1;
   }

   genX(emit_urb_setup)(pipeline->device, &pipeline->batch,
                        pipeline->urb.l3_config,
                        pipeline->active_stages, entry_size);
}

static void
emit_3dstate_sbe(struct anv_pipeline *pipeline)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);

   if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE), sbe);
#if GEN_GEN >= 8
      anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ), sbe);
#endif
      return;
   }

   const struct brw_vue_map *fs_input_map =
      &anv_pipeline_get_last_vue_prog_data(pipeline)->vue_map;

   struct GENX(3DSTATE_SBE) sbe = {
      GENX(3DSTATE_SBE_header),
      .AttributeSwizzleEnable = true,
      .PointSpriteTextureCoordinateOrigin = UPPERLEFT,
      .NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs,
      .ConstantInterpolationEnable = wm_prog_data->flat_inputs,
   };

#if GEN_GEN >= 9
   for (unsigned i = 0; i < 32; i++)
      sbe.AttributeActiveComponentFormat[i] = ACF_XYZW;
#endif

#if GEN_GEN >= 8
   /* On Broadwell, they broke 3DSTATE_SBE into two packets */
   struct GENX(3DSTATE_SBE_SWIZ) swiz = {
      GENX(3DSTATE_SBE_SWIZ_header),
   };
#else
#  define swiz sbe
#endif

   /* Skip the VUE header and position slots by default */
   unsigned urb_entry_read_offset = 1;
   int max_source_attr = 0;
   for (int attr = 0; attr < VARYING_SLOT_MAX; attr++) {
      int input_index = wm_prog_data->urb_setup[attr];

      if (input_index < 0)
         continue;

      /* gl_Layer is stored in the VUE header */
      if (attr == VARYING_SLOT_LAYER) {
         urb_entry_read_offset = 0;
         continue;
      }

      if (attr == VARYING_SLOT_PNTC) {
         sbe.PointSpriteTextureCoordinateEnable = 1 << input_index;
         continue;
      }

      const int slot = fs_input_map->varying_to_slot[attr];

      if (input_index >= 16)
         continue;

      if (slot == -1) {
         /* This attribute does not exist in the VUE--that means that the
          * vertex shader did not write to it.  It could be that it's a
          * regular varying read by the fragment shader but not written by
          * the vertex shader or it's gl_PrimitiveID. In the first case the
          * value is undefined, in the second it needs to be
          * gl_PrimitiveID.
          */
         swiz.Attribute[input_index].ConstantSource = PRIM_ID;
         swiz.Attribute[input_index].ComponentOverrideX = true;
         swiz.Attribute[input_index].ComponentOverrideY = true;
         swiz.Attribute[input_index].ComponentOverrideZ = true;
         swiz.Attribute[input_index].ComponentOverrideW = true;
      } else {
         /* We have to subtract two slots to account for the URB entry output
          * read offset in the VS and GS stages.
          */
         const int source_attr = slot - 2 * urb_entry_read_offset;
         assert(source_attr >= 0 && source_attr < 32);
         max_source_attr = MAX2(max_source_attr, source_attr);
         swiz.Attribute[input_index].SourceAttribute = source_attr;
      }
   }

   sbe.VertexURBEntryReadOffset = urb_entry_read_offset;
   sbe.VertexURBEntryReadLength = DIV_ROUND_UP(max_source_attr + 1, 2);
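
   /* Illustrative sketch (not part of the original file): the read offset and
    * read length are in 256-bit units, i.e. pairs of 128-bit VUE slots (hence
    * the "slot - 2 * urb_entry_read_offset" above).  If the highest source
    * attribute is 4, the FS needs attributes 0..4, spanning
    * DIV_ROUND_UP(5, 2) = 3 pairs of slots, read starting one pair past the
    * VUE header.
    */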
#if GEN_GEN >= 8
   sbe.ForceVertexURBEntryReadOffset = true;
   sbe.ForceVertexURBEntryReadLength = true;
#endif

   uint32_t *dw = anv_batch_emit_dwords(&pipeline->batch,
                                        GENX(3DSTATE_SBE_length));
   if (!dw)
      return;
   GENX(3DSTATE_SBE_pack)(&pipeline->batch, dw, &sbe);

#if GEN_GEN >= 8
   dw = anv_batch_emit_dwords(&pipeline->batch, GENX(3DSTATE_SBE_SWIZ_length));
   if (!dw)
      return;
   GENX(3DSTATE_SBE_SWIZ_pack)(&pipeline->batch, dw, &swiz);
#endif
}

static const uint32_t vk_to_gen_cullmode[] = {
   [VK_CULL_MODE_NONE]                       = CULLMODE_NONE,
   [VK_CULL_MODE_FRONT_BIT]                  = CULLMODE_FRONT,
   [VK_CULL_MODE_BACK_BIT]                   = CULLMODE_BACK,
   [VK_CULL_MODE_FRONT_AND_BACK]             = CULLMODE_BOTH
};

static const uint32_t vk_to_gen_fillmode[] = {
   [VK_POLYGON_MODE_FILL]                    = FILL_MODE_SOLID,
   [VK_POLYGON_MODE_LINE]                    = FILL_MODE_WIREFRAME,
   [VK_POLYGON_MODE_POINT]                   = FILL_MODE_POINT,
};

static const uint32_t vk_to_gen_front_face[] = {
   [VK_FRONT_FACE_COUNTER_CLOCKWISE]         = 1,
   [VK_FRONT_FACE_CLOCKWISE]                 = 0
};

static void
emit_rs_state(struct anv_pipeline *pipeline,
              const VkPipelineRasterizationStateCreateInfo *rs_info,
              const VkPipelineMultisampleStateCreateInfo *ms_info,
              const struct anv_render_pass *pass,
              const struct anv_subpass *subpass)
{
   struct GENX(3DSTATE_SF) sf = {
      GENX(3DSTATE_SF_header),
   };

   sf.ViewportTransformEnable = true;
   sf.StatisticsEnable = true;
   sf.TriangleStripListProvokingVertexSelect = 0;
   sf.LineStripListProvokingVertexSelect = 0;
   sf.TriangleFanProvokingVertexSelect = 1;

   const struct brw_vue_prog_data *last_vue_prog_data =
      anv_pipeline_get_last_vue_prog_data(pipeline);

   if (last_vue_prog_data->vue_map.slots_valid & VARYING_BIT_PSIZ) {
      sf.PointWidthSource = Vertex;
   } else {
      sf.PointWidthSource = State;
      sf.PointWidth = 1.0;
   }

#if GEN_GEN >= 8
   struct GENX(3DSTATE_RASTER) raster = {
      GENX(3DSTATE_RASTER_header),
   };
#else
#  define raster sf
#endif

   /* For details on 3DSTATE_RASTER multisample state, see the BSpec table
    * "Multisample Modes State".
    */
#if GEN_GEN >= 8
   raster.DXMultisampleRasterizationEnable = true;
   /* NOTE: 3DSTATE_RASTER::ForcedSampleCount affects the BDW and SKL PMA fix
    * computations.  If we ever set this bit to a different value, they will
    * need to be updated accordingly.
    */
   raster.ForcedSampleCount = FSC_NUMRASTSAMPLES_0;
   raster.ForceMultisampling = false;
#else
   raster.MultisampleRasterizationMode =
      (ms_info && ms_info->rasterizationSamples > 1) ?
      MSRASTMODE_ON_PATTERN : MSRASTMODE_OFF_PIXEL;
#endif

   raster.FrontWinding = vk_to_gen_front_face[rs_info->frontFace];
   raster.CullMode = vk_to_gen_cullmode[rs_info->cullMode];
   raster.FrontFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
   raster.BackFaceFillMode = vk_to_gen_fillmode[rs_info->polygonMode];
   raster.ScissorRectangleEnable = true;

#if GEN_GEN >= 9
   /* GEN9+ splits ViewportZClipTestEnable into near and far enable bits */
   raster.ViewportZFarClipTestEnable = !pipeline->depth_clamp_enable;
   raster.ViewportZNearClipTestEnable = !pipeline->depth_clamp_enable;
#elif GEN_GEN >= 8
   raster.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
#endif

   raster.GlobalDepthOffsetEnableSolid = rs_info->depthBiasEnable;
   raster.GlobalDepthOffsetEnableWireframe = rs_info->depthBiasEnable;
   raster.GlobalDepthOffsetEnablePoint = rs_info->depthBiasEnable;

#if GEN_GEN == 7
   /* Gen7 requires that we provide the depth format in 3DSTATE_SF so that it
    * can get the depth offsets correct.
    */
   if (subpass->depth_stencil_attachment) {
      VkFormat vk_format =
         pass->attachments[subpass->depth_stencil_attachment->attachment].format;
      assert(vk_format_is_depth_or_stencil(vk_format));
      if (vk_format_aspects(vk_format) & VK_IMAGE_ASPECT_DEPTH_BIT) {
         enum isl_format isl_format =
            anv_get_isl_format(&pipeline->device->info, vk_format,
                               VK_IMAGE_ASPECT_DEPTH_BIT,
                               VK_IMAGE_TILING_OPTIMAL);
         sf.DepthBufferSurfaceFormat =
            isl_format_get_depth_format(isl_format, false);
      }
   }
#endif

#if GEN_GEN >= 8
   GENX(3DSTATE_SF_pack)(NULL, pipeline->gen8.sf, &sf);
   GENX(3DSTATE_RASTER_pack)(NULL, pipeline->gen8.raster, &raster);
#else
#  undef raster
   GENX(3DSTATE_SF_pack)(NULL, &pipeline->gen7.sf, &sf);
#endif
}

static void
emit_ms_state(struct anv_pipeline *pipeline,
              const VkPipelineMultisampleStateCreateInfo *info)
{
   uint32_t samples = 1;
   uint32_t log2_samples = 0;

   /* From the Vulkan 1.0 spec:
    *    If pSampleMask is NULL, it is treated as if the mask has all bits
    *    enabled, i.e. no coverage is removed from fragments.
    *
    * 3DSTATE_SAMPLE_MASK.SampleMask is 16 bits.
    */
#if GEN_GEN >= 8
   uint32_t sample_mask = 0xffff;
#else
   uint32_t sample_mask = 0xff;
#endif

   if (info) {
      samples = info->rasterizationSamples;
      log2_samples = __builtin_ffs(samples) - 1;
   }
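
   /* Illustrative sketch (not part of the original file): for power-of-two
    * sample counts, __builtin_ffs(samples) - 1 equals log2(samples), e.g.
    * samples = 8 -> ffs(8) - 1 = 3, which is the encoding
    * NumberofMultisamples expects.
    */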

   if (info && info->pSampleMask)
      sample_mask &= info->pSampleMask[0];

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_MULTISAMPLE), ms) {
      ms.NumberofMultisamples       = log2_samples;

      ms.PixelLocation              = CENTER;
#if GEN_GEN >= 8
      /* The PRM says that this bit is valid only for DX9:
       *
       *    SW can choose to set this bit only for DX9 API. DX10/OGL API's
       *    should not have any effect by setting or not setting this bit.
       */
      ms.PixelPositionOffsetEnable  = false;
#else

      switch (samples) {
      case 1:
         GEN_SAMPLE_POS_1X(ms.Sample);
         break;
      case 2:
         GEN_SAMPLE_POS_2X(ms.Sample);
         break;
      case 4:
         GEN_SAMPLE_POS_4X(ms.Sample);
         break;
      case 8:
         GEN_SAMPLE_POS_8X(ms.Sample);
         break;
      default:
         break;
      }
#endif
   }

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_SAMPLE_MASK), sm) {
      sm.SampleMask = sample_mask;
   }
}

static const uint32_t vk_to_gen_logic_op[] = {
   [VK_LOGIC_OP_COPY]                        = LOGICOP_COPY,
   [VK_LOGIC_OP_CLEAR]                       = LOGICOP_CLEAR,
   [VK_LOGIC_OP_AND]                         = LOGICOP_AND,
   [VK_LOGIC_OP_AND_REVERSE]                 = LOGICOP_AND_REVERSE,
   [VK_LOGIC_OP_AND_INVERTED]                = LOGICOP_AND_INVERTED,
   [VK_LOGIC_OP_NO_OP]                       = LOGICOP_NOOP,
   [VK_LOGIC_OP_XOR]                         = LOGICOP_XOR,
   [VK_LOGIC_OP_OR]                          = LOGICOP_OR,
   [VK_LOGIC_OP_NOR]                         = LOGICOP_NOR,
   [VK_LOGIC_OP_EQUIVALENT]                  = LOGICOP_EQUIV,
   [VK_LOGIC_OP_INVERT]                      = LOGICOP_INVERT,
   [VK_LOGIC_OP_OR_REVERSE]                  = LOGICOP_OR_REVERSE,
   [VK_LOGIC_OP_COPY_INVERTED]               = LOGICOP_COPY_INVERTED,
   [VK_LOGIC_OP_OR_INVERTED]                 = LOGICOP_OR_INVERTED,
   [VK_LOGIC_OP_NAND]                        = LOGICOP_NAND,
   [VK_LOGIC_OP_SET]                         = LOGICOP_SET,
};

static const uint32_t vk_to_gen_blend[] = {
   [VK_BLEND_FACTOR_ZERO]                    = BLENDFACTOR_ZERO,
   [VK_BLEND_FACTOR_ONE]                     = BLENDFACTOR_ONE,
   [VK_BLEND_FACTOR_SRC_COLOR]               = BLENDFACTOR_SRC_COLOR,
   [VK_BLEND_FACTOR_ONE_MINUS_SRC_COLOR]     = BLENDFACTOR_INV_SRC_COLOR,
   [VK_BLEND_FACTOR_DST_COLOR]               = BLENDFACTOR_DST_COLOR,
   [VK_BLEND_FACTOR_ONE_MINUS_DST_COLOR]     = BLENDFACTOR_INV_DST_COLOR,
   [VK_BLEND_FACTOR_SRC_ALPHA]               = BLENDFACTOR_SRC_ALPHA,
   [VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA]     = BLENDFACTOR_INV_SRC_ALPHA,
   [VK_BLEND_FACTOR_DST_ALPHA]               = BLENDFACTOR_DST_ALPHA,
   [VK_BLEND_FACTOR_ONE_MINUS_DST_ALPHA]     = BLENDFACTOR_INV_DST_ALPHA,
   [VK_BLEND_FACTOR_CONSTANT_COLOR]          = BLENDFACTOR_CONST_COLOR,
   [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_COLOR] = BLENDFACTOR_INV_CONST_COLOR,
   [VK_BLEND_FACTOR_CONSTANT_ALPHA]          = BLENDFACTOR_CONST_ALPHA,
   [VK_BLEND_FACTOR_ONE_MINUS_CONSTANT_ALPHA] = BLENDFACTOR_INV_CONST_ALPHA,
   [VK_BLEND_FACTOR_SRC_ALPHA_SATURATE]      = BLENDFACTOR_SRC_ALPHA_SATURATE,
   [VK_BLEND_FACTOR_SRC1_COLOR]              = BLENDFACTOR_SRC1_COLOR,
   [VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR]    = BLENDFACTOR_INV_SRC1_COLOR,
   [VK_BLEND_FACTOR_SRC1_ALPHA]              = BLENDFACTOR_SRC1_ALPHA,
   [VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA]    = BLENDFACTOR_INV_SRC1_ALPHA,
};

static const uint32_t vk_to_gen_blend_op[] = {
   [VK_BLEND_OP_ADD]                         = BLENDFUNCTION_ADD,
   [VK_BLEND_OP_SUBTRACT]                    = BLENDFUNCTION_SUBTRACT,
   [VK_BLEND_OP_REVERSE_SUBTRACT]            = BLENDFUNCTION_REVERSE_SUBTRACT,
   [VK_BLEND_OP_MIN]                         = BLENDFUNCTION_MIN,
   [VK_BLEND_OP_MAX]                         = BLENDFUNCTION_MAX,
};

static const uint32_t vk_to_gen_compare_op[] = {
   [VK_COMPARE_OP_NEVER]                        = PREFILTEROPNEVER,
   [VK_COMPARE_OP_LESS]                         = PREFILTEROPLESS,
   [VK_COMPARE_OP_EQUAL]                        = PREFILTEROPEQUAL,
   [VK_COMPARE_OP_LESS_OR_EQUAL]                = PREFILTEROPLEQUAL,
   [VK_COMPARE_OP_GREATER]                      = PREFILTEROPGREATER,
   [VK_COMPARE_OP_NOT_EQUAL]                    = PREFILTEROPNOTEQUAL,
   [VK_COMPARE_OP_GREATER_OR_EQUAL]             = PREFILTEROPGEQUAL,
   [VK_COMPARE_OP_ALWAYS]                       = PREFILTEROPALWAYS,
};

static const uint32_t vk_to_gen_stencil_op[] = {
   [VK_STENCIL_OP_KEEP]                         = STENCILOP_KEEP,
   [VK_STENCIL_OP_ZERO]                         = STENCILOP_ZERO,
   [VK_STENCIL_OP_REPLACE]                      = STENCILOP_REPLACE,
   [VK_STENCIL_OP_INCREMENT_AND_CLAMP]          = STENCILOP_INCRSAT,
   [VK_STENCIL_OP_DECREMENT_AND_CLAMP]          = STENCILOP_DECRSAT,
   [VK_STENCIL_OP_INVERT]                       = STENCILOP_INVERT,
   [VK_STENCIL_OP_INCREMENT_AND_WRAP]           = STENCILOP_INCR,
   [VK_STENCIL_OP_DECREMENT_AND_WRAP]           = STENCILOP_DECR,
};

/* This function sanitizes the VkStencilOpState by looking at the compare ops
 * and trying to determine whether or not a given stencil op can ever actually
 * occur.  Stencil ops which can never occur are set to VK_STENCIL_OP_KEEP.
 * This function returns true if, after sanitization, any of the stencil ops
 * are set to something other than VK_STENCIL_OP_KEEP.
 */
static bool
sanitize_stencil_face(VkStencilOpState *face,
                      VkCompareOp depthCompareOp)
{
   /* If compareOp is ALWAYS then the stencil test will never fail and failOp
    * will never happen.  Set failOp to KEEP in this case.
    */
   if (face->compareOp == VK_COMPARE_OP_ALWAYS)
      face->failOp = VK_STENCIL_OP_KEEP;

   /* If compareOp is NEVER or depthCompareOp is NEVER then one of the depth
    * or stencil tests will fail and passOp will never happen.
    */
   if (face->compareOp == VK_COMPARE_OP_NEVER ||
       depthCompareOp == VK_COMPARE_OP_NEVER)
      face->passOp = VK_STENCIL_OP_KEEP;

   /* If compareOp is NEVER or depthCompareOp is ALWAYS then either the
    * stencil test will fail or the depth test will pass.  In either case,
    * depthFailOp will never happen.
    */
   if (face->compareOp == VK_COMPARE_OP_NEVER ||
       depthCompareOp == VK_COMPARE_OP_ALWAYS)
      face->depthFailOp = VK_STENCIL_OP_KEEP;

   return face->failOp != VK_STENCIL_OP_KEEP ||
          face->depthFailOp != VK_STENCIL_OP_KEEP ||
          face->passOp != VK_STENCIL_OP_KEEP;
}
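
/* Illustrative sketch (not part of the original file): a face with
 *
 *    compareOp = VK_COMPARE_OP_ALWAYS, failOp = REPLACE,
 *    passOp = KEEP, depthFailOp = KEEP
 *
 * has its failOp rewritten to KEEP (the stencil test can never fail), after
 * which every op is KEEP and the function returns false, letting the caller
 * turn stencil writes off entirely.
 */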

/* Intel hardware is fairly sensitive to whether or not depth/stencil writes
 * are enabled.  In the presence of discards, it's fairly easy to get into the
 * non-promoted case which means a fairly big performance hit.  From the Iron
 * Lake PRM, Vol 2, pt. 1, section 8.4.3.2, "Early Depth Test Cases":
 *
 *    "Non-promoted depth (N) is active whenever the depth test can be done
 *    early but it cannot determine whether or not to write source depth to
 *    the depth buffer, therefore the depth write must be performed post pixel
 *    shader. This includes cases where the pixel shader can kill pixels,
 *    including via sampler chroma key, as well as cases where the alpha test
 *    function is enabled, which kills pixels based on a programmable alpha
 *    test. In this case, even if the depth test fails, the pixel cannot be
 *    killed if a stencil write is indicated. Whether or not the stencil write
 *    happens depends on whether or not the pixel is killed later. In these
 *    cases if stencil test fails and stencil writes are off, the pixels can
 *    also be killed early. If stencil writes are enabled, the pixels must be
 *    treated as Computed depth (described above)."
 *
 * The same thing as mentioned in the stencil case can happen in the depth
 * case as well if it thinks it writes depth but, thanks to the depth test
 * being GL_EQUAL, the write doesn't actually matter.  A little extra work
 * up-front to try and disable depth and stencil writes can make a big
 * difference.
 *
 * Unfortunately, the way depth and stencil testing is specified, there are
 * many cases where, regardless of depth/stencil writes being enabled, nothing
 * actually gets written due to some other bit of state being set.  This
 * function attempts to "sanitize" the depth stencil state and disable writes
 * and sometimes even testing whenever possible.
 */
static void
sanitize_ds_state(VkPipelineDepthStencilStateCreateInfo *state,
                  bool *stencilWriteEnable,
                  VkImageAspectFlags ds_aspects)
{
   *stencilWriteEnable = state->stencilTestEnable;

   /* If the depth test is disabled, we won't be writing anything. */
   if (!state->depthTestEnable)
      state->depthWriteEnable = false;

   /* The Vulkan spec requires that if either depth or stencil is not present,
    * the pipeline is to act as if the test silently passes.
    */
   if (!(ds_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
      state->depthWriteEnable = false;
      state->depthCompareOp = VK_COMPARE_OP_ALWAYS;
   }

   if (!(ds_aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
      *stencilWriteEnable = false;
      state->front.compareOp = VK_COMPARE_OP_ALWAYS;
      state->back.compareOp = VK_COMPARE_OP_ALWAYS;
   }

   /* If the stencil test is enabled and always fails, then we will never get
    * to the depth test so we can just disable the depth test entirely.
    */
   if (state->stencilTestEnable &&
       state->front.compareOp == VK_COMPARE_OP_NEVER &&
       state->back.compareOp == VK_COMPARE_OP_NEVER) {
      state->depthTestEnable = false;
      state->depthWriteEnable = false;
   }

   /* If depthCompareOp is EQUAL then the value we would be writing to the
    * depth buffer is the same as the value that's already there so there's no
    * point in writing it.
    */
   if (state->depthCompareOp == VK_COMPARE_OP_EQUAL)
      state->depthWriteEnable = false;

   /* If the stencil ops are such that we don't actually ever modify the
    * stencil buffer, we should disable writes.
    */
   if (!sanitize_stencil_face(&state->front, state->depthCompareOp) &&
       !sanitize_stencil_face(&state->back, state->depthCompareOp))
      *stencilWriteEnable = false;

   /* If the depth test always passes and we never write out depth, that's the
    * same as if the depth test is disabled entirely.
    */
   if (state->depthCompareOp == VK_COMPARE_OP_ALWAYS &&
       !state->depthWriteEnable)
      state->depthTestEnable = false;

   /* If the stencil test always passes and we never write out stencil, that's
    * the same as if the stencil test is disabled entirely.
    */
   if (state->front.compareOp == VK_COMPARE_OP_ALWAYS &&
       state->back.compareOp == VK_COMPARE_OP_ALWAYS &&
       !*stencilWriteEnable)
      state->stencilTestEnable = false;
}
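
/* Illustrative sketch (not part of the original file): with a depth-only
 * attachment and
 *
 *    depthTestEnable = true, depthCompareOp = VK_COMPARE_OP_EQUAL,
 *    depthWriteEnable = true, stencilTestEnable = false
 *
 * the EQUAL rule above disables depthWriteEnable (the value written would
 * equal what is already there), and stencil writes come out disabled since
 * there is no stencil aspect; only the depth test itself survives.
 */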

static void
emit_ds_state(struct anv_pipeline *pipeline,
              const VkPipelineDepthStencilStateCreateInfo *pCreateInfo,
              const struct anv_render_pass *pass,
              const struct anv_subpass *subpass)
{
#if GEN_GEN == 7
#  define depth_stencil_dw pipeline->gen7.depth_stencil_state
#elif GEN_GEN == 8
#  define depth_stencil_dw pipeline->gen8.wm_depth_stencil
#else
#  define depth_stencil_dw pipeline->gen9.wm_depth_stencil
#endif

   if (pCreateInfo == NULL) {
      /* We're going to OR this together with the dynamic state.  We need
       * to make sure it's initialized to something useful.
       */
      pipeline->writes_stencil = false;
      pipeline->stencil_test_enable = false;
      pipeline->writes_depth = false;
      pipeline->depth_test_enable = false;
      memset(depth_stencil_dw, 0, sizeof(depth_stencil_dw));
      return;
   }

   VkImageAspectFlags ds_aspects = 0;
   if (subpass->depth_stencil_attachment) {
      VkFormat depth_stencil_format =
         pass->attachments[subpass->depth_stencil_attachment->attachment].format;
      ds_aspects = vk_format_aspects(depth_stencil_format);
   }

   VkPipelineDepthStencilStateCreateInfo info = *pCreateInfo;
   sanitize_ds_state(&info, &pipeline->writes_stencil, ds_aspects);
   pipeline->stencil_test_enable = info.stencilTestEnable;
   pipeline->writes_depth = info.depthWriteEnable;
   pipeline->depth_test_enable = info.depthTestEnable;

   /* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */

#if GEN_GEN <= 7
   struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
#else
   struct GENX(3DSTATE_WM_DEPTH_STENCIL) depth_stencil = {
#endif
      .DepthTestEnable = info.depthTestEnable,
      .DepthBufferWriteEnable = info.depthWriteEnable,
      .DepthTestFunction = vk_to_gen_compare_op[info.depthCompareOp],
      .DoubleSidedStencilEnable = true,

      .StencilTestEnable = info.stencilTestEnable,
      .StencilFailOp = vk_to_gen_stencil_op[info.front.failOp],
      .StencilPassDepthPassOp = vk_to_gen_stencil_op[info.front.passOp],
      .StencilPassDepthFailOp = vk_to_gen_stencil_op[info.front.depthFailOp],
      .StencilTestFunction = vk_to_gen_compare_op[info.front.compareOp],
      .BackfaceStencilFailOp = vk_to_gen_stencil_op[info.back.failOp],
      .BackfaceStencilPassDepthPassOp = vk_to_gen_stencil_op[info.back.passOp],
      .BackfaceStencilPassDepthFailOp = vk_to_gen_stencil_op[info.back.depthFailOp],
      .BackfaceStencilTestFunction = vk_to_gen_compare_op[info.back.compareOp],
   };

#if GEN_GEN <= 7
   GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);
#else
   GENX(3DSTATE_WM_DEPTH_STENCIL_pack)(NULL, depth_stencil_dw, &depth_stencil);
#endif
}

static void
emit_cb_state(struct anv_pipeline *pipeline,
              const VkPipelineColorBlendStateCreateInfo *info,
              const VkPipelineMultisampleStateCreateInfo *ms_info)
{
   struct anv_device *device = pipeline->device;

   struct GENX(BLEND_STATE) blend_state = {
#if GEN_GEN >= 8
      .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
      .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
#endif
   };

   uint32_t surface_count = 0;
   struct anv_pipeline_bind_map *map;
   if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
      map = &pipeline->shaders[MESA_SHADER_FRAGMENT]->bind_map;
      surface_count = map->surface_count;
   }

   const uint32_t num_dwords = GENX(BLEND_STATE_length) +
      GENX(BLEND_STATE_ENTRY_length) * surface_count;
   pipeline->blend_state =
      anv_state_pool_alloc(&device->dynamic_state_pool, num_dwords * 4, 64);

   bool has_writeable_rt = false;
   uint32_t *state_pos = pipeline->blend_state.map;
   state_pos += GENX(BLEND_STATE_length);
#if GEN_GEN >= 8
   struct GENX(BLEND_STATE_ENTRY) bs0 = { 0 };
#endif
   for (unsigned i = 0; i < surface_count; i++) {
      struct anv_pipeline_binding *binding = &map->surface_to_descriptor[i];

      /* All color attachments are at the beginning of the binding table */
      if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS)
         break;

      /* We can have at most 8 attachments */
      assert(i < 8);

      if (info == NULL || binding->index >= info->attachmentCount) {
         /* Default everything to disabled */
         struct GENX(BLEND_STATE_ENTRY) entry = {
            .WriteDisableAlpha = true,
            .WriteDisableRed = true,
            .WriteDisableGreen = true,
            .WriteDisableBlue = true,
         };
         GENX(BLEND_STATE_ENTRY_pack)(NULL, state_pos, &entry);
         state_pos += GENX(BLEND_STATE_ENTRY_length);
         continue;
      }

      assert(binding->binding == 0);
      const VkPipelineColorBlendAttachmentState *a =
         &info->pAttachments[binding->index];

      struct GENX(BLEND_STATE_ENTRY) entry = {
#if GEN_GEN < 8
         .AlphaToCoverageEnable = ms_info && ms_info->alphaToCoverageEnable,
         .AlphaToOneEnable = ms_info && ms_info->alphaToOneEnable,
#endif
         .LogicOpEnable = info->logicOpEnable,
         .LogicOpFunction = vk_to_gen_logic_op[info->logicOp],
         .ColorBufferBlendEnable = a->blendEnable,
         .ColorClampRange = COLORCLAMP_RTFORMAT,
         .PreBlendColorClampEnable = true,
         .PostBlendColorClampEnable = true,
         .SourceBlendFactor = vk_to_gen_blend[a->srcColorBlendFactor],
         .DestinationBlendFactor = vk_to_gen_blend[a->dstColorBlendFactor],
         .ColorBlendFunction = vk_to_gen_blend_op[a->colorBlendOp],
         .SourceAlphaBlendFactor = vk_to_gen_blend[a->srcAlphaBlendFactor],
         .DestinationAlphaBlendFactor = vk_to_gen_blend[a->dstAlphaBlendFactor],
         .AlphaBlendFunction = vk_to_gen_blend_op[a->alphaBlendOp],
         .WriteDisableAlpha = !(a->colorWriteMask & VK_COLOR_COMPONENT_A_BIT),
         .WriteDisableRed = !(a->colorWriteMask & VK_COLOR_COMPONENT_R_BIT),
         .WriteDisableGreen = !(a->colorWriteMask & VK_COLOR_COMPONENT_G_BIT),
         .WriteDisableBlue = !(a->colorWriteMask & VK_COLOR_COMPONENT_B_BIT),
      };

      if (a->srcColorBlendFactor != a->srcAlphaBlendFactor ||
          a->dstColorBlendFactor != a->dstAlphaBlendFactor ||
          a->colorBlendOp != a->alphaBlendOp) {
#if GEN_GEN >= 8
         blend_state.IndependentAlphaBlendEnable = true;
#else
         entry.IndependentAlphaBlendEnable = true;
#endif
      }

      if (a->colorWriteMask != 0)
         has_writeable_rt = true;

      /* Our hardware applies the blend factor prior to the blend function
       * regardless of what function is used.  Technically, this means the
       * hardware can do MORE than GL or Vulkan specify.  However, it also
       * means that, for MIN and MAX, we have to stomp the blend factor to
       * ONE to make it a no-op.
       */
      if (a->colorBlendOp == VK_BLEND_OP_MIN ||
          a->colorBlendOp == VK_BLEND_OP_MAX) {
         entry.SourceBlendFactor = BLENDFACTOR_ONE;
         entry.DestinationBlendFactor = BLENDFACTOR_ONE;
      }
      if (a->alphaBlendOp == VK_BLEND_OP_MIN ||
          a->alphaBlendOp == VK_BLEND_OP_MAX) {
         entry.SourceAlphaBlendFactor = BLENDFACTOR_ONE;
         entry.DestinationAlphaBlendFactor = BLENDFACTOR_ONE;
      }
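
      /* Illustrative sketch (not part of the original file): the hardware
       * evaluates func(src * SourceBlendFactor, dst * DestinationBlendFactor),
       * so with both factors stomped to ONE, MIN becomes min(src, dst), which
       * is exactly the factor-less VK_BLEND_OP_MIN behavior Vulkan specifies.
       */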
      GENX(BLEND_STATE_ENTRY_pack)(NULL, state_pos, &entry);
      state_pos += GENX(BLEND_STATE_ENTRY_length);
#if GEN_GEN >= 8
      if (i == 0)
         bs0 = entry;
#endif
   }

#if GEN_GEN >= 8
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_BLEND), blend) {
      blend.AlphaToCoverageEnable         = blend_state.AlphaToCoverageEnable;
      blend.HasWriteableRT                = has_writeable_rt;
      blend.ColorBufferBlendEnable        = bs0.ColorBufferBlendEnable;
      blend.SourceAlphaBlendFactor        = bs0.SourceAlphaBlendFactor;
      blend.DestinationAlphaBlendFactor   = bs0.DestinationAlphaBlendFactor;
      blend.SourceBlendFactor             = bs0.SourceBlendFactor;
      blend.DestinationBlendFactor        = bs0.DestinationBlendFactor;
      blend.AlphaTestEnable               = false;
      blend.IndependentAlphaBlendEnable   =
         blend_state.IndependentAlphaBlendEnable;
   }
#else
   (void)has_writeable_rt;
#endif

   GENX(BLEND_STATE_pack)(NULL, pipeline->blend_state.map, &blend_state);
   anv_state_flush(device, pipeline->blend_state);

   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_BLEND_STATE_POINTERS), bsp) {
      bsp.BlendStatePointer      = pipeline->blend_state.offset;
#if GEN_GEN >= 8
      bsp.BlendStatePointerValid = true;
#endif
   }
}

static void
emit_3dstate_clip(struct anv_pipeline *pipeline,
                  const VkPipelineViewportStateCreateInfo *vp_info,
                  const VkPipelineRasterizationStateCreateInfo *rs_info)
{
   const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
   (void) wm_prog_data;
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_CLIP), clip) {
      clip.ClipEnable               = true;
      clip.StatisticsEnable         = true;
      clip.EarlyCullEnable          = true;
      clip.APIMode                  = APIMODE_D3D;
      clip.ViewportXYClipTestEnable = true;

      clip.ClipMode = CLIPMODE_NORMAL;

      clip.TriangleStripListProvokingVertexSelect = 0;
      clip.LineStripListProvokingVertexSelect     = 0;
      clip.TriangleFanProvokingVertexSelect       = 1;

      clip.MinimumPointWidth = 0.125;
      clip.MaximumPointWidth = 255.875;

      const struct brw_vue_prog_data *last =
         anv_pipeline_get_last_vue_prog_data(pipeline);

      /* From the Vulkan 1.0.45 spec:
       *
       *    "If the last active vertex processing stage shader entry point's
       *    interface does not include a variable decorated with
       *    ViewportIndex, then the first viewport is used."
       */
      if (vp_info && (last->vue_map.slots_valid & VARYING_BIT_VIEWPORT)) {
         clip.MaximumVPIndex = vp_info->viewportCount - 1;
      } else {
         clip.MaximumVPIndex = 0;
      }

      /* From the Vulkan 1.0.45 spec:
       *
       *    "If the last active vertex processing stage shader entry point's
       *    interface does not include a variable decorated with Layer, then
       *    the first layer is used."
       */
      clip.ForceZeroRTAIndexEnable =
         !(last->vue_map.slots_valid & VARYING_BIT_LAYER);

#if GEN_GEN == 7
      clip.FrontWinding            = vk_to_gen_front_face[rs_info->frontFace];
      clip.CullMode                = vk_to_gen_cullmode[rs_info->cullMode];
      clip.ViewportZClipTestEnable = !pipeline->depth_clamp_enable;
      if (last) {
         clip.UserClipDistanceClipTestEnableBitmask = last->clip_distance_mask;
         clip.UserClipDistanceCullTestEnableBitmask = last->cull_distance_mask;
      }
#else
      clip.NonPerspectiveBarycentricEnable = wm_prog_data ?
         (wm_prog_data->barycentric_interp_modes &
          BRW_BARYCENTRIC_NONPERSPECTIVE_BITS) != 0 : 0;
#endif
   }
}

static void
emit_3dstate_streamout(struct anv_pipeline *pipeline,
                       const VkPipelineRasterizationStateCreateInfo *rs_info)
{
   anv_batch_emit(&pipeline->batch, GENX(3DSTATE_STREAMOUT), so) {
      so.RenderingDisable = rs_info->rasterizerDiscardEnable;
   }
}

static uint32_t
get_sampler_count(const struct anv_shader_bin *bin)
{
   uint32_t count_by_4 = DIV_ROUND_UP(bin->bind_map.sampler_count, 4);

   /* We can potentially have way more than 32 samplers and that's ok.
    * However, the 3DSTATE_XS packets only have 3 bits to specify how
    * many to pre-fetch and all values above 4 are marked reserved.
    */
   return MIN2(count_by_4, 4);
}
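
/* Illustrative sketch (not part of the original file): SamplerCount is in
 * groups of four samplers, so a shader with 5 samplers yields
 * DIV_ROUND_UP(5, 4) = 2 (prefetch up to 8 samplers), and anything past 16
 * samplers is clamped to the maximum encodable value of 4.
 */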

static uint32_t
get_binding_table_entry_count(const struct anv_shader_bin *bin)
{
   return DIV_ROUND_UP(bin->bind_map.surface_count, 32);
}

static struct anv_address
get_scratch_address(struct anv_pipeline *pipeline,
                    gl_shader_stage stage,
                    const struct anv_shader_bin *bin)
{
   return (struct anv_address) {
      .bo = anv_scratch_pool_alloc(pipeline->device,
                                   &pipeline->device->scratch_pool,
                                   stage, bin->prog_data->total_scratch),
      .offset = 0,
   };
}

static uint32_t
get_scratch_space(const struct anv_shader_bin *bin)
{
   return ffs(bin->prog_data->total_scratch / 2048);
}
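
/* Illustrative sketch (not part of the original file): the hardware's
 * PerThreadScratchSpace field is a power-of-two encoding, so e.g.
 * total_scratch = 4096 gives ffs(4096 / 2048) = ffs(2) = 2, selecting 4KB
 * of scratch per thread; a shader with no scratch yields 0.
 */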
1140
1141 static void
1142 emit_3dstate_vs(struct anv_pipeline *pipeline)
1143 {
1144    const struct gen_device_info *devinfo = &pipeline->device->info;
1145    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
1146    const struct anv_shader_bin *vs_bin =
1147       pipeline->shaders[MESA_SHADER_VERTEX];
1148
1149    assert(anv_pipeline_has_stage(pipeline, MESA_SHADER_VERTEX));
1150
1151    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VS), vs) {
1152       vs.Enable               = true;
1153       vs.StatisticsEnable     = true;
1154       vs.KernelStartPointer   = vs_bin->kernel.offset;
1155 #if GEN_GEN >= 8
1156       vs.SIMD8DispatchEnable  =
1157          vs_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8;
1158 #endif
1159
1160       assert(!vs_prog_data->base.base.use_alt_mode);
1161 #if GEN_GEN < 11
1162       vs.SingleVertexDispatch       = false;
1163 #endif
1164       vs.VectorMaskEnable           = false;
1165       vs.SamplerCount               = get_sampler_count(vs_bin);
1166      /* Gen 11 workarounds table #2056 WABTPPrefetchDisable suggests to
1167       * disable prefetching of binding tables on A0 and B0 steppings.
1168       * TODO: Revisit this WA on newer steppings.
1169       */
1170       vs.BindingTableEntryCount     = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(vs_bin);
1171       vs.FloatingPointMode          = IEEE754;
1172       vs.IllegalOpcodeExceptionEnable = false;
1173       vs.SoftwareExceptionEnable    = false;
1174       vs.MaximumNumberofThreads     = devinfo->max_vs_threads - 1;
1175       vs.VertexCacheDisable         = false;
1176
1177       vs.VertexURBEntryReadLength      = vs_prog_data->base.urb_read_length;
1178       vs.VertexURBEntryReadOffset      = 0;
1179       vs.DispatchGRFStartRegisterForURBData =
1180          vs_prog_data->base.base.dispatch_grf_start_reg;
1181
1182 #if GEN_GEN >= 8
1183       vs.UserClipDistanceClipTestEnableBitmask =
1184          vs_prog_data->base.clip_distance_mask;
1185       vs.UserClipDistanceCullTestEnableBitmask =
1186          vs_prog_data->base.cull_distance_mask;
1187 #endif
1188
1189       vs.PerThreadScratchSpace   = get_scratch_space(vs_bin);
1190       vs.ScratchSpaceBasePointer =
1191          get_scratch_address(pipeline, MESA_SHADER_VERTEX, vs_bin);
1192    }
1193 }
1194
1195 static void
1196 emit_3dstate_hs_te_ds(struct anv_pipeline *pipeline,
1197                       const VkPipelineTessellationStateCreateInfo *tess_info)
1198 {
1199    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_TESS_EVAL)) {
1200       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs);
1201       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te);
1202       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds);
1203       return;
1204    }
1205
1206    const struct gen_device_info *devinfo = &pipeline->device->info;
1207    const struct anv_shader_bin *tcs_bin =
1208       pipeline->shaders[MESA_SHADER_TESS_CTRL];
1209    const struct anv_shader_bin *tes_bin =
1210       pipeline->shaders[MESA_SHADER_TESS_EVAL];
1211
1212    const struct brw_tcs_prog_data *tcs_prog_data = get_tcs_prog_data(pipeline);
1213    const struct brw_tes_prog_data *tes_prog_data = get_tes_prog_data(pipeline);
1214
1215    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_HS), hs) {
1216       hs.Enable = true;
1217       hs.StatisticsEnable = true;
1218       hs.KernelStartPointer = tcs_bin->kernel.offset;
1219
1220       hs.SamplerCount = get_sampler_count(tcs_bin);
1221       /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
1222       hs.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(tcs_bin);
1223       hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
1224       hs.IncludeVertexHandles = true;
1225       hs.InstanceCount = tcs_prog_data->instances - 1;
1226
1227       hs.VertexURBEntryReadLength = 0;
1228       hs.VertexURBEntryReadOffset = 0;
1229       hs.DispatchGRFStartRegisterForURBData =
1230          tcs_prog_data->base.base.dispatch_grf_start_reg;
1231
1232       hs.PerThreadScratchSpace = get_scratch_space(tcs_bin);
1233       hs.ScratchSpaceBasePointer =
1234          get_scratch_address(pipeline, MESA_SHADER_TESS_CTRL, tcs_bin);
1235    }
1236
1237    const VkPipelineTessellationDomainOriginStateCreateInfoKHR *domain_origin_state =
1238       tess_info ? vk_find_struct_const(tess_info, PIPELINE_TESSELLATION_DOMAIN_ORIGIN_STATE_CREATE_INFO_KHR) : NULL;
1239
1240    VkTessellationDomainOriginKHR uv_origin =
1241       domain_origin_state ? domain_origin_state->domainOrigin :
1242                             VK_TESSELLATION_DOMAIN_ORIGIN_UPPER_LEFT_KHR;
1243
1244    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_TE), te) {
1245       te.Partitioning = tes_prog_data->partitioning;
1246
1247       if (uv_origin == VK_TESSELLATION_DOMAIN_ORIGIN_LOWER_LEFT_KHR) {
1248          te.OutputTopology = tes_prog_data->output_topology;
1249       } else {
1250          /* When the origin is upper-left, we have to flip the winding order */
1251          if (tes_prog_data->output_topology == OUTPUT_TRI_CCW) {
1252             te.OutputTopology = OUTPUT_TRI_CW;
1253          } else if (tes_prog_data->output_topology == OUTPUT_TRI_CW) {
1254             te.OutputTopology = OUTPUT_TRI_CCW;
1255          } else {
1256             te.OutputTopology = tes_prog_data->output_topology;
1257          }
1258       }
1259
1260       te.TEDomain = tes_prog_data->domain;
1261       te.TEEnable = true;
1262       te.MaximumTessellationFactorOdd = 63.0;
1263       te.MaximumTessellationFactorNotOdd = 64.0;
1264    }
1265
1266    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_DS), ds) {
1267       ds.Enable = true;
1268       ds.StatisticsEnable = true;
1269       ds.KernelStartPointer = tes_bin->kernel.offset;
1270
1271       ds.SamplerCount = get_sampler_count(tes_bin);
1272       /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
1273       ds.BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(tes_bin);
1274       ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
1275
1276       ds.ComputeWCoordinateEnable =
1277          tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
1278
1279       ds.PatchURBEntryReadLength = tes_prog_data->base.urb_read_length;
1280       ds.PatchURBEntryReadOffset = 0;
1281       ds.DispatchGRFStartRegisterForURBData =
1282          tes_prog_data->base.base.dispatch_grf_start_reg;
1283
1284 #if GEN_GEN >= 8
1285 #if GEN_GEN < 11
1286       ds.DispatchMode =
1287          tes_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8 ?
1288             DISPATCH_MODE_SIMD8_SINGLE_PATCH :
1289             DISPATCH_MODE_SIMD4X2;
1290 #else
1291       assert(tes_prog_data->base.dispatch_mode == DISPATCH_MODE_SIMD8);
1292       ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
1293 #endif
1294
1295       ds.UserClipDistanceClipTestEnableBitmask =
1296          tes_prog_data->base.clip_distance_mask;
1297       ds.UserClipDistanceCullTestEnableBitmask =
1298          tes_prog_data->base.cull_distance_mask;
1299 #endif
1300
1301       ds.PerThreadScratchSpace = get_scratch_space(tes_bin);
1302       ds.ScratchSpaceBasePointer =
1303          get_scratch_address(pipeline, MESA_SHADER_TESS_EVAL, tes_bin);
1304    }
1305 }
1306
1307 static void
1308 emit_3dstate_gs(struct anv_pipeline *pipeline)
1309 {
1310    const struct gen_device_info *devinfo = &pipeline->device->info;
1311    const struct anv_shader_bin *gs_bin =
1312       pipeline->shaders[MESA_SHADER_GEOMETRY];
1313
1314    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_GEOMETRY)) {
1315       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs);
1316       return;
1317    }
1318
1319    const struct brw_gs_prog_data *gs_prog_data = get_gs_prog_data(pipeline);
1320
1321    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_GS), gs) {
1322       gs.Enable                  = true;
1323       gs.StatisticsEnable        = true;
1324       gs.KernelStartPointer      = gs_bin->kernel.offset;
1325       gs.DispatchMode            = gs_prog_data->base.dispatch_mode;
1326
1327       gs.SingleProgramFlow       = false;
1328       gs.VectorMaskEnable        = false;
1329       gs.SamplerCount            = get_sampler_count(gs_bin);
1330       /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
1331       gs.BindingTableEntryCount  = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(gs_bin);
1332       gs.IncludeVertexHandles    = gs_prog_data->base.include_vue_handles;
1333       gs.IncludePrimitiveID      = gs_prog_data->include_primitive_id;
1334
1335       if (GEN_GEN == 8) {
1336          /* Broadwell is weird.  It needs us to divide by 2. */
1337          gs.MaximumNumberofThreads = devinfo->max_gs_threads / 2 - 1;
1338       } else {
1339          gs.MaximumNumberofThreads = devinfo->max_gs_threads - 1;
1340       }
1341
1342       gs.OutputVertexSize        = gs_prog_data->output_vertex_size_hwords * 2 - 1;
1343       gs.OutputTopology          = gs_prog_data->output_topology;
1345       gs.ControlDataFormat       = gs_prog_data->control_data_format;
1346       gs.ControlDataHeaderSize   = gs_prog_data->control_data_header_size_hwords;
1347       gs.InstanceControl         = MAX2(gs_prog_data->invocations, 1) - 1;
1348       gs.ReorderMode             = TRAILING;
1349
1350 #if GEN_GEN >= 8
1351       gs.ExpectedVertexCount     = gs_prog_data->vertices_in;
1352       gs.StaticOutput            = gs_prog_data->static_vertex_count >= 0;
1353       gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count >= 0 ?
1354                                    gs_prog_data->static_vertex_count : 0;
1355 #endif
1356
1357       gs.VertexURBEntryReadOffset = 0;
1358       gs.VertexURBEntryReadLength = gs_prog_data->base.urb_read_length;
1359       gs.DispatchGRFStartRegisterForURBData =
1360          gs_prog_data->base.base.dispatch_grf_start_reg;
1361
1362 #if GEN_GEN >= 8
1363       gs.UserClipDistanceClipTestEnableBitmask =
1364          gs_prog_data->base.clip_distance_mask;
1365       gs.UserClipDistanceCullTestEnableBitmask =
1366          gs_prog_data->base.cull_distance_mask;
1367 #endif
1368
1369       gs.PerThreadScratchSpace   = get_scratch_space(gs_bin);
1370       gs.ScratchSpaceBasePointer =
1371          get_scratch_address(pipeline, MESA_SHADER_GEOMETRY, gs_bin);
1372    }
1373 }
1374
1375 static bool
1376 has_color_buffer_write_enabled(const struct anv_pipeline *pipeline,
1377                                const VkPipelineColorBlendStateCreateInfo *blend)
1378 {
1379    const struct anv_shader_bin *shader_bin =
1380       pipeline->shaders[MESA_SHADER_FRAGMENT];
1381    if (!shader_bin)
1382       return false;
1383
1384    const struct anv_pipeline_bind_map *bind_map = &shader_bin->bind_map;
1385    for (int i = 0; i < bind_map->surface_count; i++) {
1386       struct anv_pipeline_binding *binding = &bind_map->surface_to_descriptor[i];
1387
1388       if (binding->set != ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS)
1389          continue;
1390
1391       if (binding->index == UINT32_MAX)
1392          continue;
1393
1394       if (blend && blend->pAttachments[binding->index].colorWriteMask != 0)
1395          return true;
1396    }
1397
1398    return false;
1399 }
1400
1401 static void
1402 emit_3dstate_wm(struct anv_pipeline *pipeline, struct anv_subpass *subpass,
1403                 const VkPipelineColorBlendStateCreateInfo *blend,
1404                 const VkPipelineMultisampleStateCreateInfo *multisample)
1405 {
1406    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1407
1408    MAYBE_UNUSED uint32_t samples =
1409       multisample ? multisample->rasterizationSamples : 1;
1410
1411    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_WM), wm) {
1412       wm.StatisticsEnable                    = true;
1413       wm.LineEndCapAntialiasingRegionWidth   = _05pixels;
1414       wm.LineAntialiasingRegionWidth         = _10pixels;
1415       wm.PointRasterizationRule              = RASTRULE_UPPER_RIGHT;
1416
1417       if (anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1418          if (wm_prog_data->early_fragment_tests) {
1419             wm.EarlyDepthStencilControl         = EDSC_PREPS;
1420          } else if (wm_prog_data->has_side_effects) {
1421             wm.EarlyDepthStencilControl         = EDSC_PSEXEC;
1422          } else {
1423             wm.EarlyDepthStencilControl         = EDSC_NORMAL;
1424          }
1425
1426 #if GEN_GEN >= 8
1427          /* Gen8 hardware tries to compute ThreadDispatchEnable for us but
1428           * doesn't take into account KillPixels when no depth or stencil
1429           * writes are enabled.  In order for occlusion queries to work
1430           * correctly with no attachments, we need to force-enable PS thread
1431           * dispatch.
1432           *
1433           * The BDW docs are pretty clear that this bit isn't validated
1434           * and probably shouldn't be used in production:
1435           *
1436           *    "This must always be set to Normal. This field should not be
1437           *    tested for functional validation."
1438           *
1439           * Unfortunately, the other mechanism we have for doing this,
1440           * 3DSTATE_PS_EXTRA::PixelShaderHasUAV, causes hangs on BDW.
1441           * Given two bad options, we choose the one which works.
1442           */
1443          if ((wm_prog_data->has_side_effects || wm_prog_data->uses_kill) &&
1444              !has_color_buffer_write_enabled(pipeline, blend))
1445             wm.ForceThreadDispatchEnable = ForceON;
1446 #endif
1447
1448          wm.BarycentricInterpolationMode =
1449             wm_prog_data->barycentric_interp_modes;
1450
1451 #if GEN_GEN < 8
1452          wm.PixelShaderComputedDepthMode  = wm_prog_data->computed_depth_mode;
1453          wm.PixelShaderUsesSourceDepth    = wm_prog_data->uses_src_depth;
1454          wm.PixelShaderUsesSourceW        = wm_prog_data->uses_src_w;
1455          wm.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
1456
1457          /* If the subpass has a depth or stencil self-dependency, then we
1458           * need to force the hardware to do the depth/stencil write *after*
1459           * fragment shader execution.  Otherwise, the writes may hit memory
1460           * before we get around to fetching from the input attachment and we
1461           * may get the depth or stencil value from the current draw rather
1462           * than the previous one.
1463           */
1464          wm.PixelShaderKillsPixel         = subpass->has_ds_self_dep ||
1465                                             wm_prog_data->uses_kill;
1466
1467          if (wm.PixelShaderComputedDepthMode != PSCDEPTH_OFF ||
1468              wm_prog_data->has_side_effects ||
1469              wm.PixelShaderKillsPixel ||
1470              has_color_buffer_write_enabled(pipeline, blend))
1471             wm.ThreadDispatchEnable = true;
1472
1473          if (samples > 1) {
1474             wm.MultisampleRasterizationMode = MSRASTMODE_ON_PATTERN;
1475             if (wm_prog_data->persample_dispatch) {
1476                wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
1477             } else {
1478                wm.MultisampleDispatchMode = MSDISPMODE_PERPIXEL;
1479             }
1480          } else {
1481             wm.MultisampleRasterizationMode = MSRASTMODE_OFF_PIXEL;
1482             wm.MultisampleDispatchMode = MSDISPMODE_PERSAMPLE;
1483          }
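         /* Note (added for clarity): with e.g. 4x MSAA, PERSAMPLE dispatch
          * runs the fragment shader once for every covered sample, while
          * PERPIXEL runs it once per pixel and broadcasts the result to the
          * covered samples.  In the single-sampled case the two modes
          * coincide, so PERSAMPLE is a safe default.
          */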
1484 #endif
1485       }
1486    }
1487 }
1488
1489 UNUSED static bool
1490 is_dual_src_blend_factor(VkBlendFactor factor)
1491 {
1492    return factor == VK_BLEND_FACTOR_SRC1_COLOR ||
1493           factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_COLOR ||
1494           factor == VK_BLEND_FACTOR_SRC1_ALPHA ||
1495           factor == VK_BLEND_FACTOR_ONE_MINUS_SRC1_ALPHA;
1496 }
1497
1498 static void
1499 emit_3dstate_ps(struct anv_pipeline *pipeline,
1500                 const VkPipelineColorBlendStateCreateInfo *blend,
1501                 const VkPipelineMultisampleStateCreateInfo *multisample)
1502 {
1503    MAYBE_UNUSED const struct gen_device_info *devinfo = &pipeline->device->info;
1504    const struct anv_shader_bin *fs_bin =
1505       pipeline->shaders[MESA_SHADER_FRAGMENT];
1506
1507    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1508       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
1509 #if GEN_GEN == 7
1510          /* Even if no fragments are ever dispatched, gen7 hardware hangs if
1511           * we don't at least set the maximum number of threads.
1512           */
1513          ps.MaximumNumberofThreads = devinfo->max_wm_threads - 1;
1514 #endif
1515       }
1516       return;
1517    }
1518
1519    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1520
1521 #if GEN_GEN < 8
1522    /* The hardware wedges if you have this bit set but don't turn on any dual
1523     * source blend factors.
1524     */
1525    bool dual_src_blend = false;
1526    if (wm_prog_data->dual_src_blend && blend) {
1527       for (uint32_t i = 0; i < blend->attachmentCount; i++) {
1528          const VkPipelineColorBlendAttachmentState *bstate =
1529             &blend->pAttachments[i];
1530
1531          if (bstate->blendEnable &&
1532              (is_dual_src_blend_factor(bstate->srcColorBlendFactor) ||
1533               is_dual_src_blend_factor(bstate->dstColorBlendFactor) ||
1534               is_dual_src_blend_factor(bstate->srcAlphaBlendFactor) ||
1535               is_dual_src_blend_factor(bstate->dstAlphaBlendFactor))) {
1536             dual_src_blend = true;
1537             break;
1538          }
1539       }
1540    }
1541 #endif
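   /* Example (illustrative, not exhaustive): an attachment with
    * blendEnable = VK_TRUE and srcColorBlendFactor =
    * VK_BLEND_FACTOR_SRC1_COLOR is enough to set dual_src_blend above,
    * while a shader that writes SRC1 outputs under a blend state that never
    * references an SRC1 factor must leave the bit clear to avoid the hang.
    */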
1542
1543    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS), ps) {
1544       ps._8PixelDispatchEnable      = wm_prog_data->dispatch_8;
1545       ps._16PixelDispatchEnable     = wm_prog_data->dispatch_16;
1546       ps._32PixelDispatchEnable     = wm_prog_data->dispatch_32;
1547
1548       /* From the Sky Lake PRM 3DSTATE_PS::32 Pixel Dispatch Enable:
1549        *
1550        *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16, SIMD32
1551        *    Dispatch must not be enabled for PER_PIXEL dispatch mode."
1552        *
1553        * Since 16x MSAA was first introduced on SKL, we don't need to apply
1554        * the workaround on any older hardware.
1555        */
1556       if (GEN_GEN >= 9 && !wm_prog_data->persample_dispatch &&
1557           multisample && multisample->rasterizationSamples == 16) {
1558          assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
1559          ps._32PixelDispatchEnable = false;
1560       }
1561
1562       ps.KernelStartPointer0 = fs_bin->kernel.offset +
1563                                brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
1564       ps.KernelStartPointer1 = fs_bin->kernel.offset +
1565                                brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
1566       ps.KernelStartPointer2 = fs_bin->kernel.offset +
1567                                brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
1568
1569       ps.SingleProgramFlow          = false;
1570       ps.VectorMaskEnable           = true;
1571       ps.SamplerCount               = get_sampler_count(fs_bin);
1572       /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
1573       ps.BindingTableEntryCount     = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(fs_bin);
1574       ps.PushConstantEnable         = wm_prog_data->base.nr_params > 0 ||
1575                                       wm_prog_data->base.ubo_ranges[0].length;
1576       ps.PositionXYOffsetSelect     = wm_prog_data->uses_pos_offset ?
1577                                       POSOFFSET_SAMPLE : POSOFFSET_NONE;
1578 #if GEN_GEN < 8
1579       ps.AttributeEnable            = wm_prog_data->num_varying_inputs > 0;
1580       ps.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
1581       ps.DualSourceBlendEnable      = dual_src_blend;
1582 #endif
1583
1584 #if GEN_IS_HASWELL
1585       /* Haswell requires the sample mask to be set in this packet as well
1586        * as in 3DSTATE_SAMPLE_MASK; the values should match.
1587        */
1588       ps.SampleMask                 = 0xff;
1589 #endif
1590
1591 #if GEN_GEN >= 9
1592       ps.MaximumNumberofThreadsPerPSD  = 64 - 1;
1593 #elif GEN_GEN >= 8
1594       ps.MaximumNumberofThreadsPerPSD  = 64 - 2;
1595 #else
1596       ps.MaximumNumberofThreads        = devinfo->max_wm_threads - 1;
1597 #endif
1598
1599       ps.DispatchGRFStartRegisterForConstantSetupData0 =
1600          brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
1601       ps.DispatchGRFStartRegisterForConstantSetupData1 =
1602          brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
1603       ps.DispatchGRFStartRegisterForConstantSetupData2 =
1604          brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
1605
1606       ps.PerThreadScratchSpace   = get_scratch_space(fs_bin);
1607       ps.ScratchSpaceBasePointer =
1608          get_scratch_address(pipeline, MESA_SHADER_FRAGMENT, fs_bin);
1609    }
1610 }
1611
1612 #if GEN_GEN >= 8
1613 static void
1614 emit_3dstate_ps_extra(struct anv_pipeline *pipeline,
1615                       struct anv_subpass *subpass,
1616                       const VkPipelineColorBlendStateCreateInfo *blend)
1617 {
1618    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1619
1620    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1621       anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps);
1622       return;
1623    }
1624
1625    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_PS_EXTRA), ps) {
1626       ps.PixelShaderValid              = true;
1627       ps.AttributeEnable               = wm_prog_data->num_varying_inputs > 0;
1628       ps.oMaskPresenttoRenderTarget    = wm_prog_data->uses_omask;
1629       ps.PixelShaderIsPerSample        = wm_prog_data->persample_dispatch;
1630       ps.PixelShaderComputedDepthMode  = wm_prog_data->computed_depth_mode;
1631       ps.PixelShaderUsesSourceDepth    = wm_prog_data->uses_src_depth;
1632       ps.PixelShaderUsesSourceW        = wm_prog_data->uses_src_w;
1633
1634       /* If the subpass has a depth or stencil self-dependency, then we need
1635        * to force the hardware to do the depth/stencil write *after* fragment
1636        * shader execution.  Otherwise, the writes may hit memory before we get
1637        * around to fetching from the input attachment and we may get the depth
1638        * or stencil value from the current draw rather than the previous one.
1639        */
1640       ps.PixelShaderKillsPixel         = subpass->has_ds_self_dep ||
1641                                          wm_prog_data->uses_kill;
1642
1643 #if GEN_GEN >= 9
1644       ps.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
1645       ps.PixelShaderPullsBary    = wm_prog_data->pulls_bary;
1646
1647       ps.InputCoverageMaskState  = ICMS_NONE;
1648       if (wm_prog_data->uses_sample_mask) {
1649          if (wm_prog_data->post_depth_coverage)
1650             ps.InputCoverageMaskState  = ICMS_DEPTH_COVERAGE;
1651          else
1652             ps.InputCoverageMaskState  = ICMS_INNER_CONSERVATIVE;
1653       }
1654 #else
1655       ps.PixelShaderUsesInputCoverageMask = wm_prog_data->uses_sample_mask;
1656 #endif
1657    }
1658 }
1659
1660 static void
1661 emit_3dstate_vf_topology(struct anv_pipeline *pipeline)
1662 {
1663    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_TOPOLOGY), vft) {
1664       vft.PrimitiveTopologyType = pipeline->topology;
1665    }
1666 }
1667 #endif
1668
1669 static void
1670 emit_3dstate_vf_statistics(struct anv_pipeline *pipeline)
1671 {
1672    anv_batch_emit(&pipeline->batch, GENX(3DSTATE_VF_STATISTICS), vfs) {
1673       vfs.StatisticsEnable = true;
1674    }
1675 }
1676
1677 static void
1678 compute_kill_pixel(struct anv_pipeline *pipeline,
1679                    const VkPipelineMultisampleStateCreateInfo *ms_info,
1680                    const struct anv_subpass *subpass)
1681 {
1682    if (!anv_pipeline_has_stage(pipeline, MESA_SHADER_FRAGMENT)) {
1683       pipeline->kill_pixel = false;
1684       return;
1685    }
1686
1687    const struct brw_wm_prog_data *wm_prog_data = get_wm_prog_data(pipeline);
1688
1689    /* This computes the KillPixel term of the check that decides whether or
1690     * not to enable the PMA fix on gen8 or gen9.  It's given by this chunk
1691     * of the giant formula:
1692     *
1693     *    (3DSTATE_PS_EXTRA::PixelShaderKillsPixel ||
1694     *     3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1695     *     3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1696     *     3DSTATE_PS_BLEND::AlphaTestEnable ||
1697     *     3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1698     *
1699     * 3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable is always false and so is
1700     * 3DSTATE_PS_BLEND::AlphaTestEnable since Vulkan doesn't have a concept
1701     * of an alpha test.
1702     */
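   /* Worked example (illustrative): a fragment shader with no discard in a
    * pipeline that enables alphaToCoverage still ends up with
    * kill_pixel = true, because alpha-to-coverage can drop samples just
    * like an explicit discard; a plain opaque pipeline that hits none of
    * the conditions below gets kill_pixel = false.
    */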
1703    pipeline->kill_pixel =
1704       subpass->has_ds_self_dep || wm_prog_data->uses_kill ||
1705       wm_prog_data->uses_omask ||
1706       (ms_info && ms_info->alphaToCoverageEnable);
1707 }
1708
1709 static VkResult
1710 genX(graphics_pipeline_create)(
1711     VkDevice                                    _device,
1712     struct anv_pipeline_cache *                 cache,
1713     const VkGraphicsPipelineCreateInfo*         pCreateInfo,
1714     const VkAllocationCallbacks*                pAllocator,
1715     VkPipeline*                                 pPipeline)
1716 {
1717    ANV_FROM_HANDLE(anv_device, device, _device);
1718    ANV_FROM_HANDLE(anv_render_pass, pass, pCreateInfo->renderPass);
1719    struct anv_subpass *subpass = &pass->subpasses[pCreateInfo->subpass];
1720    struct anv_pipeline *pipeline;
1721    VkResult result;
1722
1723    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO);
1724
1725    /* Use the default pipeline cache if none is specified */
1726    if (cache == NULL && device->instance->pipeline_cache_enabled)
1727       cache = &device->default_pipeline_cache;
1728
1729    pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
1730                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1731    if (pipeline == NULL)
1732       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1733
1734    result = anv_pipeline_init(pipeline, device, cache,
1735                               pCreateInfo, pAllocator);
1736    if (result != VK_SUCCESS) {
1737       vk_free2(&device->alloc, pAllocator, pipeline);
1738       return result;
1739    }
1740
1741    assert(pCreateInfo->pVertexInputState);
1742    emit_vertex_input(pipeline, pCreateInfo->pVertexInputState);
1743    assert(pCreateInfo->pRasterizationState);
1744    emit_rs_state(pipeline, pCreateInfo->pRasterizationState,
1745                  pCreateInfo->pMultisampleState, pass, subpass);
1746    emit_ms_state(pipeline, pCreateInfo->pMultisampleState);
1747    emit_ds_state(pipeline, pCreateInfo->pDepthStencilState, pass, subpass);
1748    emit_cb_state(pipeline, pCreateInfo->pColorBlendState,
1749                            pCreateInfo->pMultisampleState);
1750    compute_kill_pixel(pipeline, pCreateInfo->pMultisampleState, subpass);
1751
1752    emit_urb_setup(pipeline);
1753
1754    emit_3dstate_clip(pipeline, pCreateInfo->pViewportState,
1755                      pCreateInfo->pRasterizationState);
1756    emit_3dstate_streamout(pipeline, pCreateInfo->pRasterizationState);
1757
1758 #if 0
1759    /* From gen7_vs_state.c */
1760
1761    /**
1762     * From Graphics BSpec: 3D-Media-GPGPU Engine > 3D Pipeline Stages >
1763     * Geometry > Geometry Shader > State:
1764     *
1765     *     "Note: Because of corruption in IVB:GT2, software needs to flush the
1766     *     whole fixed function pipeline when the GS enable changes value in
1767     *     the 3DSTATE_GS."
1768     *
1769     * The hardware architects have clarified that in this context "flush the
1770     * whole fixed function pipeline" means to emit a PIPE_CONTROL with the "CS
1771     * Stall" bit set.
1772     */
1773    if (!device->info.is_haswell && !device->info.is_baytrail)
1774       gen7_emit_vs_workaround_flush(brw);
1775 #endif
1776
1777    emit_3dstate_vs(pipeline);
1778    emit_3dstate_hs_te_ds(pipeline, pCreateInfo->pTessellationState);
1779    emit_3dstate_gs(pipeline);
1780    emit_3dstate_sbe(pipeline);
1781    emit_3dstate_wm(pipeline, subpass, pCreateInfo->pColorBlendState,
1782                    pCreateInfo->pMultisampleState);
1783    emit_3dstate_ps(pipeline, pCreateInfo->pColorBlendState,
1784                    pCreateInfo->pMultisampleState);
1785 #if GEN_GEN >= 8
1786    emit_3dstate_ps_extra(pipeline, subpass, pCreateInfo->pColorBlendState);
1787    emit_3dstate_vf_topology(pipeline);
1788 #endif
1789    emit_3dstate_vf_statistics(pipeline);
1790
1791    *pPipeline = anv_pipeline_to_handle(pipeline);
1792
1793    return pipeline->batch.status;
1794 }
1795
1796 static VkResult
1797 compute_pipeline_create(
1798     VkDevice                                    _device,
1799     struct anv_pipeline_cache *                 cache,
1800     const VkComputePipelineCreateInfo*          pCreateInfo,
1801     const VkAllocationCallbacks*                pAllocator,
1802     VkPipeline*                                 pPipeline)
1803 {
1804    ANV_FROM_HANDLE(anv_device, device, _device);
1805    const struct anv_physical_device *physical_device =
1806       &device->instance->physicalDevice;
1807    const struct gen_device_info *devinfo = &physical_device->info;
1808    struct anv_pipeline *pipeline;
1809    VkResult result;
1810
1811    assert(pCreateInfo->sType == VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO);
1812
1813    /* Use the default pipeline cache if none is specified */
1814    if (cache == NULL && device->instance->pipeline_cache_enabled)
1815       cache = &device->default_pipeline_cache;
1816
1817    pipeline = vk_alloc2(&device->alloc, pAllocator, sizeof(*pipeline), 8,
1818                          VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1819    if (pipeline == NULL)
1820       return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
1821
1822    pipeline->device = device;
1823
1824    pipeline->blend_state.map = NULL;
1825
1826    result = anv_reloc_list_init(&pipeline->batch_relocs,
1827                                 pAllocator ? pAllocator : &device->alloc);
1828    if (result != VK_SUCCESS) {
1829       vk_free2(&device->alloc, pAllocator, pipeline);
1830       return result;
1831    }
1832    pipeline->batch.next = pipeline->batch.start = pipeline->batch_data;
1833    pipeline->batch.end = pipeline->batch.start + sizeof(pipeline->batch_data);
1834    pipeline->batch.relocs = &pipeline->batch_relocs;
1835    pipeline->batch.status = VK_SUCCESS;
1836
1837    /* When we free the pipeline, we detect stages based on the NULL status
1838     * of various prog_data pointers.  Make them NULL by default.
1839     */
1840    memset(pipeline->shaders, 0, sizeof(pipeline->shaders));
1841
1842    pipeline->needs_data_cache = false;
1843
1844    assert(pCreateInfo->stage.stage == VK_SHADER_STAGE_COMPUTE_BIT);
1845    pipeline->active_stages |= VK_SHADER_STAGE_COMPUTE_BIT;
1846    ANV_FROM_HANDLE(anv_shader_module, module, pCreateInfo->stage.module);
1847    result = anv_pipeline_compile_cs(pipeline, cache, pCreateInfo, module,
1848                                     pCreateInfo->stage.pName,
1849                                     pCreateInfo->stage.pSpecializationInfo);
1850    if (result != VK_SUCCESS) {
1851       vk_free2(&device->alloc, pAllocator, pipeline);
1852       return result;
1853    }
1854
1855    const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
1856
1857    anv_pipeline_setup_l3_config(pipeline, cs_prog_data->base.total_shared > 0);
1858
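   /* Descriptive note (added): cs_right_mask is the channel-enable mask for
    * the last, possibly partial, SIMD chunk of each workgroup.  Worked
    * example (illustrative): local_size = (7,1,1) with simd_size = 8 gives
    * group_size = 7 and remainder = 7, so the mask is ~0u >> 25 = 0x7f and
    * only 7 of the 8 channels execute.  When group_size is a multiple of
    * simd_size, remainder is 0 and all simd_size channels run.
    */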
1859    uint32_t group_size = cs_prog_data->local_size[0] *
1860       cs_prog_data->local_size[1] * cs_prog_data->local_size[2];
1861    uint32_t remainder = group_size & (cs_prog_data->simd_size - 1);
1862
1863    if (remainder > 0)
1864       pipeline->cs_right_mask = ~0u >> (32 - remainder);
1865    else
1866       pipeline->cs_right_mask = ~0u >> (32 - cs_prog_data->simd_size);
1867
1868    const uint32_t vfe_curbe_allocation =
1869       ALIGN(cs_prog_data->push.per_thread.regs * cs_prog_data->threads +
1870             cs_prog_data->push.cross_thread.regs, 2);
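   /* Worked example (illustrative numbers): 2 per-thread push constant
    * registers across 8 threads plus 1 cross-thread register give
    * ALIGN(2 * 8 + 1, 2) = ALIGN(17, 2) = 18 register-sized CURBE entries.
    */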
1871
1872    const uint32_t subslices = MAX2(physical_device->subslice_total, 1);
1873
1874    const struct anv_shader_bin *cs_bin =
1875       pipeline->shaders[MESA_SHADER_COMPUTE];
1876
1877    anv_batch_emit(&pipeline->batch, GENX(MEDIA_VFE_STATE), vfe) {
1878 #if GEN_GEN > 7
1879       vfe.StackSize              = 0;
1880 #else
1881       vfe.GPGPUMode              = true;
1882 #endif
1883       vfe.MaximumNumberofThreads =
1884          devinfo->max_cs_threads * subslices - 1;
1885       vfe.NumberofURBEntries     = GEN_GEN <= 7 ? 0 : 2;
1886 #if GEN_GEN < 11
1887       vfe.ResetGatewayTimer      = true;
1888 #endif
1889 #if GEN_GEN <= 8
1890       vfe.BypassGatewayControl   = true;
1891 #endif
1892       vfe.URBEntryAllocationSize = GEN_GEN <= 7 ? 0 : 2;
1893       vfe.CURBEAllocationSize    = vfe_curbe_allocation;
1894
1895       vfe.PerThreadScratchSpace = get_scratch_space(cs_bin);
1896       vfe.ScratchSpaceBasePointer =
1897          get_scratch_address(pipeline, MESA_SHADER_COMPUTE, cs_bin);
1898    }
1899
1900    struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
1901       .KernelStartPointer     = cs_bin->kernel.offset,
1902
1903       .SamplerCount           = get_sampler_count(cs_bin),
1904       /* Gen 11 workarounds table #2056 WABTPPrefetchDisable */
1905       .BindingTableEntryCount = GEN_GEN == 11 ? 0 : get_binding_table_entry_count(cs_bin),
1906       .BarrierEnable          = cs_prog_data->uses_barrier,
1907       .SharedLocalMemorySize  =
1908          encode_slm_size(GEN_GEN, cs_prog_data->base.total_shared),
1909
1910 #if !GEN_IS_HASWELL
1911       .ConstantURBEntryReadOffset = 0,
1912 #endif
1913       .ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs,
1914 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1915       .CrossThreadConstantDataReadLength =
1916          cs_prog_data->push.cross_thread.regs,
1917 #endif
1918
1919       .NumberofThreadsinGPGPUThreadGroup = cs_prog_data->threads,
1920    };
1921    GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL,
1922                                         pipeline->interface_descriptor_data,
1923                                         &desc);
1924
1925    *pPipeline = anv_pipeline_to_handle(pipeline);
1926
1927    return pipeline->batch.status;
1928 }
1929
1930 VkResult genX(CreateGraphicsPipelines)(
1931     VkDevice                                    _device,
1932     VkPipelineCache                             pipelineCache,
1933     uint32_t                                    count,
1934     const VkGraphicsPipelineCreateInfo*         pCreateInfos,
1935     const VkAllocationCallbacks*                pAllocator,
1936     VkPipeline*                                 pPipelines)
1937 {
1938    ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache);
1939
1940    VkResult result = VK_SUCCESS;
1941
1942    unsigned i;
1943    for (i = 0; i < count; i++) {
1944       result = genX(graphics_pipeline_create)(_device,
1945                                               pipeline_cache,
1946                                               &pCreateInfos[i],
1947                                               pAllocator, &pPipelines[i]);
1948
1949       /* Bail out on the first error as it is not obvious which error
1950        * should be reported when two different failures occur. */
1951       if (result != VK_SUCCESS)
1952          break;
1953    }
1954
1955    for (; i < count; i++)
1956       pPipelines[i] = VK_NULL_HANDLE;
1957
1958    return result;
1959 }
1960
1961 VkResult genX(CreateComputePipelines)(
1962     VkDevice                                    _device,
1963     VkPipelineCache                             pipelineCache,
1964     uint32_t                                    count,
1965     const VkComputePipelineCreateInfo*          pCreateInfos,
1966     const VkAllocationCallbacks*                pAllocator,
1967     VkPipeline*                                 pPipelines)
1968 {
1969    ANV_FROM_HANDLE(anv_pipeline_cache, pipeline_cache, pipelineCache);
1970
1971    VkResult result = VK_SUCCESS;
1972
1973    unsigned i;
1974    for (i = 0; i < count; i++) {
1975       result = compute_pipeline_create(_device, pipeline_cache,
1976                                        &pCreateInfos[i],
1977                                        pAllocator, &pPipelines[i]);
1978
1979       /* Bail out on the first error as it is not obvious which error
1980        * should be reported when two different failures occur. */
1981       if (result != VK_SUCCESS)
1982          break;
1983    }
1984
1985    for (; i < count; i++)
1986       pPipelines[i] = VK_NULL_HANDLE;
1987
1988    return result;
1989 }