OSDN Git Service

anv/gen7: Remove unneeded helper begin_render_pass()
[android-x86/external-mesa.git] / src / vulkan / gen7_cmd_buffer.c
1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23
24 #include <assert.h>
25 #include <stdbool.h>
26 #include <string.h>
27 #include <unistd.h>
28 #include <fcntl.h>
29
30 #include "anv_private.h"
31
32 #include "gen7_pack.h"
33 #include "gen75_pack.h"
34
/* Emit 3DSTATE_CONSTANT_* packets for every graphics stage whose push
 * constants are marked dirty, pointing the hardware at the packed constant
 * data allocated by anv_cmd_buffer_push_constants().
 *
 * Returns the VkShaderStageFlags mask of stages that were actually flushed;
 * those bits are also cleared from state.push_constants_dirty.
 */
static uint32_t
cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer)
{
   /* _3DCommandSubOpcode for each stage's 3DSTATE_CONSTANT_* packet.  The
    * GEN7_3DSTATE_CONSTANT_VS template is reused for all stages with only
    * the subopcode overridden.  Compute has no 3DSTATE_CONSTANT packet and
    * is handled elsewhere, hence 0. */
   static const uint32_t push_constant_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 21,
      [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
      [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
      [MESA_SHADER_GEOMETRY]                    = 22,
      [MESA_SHADER_FRAGMENT]                    = 23,
      [MESA_SHADER_COMPUTE]                     = 0,
   };

   VkShaderStageFlags flushed = 0;

   anv_foreach_stage(stage, cmd_buffer->state.push_constants_dirty) {
      struct anv_state state = anv_cmd_buffer_push_constants(cmd_buffer, stage);

      /* No push-constant data for this stage; leave its dirty bit set. */
      if (state.offset == 0)
         continue;

      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_CONSTANT_VS,
                     ._3DCommandSubOpcode = push_constant_opcodes[stage],
                     .ConstantBody = {
                        .PointerToConstantBuffer0 = { .offset = state.offset },
                        /* Read length is expressed in 32-byte units. */
                        .ConstantBuffer0ReadLength = DIV_ROUND_UP(state.alloc_size, 32),
                     });

      flushed |= mesa_to_vk_shader_stage(stage);
   }

   cmd_buffer->state.push_constants_dirty &= ~flushed;

   return flushed;
}
69
/* Emit 3DSTATE_SAMPLER_STATE_POINTERS_* and 3DSTATE_BINDING_TABLE_POINTERS_*
 * packets for each stage in `stages`, pointing the hardware at the sampler
 * and binding-table state previously written by
 * genX(cmd_buffer_flush_descriptor_sets).
 */
GENX_FUNC(GEN7, GEN7) void
genX(cmd_buffer_emit_descriptor_pointers)(struct anv_cmd_buffer *cmd_buffer,
                                          uint32_t stages)
{
   /* _3DCommandSubOpcode per stage for 3DSTATE_SAMPLER_STATE_POINTERS_*;
    * compute (0) has no such packet. */
   static const uint32_t sampler_state_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 43,
      [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
      [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
      [MESA_SHADER_GEOMETRY]                    = 46,
      [MESA_SHADER_FRAGMENT]                    = 47,
      [MESA_SHADER_COMPUTE]                     = 0,
   };

   /* _3DCommandSubOpcode per stage for 3DSTATE_BINDING_TABLE_POINTERS_*. */
   static const uint32_t binding_table_opcodes[] = {
      [MESA_SHADER_VERTEX]                      = 38,
      [MESA_SHADER_TESS_CTRL]                   = 39,
      [MESA_SHADER_TESS_EVAL]                   = 40,
      [MESA_SHADER_GEOMETRY]                    = 41,
      [MESA_SHADER_FRAGMENT]                    = 42,
      [MESA_SHADER_COMPUTE]                     = 0,
   };

   anv_foreach_stage(s, stages) {
      /* Only emit sampler pointers when the stage actually has samplers. */
      if (cmd_buffer->state.samplers[s].alloc_size > 0) {
         anv_batch_emit(&cmd_buffer->batch,
                        GEN7_3DSTATE_SAMPLER_STATE_POINTERS_VS,
                        ._3DCommandSubOpcode  = sampler_state_opcodes[s],
                        .PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset);
      }

      /* Always emit binding table pointers if we're asked to, since on SKL
       * this is what flushes push constants. */
      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_BINDING_TABLE_POINTERS_VS,
                     ._3DCommandSubOpcode  = binding_table_opcodes[s],
                     .PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset);
   }
}
108
/* Write out sampler state and binding tables for every dirty active stage.
 *
 * If the binding-table block runs out of space mid-way, a fresh block is
 * allocated, STATE_BASE_ADDRESS is re-emitted (so surface-state offsets are
 * relative to the new block), and everything is re-emitted for *all* active
 * stages, since the old tables point into the old block.
 *
 * Returns the stage mask that was flushed; the same bits are cleared from
 * state.descriptors_dirty.
 */
GENX_FUNC(GEN7, GEN7) uint32_t
genX(cmd_buffer_flush_descriptor_sets)(struct anv_cmd_buffer *cmd_buffer)
{
   VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty &
                              cmd_buffer->state.pipeline->active_stages;

   VkResult result = VK_SUCCESS;
   anv_foreach_stage(s, dirty) {
      result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                            &cmd_buffer->state.samplers[s]);
      if (result != VK_SUCCESS)
         break;
      result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                 &cmd_buffer->state.binding_tables[s]);
      if (result != VK_SUCCESS)
         break;
   }

   if (result != VK_SUCCESS) {
      /* The only expected failure is running out of binding-table space. */
      assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);

      result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
      assert(result == VK_SUCCESS);

      /* Re-emit state base addresses so we get the new surface state base
       * address before we start emitting binding tables etc.
       */
      anv_cmd_buffer_emit_state_base_address(cmd_buffer);

      /* Re-emit all active binding tables */
      dirty |= cmd_buffer->state.pipeline->active_stages;
      anv_foreach_stage(s, dirty) {
         result = anv_cmd_buffer_emit_samplers(cmd_buffer, s,
                                               &cmd_buffer->state.samplers[s]);
         if (result != VK_SUCCESS)
            return result;
         result = anv_cmd_buffer_emit_binding_table(cmd_buffer, s,
                                                    &cmd_buffer->state.binding_tables[s]);
         if (result != VK_SUCCESS)
            return result;
      }
   }

   cmd_buffer->state.descriptors_dirty &= ~dirty;

   return dirty;
}
156
/* Clamp x to the inclusive range [min, max].  Done in int64_t so callers
 * can feed in intermediate values that overflow the final field width and
 * still get a correctly saturated result. */
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   return (x < max) ? x : max;
}
167
/* Pack `count` scissor rectangles into dynamic state (32 bytes per rect)
 * and emit 3DSTATE_SCISSOR_STATE_POINTERS referencing them.  Out-of-range
 * coordinates are clamped to the 16-bit field range and empty scissors get
 * a canonical empty rectangle.
 */
static void
emit_scissor_state(struct anv_cmd_buffer *cmd_buffer,
                   uint32_t count, const VkRect2D *scissors)
{
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 32, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];

      /* Since xmax and ymax are inclusive, we have to have xmax < xmin or
       * ymax < ymin for empty clips.  In case clip x, y, width height are all
       * 0, the clamps below produce 0 for xmin, ymin, xmax, ymax, which isn't
       * what we want. Just special case empty clips and produce a canonical
       * empty clip. */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 32,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 32, &scissor);
      }
   }

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_SCISSOR_STATE_POINTERS,
                  .ScissorRectPointer = scissor_state.offset);

   /* Without LLC the CPU-written state must be flushed out of the cache
    * before the GPU reads it. */
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(scissor_state);
}
213
/* Emit scissor state from the bound dynamic state, or — when the pipeline
 * has no dynamic scissors — a single default scissor covering the whole
 * currently bound framebuffer. */
GENX_FUNC(GEN7, GEN7) void
genX(cmd_buffer_emit_scissor)(struct anv_cmd_buffer *cmd_buffer)
{
   if (cmd_buffer->state.dynamic.scissor.count > 0) {
      emit_scissor_state(cmd_buffer, cmd_buffer->state.dynamic.scissor.count,
                         cmd_buffer->state.dynamic.scissor.scissors);
   } else {
      /* Emit a default scissor based on the currently bound framebuffer */
      emit_scissor_state(cmd_buffer, 1,
                         &(VkRect2D) {
                            .offset = { .x = 0, .y = 0, },
                            .extent = {
                               .width = cmd_buffer->state.framebuffer->width,
                               .height = cmd_buffer->state.framebuffer->height,
                            },
                         });
   }
}
232
/* VkIndexType -> hardware index buffer format for 3DSTATE_INDEX_BUFFER. */
static const uint32_t vk_to_gen_index_type[] = {
   [VK_INDEX_TYPE_UINT16]                       = INDEX_WORD,
   [VK_INDEX_TYPE_UINT32]                       = INDEX_DWORD,
};

/* Primitive-restart cut index per index type: all bits of the index set. */
static const uint32_t restart_index_for_type[] = {
   [VK_INDEX_TYPE_UINT16]                    = UINT16_MAX,
   [VK_INDEX_TYPE_UINT32]                    = UINT32_MAX,
};
242
/* vkCmdBindIndexBuffer: record the index buffer binding; the actual
 * 3DSTATE_INDEX_BUFFER packet is emitted later from cmd_buffer_flush_state.
 */
void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
   /* Only Haswell has a programmable restart index (3DSTATE_VF.CutIndex);
    * plain gen7 uses the fixed CutIndexEnable path. */
   if (ANV_IS_HASWELL)
      cmd_buffer->state.restart_index = restart_index_for_type[indexType];
   cmd_buffer->state.gen7.index_buffer = buffer;
   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
   cmd_buffer->state.gen7.index_offset = offset;
}
259
/* Emit samplers and binding table for the compute stage, then build an
 * INTERFACE_DESCRIPTOR_DATA in dynamic state and load it with
 * MEDIA_INTERFACE_DESCRIPTOR_LOAD.
 *
 * Returns VK_SUCCESS, or the error from emitting samplers/binding table
 * (e.g. out of binding-table space).
 */
static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   const struct brw_cs_prog_data *cs_prog_data = &pipeline->cs_prog_data;

   /* Interface descriptor lives in dynamic state, 64-byte aligned. */
   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GEN7_INTERFACE_DESCRIPTOR_DATA, 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .BindingTablePointer = surfaces.offset,
                          .SamplerStatePointer = samplers.offset,
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .NumberofThreadsinGPGPUThreadGroup =
                             pipeline->cs_thread_width_max);

   const uint32_t size = GEN7_INTERFACE_DESCRIPTOR_DATA_length * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch, GEN7_MEDIA_INTERFACE_DESCRIPTOR_LOAD,
                  .InterfaceDescriptorTotalLength = size,
                  .InterfaceDescriptorDataStartAddress = state.offset);

   return VK_SUCCESS;
}
296
297 static void
298 cmd_buffer_flush_compute_state(struct anv_cmd_buffer *cmd_buffer)
299 {
300    struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
301    VkResult result;
302
303    assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);
304
305    if (cmd_buffer->state.current_pipeline != GPGPU) {
306       anv_batch_emit(&cmd_buffer->batch, GEN7_PIPELINE_SELECT,
307                      .PipelineSelection = GPGPU);
308       cmd_buffer->state.current_pipeline = GPGPU;
309    }
310
311    if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
312       anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
313
314    if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
315        (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
316       /* FIXME: figure out descriptors for gen7 */
317       result = flush_compute_descriptor_set(cmd_buffer);
318       assert(result == VK_SUCCESS);
319       cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
320    }
321
322    cmd_buffer->state.compute_dirty = 0;
323 }
324
/* Flush all dirty 3D (render) state before a draw: pipeline select, vertex
 * buffers, pipeline batch, descriptors, push constants, viewport/scissor,
 * dynamic SF/CC/depth-stencil state and the index buffer.  Clears the
 * relevant dirty bits at the end.
 */
static void
cmd_buffer_flush_state(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   uint32_t *p;

   /* Only re-emit vertex buffers that are both dirty and used. */
   uint32_t vb_emit = cmd_buffer->state.vb_dirty & pipeline->vb_used;

   assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);

   if (cmd_buffer->state.current_pipeline != _3D) {
      anv_batch_emit(&cmd_buffer->batch, GEN7_PIPELINE_SELECT,
                     .PipelineSelection = _3D);
      cmd_buffer->state.current_pipeline = _3D;
   }

   if (vb_emit) {
      /* 3DSTATE_VERTEX_BUFFERS: 1 header dword + 4 dwords per buffer. */
      const uint32_t num_buffers = __builtin_popcount(vb_emit);
      const uint32_t num_dwords = 1 + num_buffers * 4;

      p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
                          GEN7_3DSTATE_VERTEX_BUFFERS);
      uint32_t vb, i = 0;
      for_each_bit(vb, vb_emit) {
         struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
         uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;

         struct GEN7_VERTEX_BUFFER_STATE state = {
            .VertexBufferIndex = vb,
            .BufferAccessType = pipeline->instancing_enable[vb] ? INSTANCEDATA : VERTEXDATA,
            .VertexBufferMemoryObjectControlState = GEN7_MOCS,
            .AddressModifyEnable = true,
            .BufferPitch = pipeline->binding_stride[vb],
            .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
            .EndAddress = { buffer->bo, buffer->offset + buffer->size - 1},
            .InstanceDataStepRate = 1
         };

         GEN7_VERTEX_BUFFER_STATE_pack(&cmd_buffer->batch, &p[1 + i * 4], &state);
         i++;
      }
   }

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_PIPELINE) {
      /* If somebody compiled a pipeline after starting a command buffer the
       * scratch bo may have grown since we started this cmd buffer (and
       * emitted STATE_BASE_ADDRESS).  If we're binding that pipeline now,
       * reemit STATE_BASE_ADDRESS so that we use the bigger scratch bo. */
      if (cmd_buffer->state.scratch_size < pipeline->total_scratch)
         gen7_cmd_buffer_emit_state_base_address(cmd_buffer);

      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);
   }

   if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
       cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
      /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
       *
       *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
       *    stall needs to be sent just prior to any 3DSTATE_VS,
       *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
       *    3DSTATE_BINDING_TABLE_POINTER_VS,
       *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
       *    PIPE_CONTROL needs to be sent before any combination of VS
       *    associated 3DSTATE."
       */
      anv_batch_emit(&cmd_buffer->batch, GEN7_PIPE_CONTROL,
                     .DepthStallEnable = true,
                     .PostSyncOperation = WriteImmediateData,
                     .Address = { &cmd_buffer->device->workaround_bo, 0 });
   }

   /* Descriptors (samplers + binding tables) and their pointer packets. */
   uint32_t dirty = 0;
   if (cmd_buffer->state.descriptors_dirty) {
      dirty = gen7_cmd_buffer_flush_descriptor_sets(cmd_buffer);
      gen7_cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
   }

   if (cmd_buffer->state.push_constants_dirty)
      cmd_buffer_flush_push_constants(cmd_buffer);

   /* We use the gen8 state here because it only contains the additional
    * min/max fields and, since they occur at the end of the packet and
    * don't change the stride, they work on gen7 too.
    */
   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
      gen8_cmd_buffer_emit_viewport(cmd_buffer);

   if (cmd_buffer->state.dirty & ANV_CMD_DIRTY_DYNAMIC_SCISSOR)
      gen7_cmd_buffer_emit_scissor(cmd_buffer);

   /* 3DSTATE_SF mixes pipeline state with dynamic line width / depth bias,
    * so re-pack and merge with the pipeline's precomputed dwords. */
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {

      bool enable_bias = cmd_buffer->state.dynamic.depth_bias.bias != 0.0f ||
         cmd_buffer->state.dynamic.depth_bias.slope != 0.0f;

      uint32_t sf_dw[GEN7_3DSTATE_SF_length];
      struct GEN7_3DSTATE_SF sf = {
         GEN7_3DSTATE_SF_header,
         .LineWidth = cmd_buffer->state.dynamic.line_width,
         .GlobalDepthOffsetEnableSolid = enable_bias,
         .GlobalDepthOffsetEnableWireframe = enable_bias,
         .GlobalDepthOffsetEnablePoint = enable_bias,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GEN7_3DSTATE_SF_pack(NULL, sf_dw, &sf);

      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
   }

   /* COLOR_CALC_STATE holds the dynamic blend constants and stencil refs. */
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GEN7_COLOR_CALC_STATE_length * 4,
                                            64);
      struct GEN7_COLOR_CALC_STATE cc = {
         .BlendConstantColorRed = cmd_buffer->state.dynamic.blend_constants[0],
         .BlendConstantColorGreen = cmd_buffer->state.dynamic.blend_constants[1],
         .BlendConstantColorBlue = cmd_buffer->state.dynamic.blend_constants[2],
         .BlendConstantColorAlpha = cmd_buffer->state.dynamic.blend_constants[3],
         .StencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.front,
         .BackFaceStencilReferenceValue =
            cmd_buffer->state.dynamic.stencil_reference.back,
      };
      GEN7_COLOR_CALC_STATE_pack(NULL, cc_state.map, &cc);
      /* Flush CPU writes on non-LLC platforms before the GPU reads them. */
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_CC_STATE_POINTERS,
                     .ColorCalcStatePointer = cc_state.offset);
   }

   /* DEPTH_STENCIL_STATE mixes dynamic masks with pipeline state; pack the
    * dynamic fields and merge with the pipeline's precomputed dwords. */
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t depth_stencil_dw[GEN7_DEPTH_STENCIL_STATE_length];

      const struct anv_image_view *iview =
         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);

      struct GEN7_DEPTH_STENCIL_STATE depth_stencil = {
         /* Only enable stencil writes when a stencil aspect is bound. */
         .StencilBufferWriteEnable = iview && (iview->aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT),

         .StencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.front & 0xff,
         .StencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask =
            cmd_buffer->state.dynamic.stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask =
            cmd_buffer->state.dynamic.stencil_write_mask.back & 0xff,
      };
      GEN7_DEPTH_STENCIL_STATE_pack(NULL, depth_stencil_dw, &depth_stencil);

      struct anv_state ds_state =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, depth_stencil_dw,
                                      pipeline->gen7.depth_stencil_state,
                                      GEN7_DEPTH_STENCIL_STATE_length, 64);

      anv_batch_emit(&cmd_buffer->batch,
                     GEN7_3DSTATE_DEPTH_STENCIL_STATE_POINTERS,
                     .PointertoDEPTH_STENCIL_STATE = ds_state.offset);
   }

   if (cmd_buffer->state.gen7.index_buffer &&
       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
      uint32_t offset = cmd_buffer->state.gen7.index_offset;

      /* Haswell can program an arbitrary restart index via 3DSTATE_VF. */
      if (ANV_IS_HASWELL) {
         anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF,
                        .IndexedDrawCutIndexEnable = pipeline->primitive_restart,
                        .CutIndex = cmd_buffer->state.restart_index);
      }

      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_INDEX_BUFFER,
                     .CutIndexEnable = pipeline->primitive_restart,
                     .IndexFormat = cmd_buffer->state.gen7.index_type,
                     .MemoryObjectControlState = GEN7_MOCS,
                     .BufferStartingAddress = { buffer->bo, buffer->offset + offset },
                     .BufferEndingAddress = { buffer->bo, buffer->offset + buffer->size });
   }

   cmd_buffer->state.vb_dirty &= ~vb_emit;
   cmd_buffer->state.dirty = 0;
}
521
/* vkCmdDraw: flush dirty 3D state, then emit a sequential (non-indexed)
 * 3DPRIMITIVE. */
void genX(CmdDraw)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    vertexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstVertex,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DPRIMITIVE,
                  .VertexAccessType = SEQUENTIAL,
                  .PrimitiveTopologyType = pipeline->topology,
                  .VertexCountPerInstance = vertexCount,
                  .StartVertexLocation = firstVertex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = 0);
}
543
/* vkCmdDrawIndexed: flush dirty 3D state, then emit an indexed (RANDOM
 * access) 3DPRIMITIVE using the previously bound index buffer. */
void genX(CmdDrawIndexed)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    indexCount,
    uint32_t                                    instanceCount,
    uint32_t                                    firstIndex,
    int32_t                                     vertexOffset,
    uint32_t                                    firstInstance)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   cmd_buffer_flush_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DPRIMITIVE,
                  .VertexAccessType = RANDOM,
                  .PrimitiveTopologyType = pipeline->topology,
                  .VertexCountPerInstance = indexCount,
                  .StartVertexLocation = firstIndex,
                  .InstanceCount = instanceCount,
                  .StartInstanceLocation = firstInstance,
                  .BaseVertexLocation = vertexOffset);
}
566
/* Emit MI_LOAD_REGISTER_MEM: load MMIO register `reg` from `bo` at
 * `offset` (used to source indirect draw/dispatch parameters). */
static void
gen7_batch_lrm(struct anv_batch *batch,
              uint32_t reg, struct anv_bo *bo, uint32_t offset)
{
   anv_batch_emit(batch, GEN7_MI_LOAD_REGISTER_MEM,
                  .RegisterAddress = reg,
                  .MemoryAddress = { bo, offset });
}
575
/* Emit MI_LOAD_REGISTER_IMM: load MMIO register `reg` with the immediate
 * value `imm`. */
static void
gen7_batch_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
{
   anv_batch_emit(batch, GEN7_MI_LOAD_REGISTER_IMM,
                  .RegisterOffset = reg,
                  .DataDWord = imm);
}
583
584 /* Auto-Draw / Indirect Registers */
585 #define GEN7_3DPRIM_END_OFFSET          0x2420
586 #define GEN7_3DPRIM_START_VERTEX        0x2430
587 #define GEN7_3DPRIM_VERTEX_COUNT        0x2434
588 #define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
589 #define GEN7_3DPRIM_START_INSTANCE      0x243C
590 #define GEN7_3DPRIM_BASE_VERTEX         0x2440
591
/* vkCmdDrawIndirect: load the VkDrawIndirectCommand fields
 * (vertexCount, instanceCount, firstVertex, firstInstance) from the buffer
 * into the 3DPRIM_* registers, then emit a 3DPRIMITIVE with
 * IndirectParameterEnable.
 *
 * NOTE(review): drawCount and stride are not consumed here — only a single
 * draw is emitted; presumably multi-draw indirect is unsupported at this
 * point. Confirm against the device's reported features.
 */
void genX(CmdDrawIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   cmd_buffer_flush_state(cmd_buffer);

   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 12);
   /* Non-indexed draws have no base vertex; force the register to 0. */
   gen7_batch_lri(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, 0);

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = SEQUENTIAL,
                  .PrimitiveTopologyType = pipeline->topology);
}
618
/* vkCmdDrawIndexedIndirect: load the VkDrawIndexedIndirectCommand fields
 * from the buffer into the 3DPRIM_* registers — note the order differs
 * from the non-indexed case because the command layout is (indexCount,
 * instanceCount, firstIndex, vertexOffset, firstInstance) — then emit an
 * indexed 3DPRIMITIVE with IndirectParameterEnable.
 *
 * NOTE(review): drawCount and stride are not consumed here — only a single
 * draw is emitted; presumably multi-draw indirect is unsupported at this
 * point. Confirm against the device's reported features.
 */
void genX(CmdDrawIndexedIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    uint32_t                                    drawCount,
    uint32_t                                    stride)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   cmd_buffer_flush_state(cmd_buffer);

   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_VERTEX_COUNT, bo, bo_offset);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_INSTANCE_COUNT, bo, bo_offset + 4);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_VERTEX, bo, bo_offset + 8);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_BASE_VERTEX, bo, bo_offset + 12);
   gen7_batch_lrm(&cmd_buffer->batch, GEN7_3DPRIM_START_INSTANCE, bo, bo_offset + 16);

   anv_batch_emit(&cmd_buffer->batch, GEN7_3DPRIMITIVE,
                  .IndirectParameterEnable = true,
                  .VertexAccessType = RANDOM,
                  .PrimitiveTopologyType = pipeline->topology);
}
645
/* vkCmdDispatch: flush dirty compute state, then emit a GPGPU_WALKER for
 * an (x, y, z) grid of thread groups, followed by MEDIA_STATE_FLUSH. */
void genX(CmdDispatch)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    x,
    uint32_t                                    y,
    uint32_t                                    z)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;

   cmd_buffer_flush_compute_state(cmd_buffer);

   anv_batch_emit(&cmd_buffer->batch, GEN7_GPGPU_WALKER,
                  /* simd_size of 16 or 32 maps to the 0/1 SIMDSize field. */
                  .SIMDSize = prog_data->simd_size / 16,
                  .ThreadDepthCounterMaximum = 0,
                  .ThreadHeightCounterMaximum = 0,
                  .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
                  .ThreadGroupIDXDimension = x,
                  .ThreadGroupIDYDimension = y,
                  .ThreadGroupIDZDimension = z,
                  .RightExecutionMask = pipeline->cs_right_mask,
                  .BottomExecutionMask = 0xffffffff);

   anv_batch_emit(&cmd_buffer->batch, GEN7_MEDIA_STATE_FLUSH);
}
671
672 #define GPGPU_DISPATCHDIMX 0x2500
673 #define GPGPU_DISPATCHDIMY 0x2504
674 #define GPGPU_DISPATCHDIMZ 0x2508
675
/* vkCmdDispatchIndirect: load the VkDispatchIndirectCommand (x, y, z) from
 * the buffer into the GPGPU_DISPATCHDIM* registers, then emit a
 * GPGPU_WALKER with IndirectParameterEnable, followed by
 * MEDIA_STATE_FLUSH. */
void genX(CmdDispatchIndirect)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct brw_cs_prog_data *prog_data = &pipeline->cs_prog_data;
   struct anv_bo *bo = buffer->bo;
   uint32_t bo_offset = buffer->offset + offset;

   cmd_buffer_flush_compute_state(cmd_buffer);

   gen7_batch_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMX, bo, bo_offset);
   gen7_batch_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMY, bo, bo_offset + 4);
   gen7_batch_lrm(&cmd_buffer->batch, GPGPU_DISPATCHDIMZ, bo, bo_offset + 8);

   anv_batch_emit(&cmd_buffer->batch, GEN7_GPGPU_WALKER,
                  .IndirectParameterEnable = true,
                  /* simd_size of 16 or 32 maps to the 0/1 SIMDSize field. */
                  .SIMDSize = prog_data->simd_size / 16,
                  .ThreadDepthCounterMaximum = 0,
                  .ThreadHeightCounterMaximum = 0,
                  .ThreadWidthCounterMaximum = pipeline->cs_thread_width_max - 1,
                  .RightExecutionMask = pipeline->cs_right_mask,
                  .BottomExecutionMask = 0xffffffff);

   anv_batch_emit(&cmd_buffer->batch, GEN7_MEDIA_STATE_FLUSH);
}
705
/* Emit the packets that bind (or null out) the depth/stencil buffer for the
 * current subpass: 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER,
 * 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_CLEAR_PARAMS.
 */
static void
cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
{
   const struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
   /* NULL when the subpass has no depth/stencil attachment. */
   const struct anv_image_view *iview =
      anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
   const struct anv_image *image = iview ? iview->image : NULL;

   /* XXX: isl needs to grow depth format support */
   const struct anv_format *anv_format =
      iview ? anv_format_for_vk_format(iview->vk_format) : NULL;

   const bool has_depth = iview && anv_format->depth_format;
   const bool has_stencil = iview && anv_format->has_stencil;

   /* Emit 3DSTATE_DEPTH_BUFFER */
   if (has_depth) {
      /* Note: most dimension/pitch fields below are programmed as
       * "value minus one" per the hardware packet definitions.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
         .SurfaceType = SURFTYPE_2D,
         .DepthWriteEnable = true,
         .StencilWriteEnable = has_stencil,
         .HierarchicalDepthBufferEnable = false,
         .SurfaceFormat = anv_format->depth_format,
         .SurfacePitch = image->depth_surface.isl.row_pitch - 1,
         /* NOTE(review): the stencil path below offsets by image->offset +
          * stencil_surface.offset, while this path uses depth_surface.offset
          * alone -- confirm the asymmetry is intentional.
          */
         .SurfaceBaseAddress = {
            .bo = image->bo,
            .offset = image->depth_surface.offset,
         },
         .Height = fb->height - 1,
         .Width = fb->width - 1,
         .LOD = 0,
         .Depth = 1 - 1,                  /* single slice */
         .MinimumArrayElement = 0,
         .DepthBufferObjectControlState = GENX(MOCS),
         .RenderTargetViewExtent = 1 - 1);
   } else {
      /* Even when no depth buffer is present, the hardware requires that
       * 3DSTATE_DEPTH_BUFFER be programmed correctly. The Broadwell PRM says:
       *
       *    If a null depth buffer is bound, the driver must instead bind depth as:
       *       3DSTATE_DEPTH.SurfaceType = SURFTYPE_2D
       *       3DSTATE_DEPTH.Width = 1
       *       3DSTATE_DEPTH.Height = 1
       *       3DSTATE_DEPTH.SuraceFormat = D16_UNORM
       *       3DSTATE_DEPTH.SurfaceBaseAddress = 0
       *       3DSTATE_DEPTH.HierarchicalDepthBufferEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthTestEnable = 0
       *       3DSTATE_WM_DEPTH_STENCIL.DepthBufferWriteEnable = 0
       *
       * The PRM is wrong, though. The width and height must be programmed to
       * actual framebuffer's width and height, even when neither depth buffer
       * nor stencil buffer is present.
       */
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_DEPTH_BUFFER),
         .SurfaceType = SURFTYPE_2D,
         .SurfaceFormat = D16_UNORM,
         .Width = fb->width - 1,
         .Height = fb->height - 1,
         .StencilWriteEnable = has_stencil);
   }

   /* Emit 3DSTATE_STENCIL_BUFFER */
   if (has_stencil) {
      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_STENCIL_BUFFER),
#     if (ANV_IS_HASWELL)
         /* StencilBufferEnable only exists in the HSW packet layout. */
         .StencilBufferEnable = true,
#     endif
         .StencilBufferObjectControlState = GENX(MOCS),

         /* Stencil buffers have strange pitch. The PRM says:
          *
          *    The pitch must be set to 2x the value computed based on width,
          *    as the stencil buffer is stored with two rows interleaved.
          */
         .SurfacePitch = 2 * image->stencil_surface.isl.row_pitch - 1,

         .SurfaceBaseAddress = {
            .bo = image->bo,
            .offset = image->offset + image->stencil_surface.offset,
         });
   } else {
      /* Zeroed packet: no stencil buffer bound. */
      anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_STENCIL_BUFFER);
   }

   /* Disable hierarchical depth buffers. */
   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_HIER_DEPTH_BUFFER);

   /* Clear the clear params. */
   anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_CLEAR_PARAMS);
}
796
797 GENX_FUNC(GEN7, GEN7) void
798 genX(cmd_buffer_begin_subpass)(struct anv_cmd_buffer *cmd_buffer,
799                                struct anv_subpass *subpass)
800 {
801    cmd_buffer->state.subpass = subpass;
802    cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
803    cmd_buffer->state.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
804
805    cmd_buffer_emit_depth_stencil(cmd_buffer);
806 }
807
808 void genX(CmdBeginRenderPass)(
809     VkCommandBuffer                             commandBuffer,
810     const VkRenderPassBeginInfo*                pRenderPassBegin,
811     VkSubpassContents                           contents)
812 {
813    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
814    ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
815    ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
816
817    cmd_buffer->state.framebuffer = framebuffer;
818    cmd_buffer->state.pass = pass;
819
820    const VkRect2D *render_area = &pRenderPassBegin->renderArea;
821
822    anv_batch_emit(&cmd_buffer->batch, GEN7_3DSTATE_DRAWING_RECTANGLE,
823                   .ClippedDrawingRectangleYMin = render_area->offset.y,
824                   .ClippedDrawingRectangleXMin = render_area->offset.x,
825                   .ClippedDrawingRectangleYMax =
826                      render_area->offset.y + render_area->extent.height - 1,
827                   .ClippedDrawingRectangleXMax =
828                      render_area->offset.x + render_area->extent.width - 1,
829                   .DrawingRectangleOriginY = 0,
830                   .DrawingRectangleOriginX = 0);
831
832    anv_cmd_buffer_clear_attachments(cmd_buffer, pass,
833                                     pRenderPassBegin->pClearValues);
834
835    gen7_cmd_buffer_begin_subpass(cmd_buffer, pass->subpasses);
836 }
837
838 void genX(CmdNextSubpass)(
839     VkCommandBuffer                             commandBuffer,
840     VkSubpassContents                           contents)
841 {
842    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
843
844    assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
845
846    gen7_cmd_buffer_begin_subpass(cmd_buffer, cmd_buffer->state.subpass + 1);
847 }
848
849 void genX(CmdEndRenderPass)(
850     VkCommandBuffer                             commandBuffer)
851 {
852    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
853
854    /* Emit a flushing pipe control at the end of a pass.  This is kind of a
855     * hack but it ensures that render targets always actually get written.
856     * Eventually, we should do flushing based on image format transitions
857     * or something of that nature.
858     */
859    anv_batch_emit(&cmd_buffer->batch, GEN7_PIPE_CONTROL,
860                   .PostSyncOperation = NoWrite,
861                   .RenderTargetCacheFlushEnable = true,
862                   .InstructionCacheInvalidateEnable = true,
863                   .DepthCacheFlushEnable = true,
864                   .VFCacheInvalidationEnable = true,
865                   .TextureCacheInvalidationEnable = true,
866                   .CommandStreamerStallEnable = true);
867 }
868
/* vkCmdSetEvent: not yet implemented on gen7; stub() records the missing
 * functionality.
 */
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}
876
/* vkCmdResetEvent: not yet implemented on gen7; stub() records the missing
 * functionality.
 */
void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}
884
/* vkCmdWaitEvents: not yet implemented on gen7; stub() records the missing
 * functionality.
 */
void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memBarrierCount,
    const void* const*                          ppMemBarriers)
{
   stub();
}