src/intel/vulkan/gen7_cmd_buffer.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

#include "anv_private.h"

#include "genxml/gen_macros.h"
#include "genxml/genX_pack.h"

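/* Clamp x to [min, max].  The scissor math below is done in int64_t so that
 * values which overflow a 32-bit int still land on the correct side of the
 * clamp.
 */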
static inline int64_t
clamp_int64(int64_t x, int64_t min, int64_t max)
{
   if (x < min)
      return min;
   else if (x < max)
      return x;
   else
      return max;
}

#if GEN_GEN == 7 && !GEN_IS_HASWELL
void
gen7_cmd_buffer_emit_scissor(struct anv_cmd_buffer *cmd_buffer)
{
   uint32_t count = cmd_buffer->state.dynamic.scissor.count;
   const VkRect2D *scissors = cmd_buffer->state.dynamic.scissor.scissors;
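   /* Each GEN7_SCISSOR_RECT packs to two dwords (8 bytes), hence the
    * count * 8 allocation size; the 32-byte alignment matches what the
    * scissor state pointer packet expects.
    */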
   struct anv_state scissor_state =
      anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, count * 8, 32);

   for (uint32_t i = 0; i < count; i++) {
      const VkRect2D *s = &scissors[i];

      /* Since xmax and ymax are inclusive, an empty clip requires xmax < xmin
       * or ymax < ymin.  When the clip's x, y, width, and height are all 0,
       * the clamps below would produce 0 for xmin, ymin, xmax, and ymax,
       * which is not empty.  Special-case empty clips and emit a canonical
       * empty rectangle instead.
       */
      static const struct GEN7_SCISSOR_RECT empty_scissor = {
         .ScissorRectangleYMin = 1,
         .ScissorRectangleXMin = 1,
         .ScissorRectangleYMax = 0,
         .ScissorRectangleXMax = 0
      };

      const int max = 0xffff;
      struct GEN7_SCISSOR_RECT scissor = {
         /* Do this math using int64_t so overflow gets clamped correctly. */
         .ScissorRectangleYMin = clamp_int64(s->offset.y, 0, max),
         .ScissorRectangleXMin = clamp_int64(s->offset.x, 0, max),
         .ScissorRectangleYMax = clamp_int64((uint64_t) s->offset.y + s->extent.height - 1, 0, max),
         .ScissorRectangleXMax = clamp_int64((uint64_t) s->offset.x + s->extent.width - 1, 0, max)
      };

      if (s->extent.width <= 0 || s->extent.height <= 0) {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8,
                                &empty_scissor);
      } else {
         GEN7_SCISSOR_RECT_pack(NULL, scissor_state.map + i * 8, &scissor);
      }
   }

   anv_batch_emit(&cmd_buffer->batch,
                  GEN7_3DSTATE_SCISSOR_STATE_POINTERS, ssp) {
      ssp.ScissorRectPointer = scissor_state.offset;
   }

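   /* On platforms without LLC, the dynamic state pool is not coherent with
    * the GPU, so flush the CPU cache lines before the state is consumed.
    */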
   if (!cmd_buffer->device->info.has_llc)
      anv_state_clflush(scissor_state);
}
#endif

static const uint32_t vk_to_gen_index_type[] = {
   [VK_INDEX_TYPE_UINT16]                       = INDEX_WORD,
   [VK_INDEX_TYPE_UINT32]                       = INDEX_DWORD,
};

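/* Vulkan defines the primitive restart index as all ones for the bound index
 * type: 0xffff for 16-bit indices and 0xffffffff for 32-bit indices.
 */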
static const uint32_t restart_index_for_type[] = {
   [VK_INDEX_TYPE_UINT16]                    = UINT16_MAX,
   [VK_INDEX_TYPE_UINT32]                    = UINT32_MAX,
};

void genX(CmdBindIndexBuffer)(
    VkCommandBuffer                             commandBuffer,
    VkBuffer                                    _buffer,
    VkDeviceSize                                offset,
    VkIndexType                                 indexType)
{
   ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
   ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);

   cmd_buffer->state.dirty |= ANV_CMD_DIRTY_INDEX_BUFFER;
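   /* Only Haswell has a programmable cut index (via 3DSTATE_VF); on
    * Ivybridge the restart index is implied by the index format, so there is
    * nothing to track here.
    */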
   if (GEN_IS_HASWELL)
      cmd_buffer->state.restart_index = restart_index_for_type[indexType];
   cmd_buffer->state.gen7.index_buffer = buffer;
   cmd_buffer->state.gen7.index_type = vk_to_gen_index_type[indexType];
   cmd_buffer->state.gen7.index_offset = offset;
}

static VkResult
flush_compute_descriptor_set(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_device *device = cmd_buffer->device;
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   struct anv_state surfaces = { 0, }, samplers = { 0, };
   VkResult result;

   result = anv_cmd_buffer_emit_samplers(cmd_buffer,
                                         MESA_SHADER_COMPUTE, &samplers);
   if (result != VK_SUCCESS)
      return result;
   result = anv_cmd_buffer_emit_binding_table(cmd_buffer,
                                              MESA_SHADER_COMPUTE, &surfaces);
   if (result != VK_SUCCESS)
      return result;

   struct anv_state push_state = anv_cmd_buffer_cs_push_constants(cmd_buffer);

   const struct brw_cs_prog_data *cs_prog_data = get_cs_prog_data(pipeline);
   const struct brw_stage_prog_data *prog_data = &cs_prog_data->base;

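   /* Compute push constants are sourced from the CURBE rather than from the
    * 3DSTATE_CONSTANT_* packets used by the 3D pipeline, so upload them with
    * MEDIA_CURBE_LOAD.
    */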
   if (push_state.alloc_size) {
      anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
         curbe.CURBETotalDataLength    = push_state.alloc_size;
         curbe.CURBEDataStartAddress   = push_state.offset;
      }
   }

   const uint32_t slm_size = encode_slm_size(GEN_GEN, prog_data->total_shared);

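   /* The interface descriptor carries the kernel entry point along with its
    * binding table, samplers, SLM size, and CURBE read layout.  It lives in
    * dynamic state and is referenced by offset from the
    * MEDIA_INTERFACE_DESCRIPTOR_LOAD packet emitted below.
    */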
   struct anv_state state =
      anv_state_pool_emit(&device->dynamic_state_pool,
                          GENX(INTERFACE_DESCRIPTOR_DATA), 64,
                          .KernelStartPointer = pipeline->cs_simd,
                          .BindingTablePointer = surfaces.offset,
                          .SamplerStatePointer = samplers.offset,
                          .ConstantURBEntryReadLength =
                             cs_prog_data->push.per_thread.regs,
#if GEN_IS_HASWELL
                          .CrossThreadConstantDataReadLength =
                             cs_prog_data->push.cross_thread.regs,
#else
                          .ConstantURBEntryReadOffset = 0,
#endif
                          .BarrierEnable = cs_prog_data->uses_barrier,
                          .SharedLocalMemorySize = slm_size,
                          .NumberofThreadsinGPGPUThreadGroup =
                             cs_prog_data->threads);

   const uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
   anv_batch_emit(&cmd_buffer->batch,
                  GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), idl) {
      idl.InterfaceDescriptorTotalLength      = size;
      idl.InterfaceDescriptorDataStartAddress = state.offset;
   }

   return VK_SUCCESS;
}

void
genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.compute_pipeline;
   MAYBE_UNUSED VkResult result;

   assert(pipeline->active_stages == VK_SHADER_STAGE_COMPUTE_BIT);

   genX(cmd_buffer_config_l3)(cmd_buffer, pipeline);

   genX(flush_pipeline_select_gpgpu)(cmd_buffer);

   if (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)
      anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->batch);

   if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
       (cmd_buffer->state.compute_dirty & ANV_CMD_DIRTY_PIPELINE)) {
      /* FIXME: figure out descriptors for gen7 */
      result = flush_compute_descriptor_set(cmd_buffer);
      assert(result == VK_SUCCESS);
      cmd_buffer->state.descriptors_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
   }

   cmd_buffer->state.compute_dirty = 0;

   genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
}

void
genX(cmd_buffer_flush_dynamic_state)(struct anv_cmd_buffer *cmd_buffer)
{
   struct anv_pipeline *pipeline = cmd_buffer->state.pipeline;

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_LINE_WIDTH |
                                  ANV_CMD_DIRTY_DYNAMIC_DEPTH_BIAS)) {

      const struct anv_image_view *iview =
         anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
      const struct anv_image *image = iview ? iview->image : NULL;
      const bool has_depth =
         image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT);
      const uint32_t depth_format = has_depth ?
         isl_surf_get_depth_format(&cmd_buffer->device->isl_dev,
                                   &image->depth_surface.isl) : D16_UNORM;

      uint32_t sf_dw[GENX(3DSTATE_SF_length)];
      struct GENX(3DSTATE_SF) sf = {
         GENX(3DSTATE_SF_header),
         .DepthBufferSurfaceFormat = depth_format,
         .LineWidth = cmd_buffer->state.dynamic.line_width,
         .GlobalDepthOffsetConstant = cmd_buffer->state.dynamic.depth_bias.bias,
         .GlobalDepthOffsetScale = cmd_buffer->state.dynamic.depth_bias.slope,
         .GlobalDepthOffsetClamp = cmd_buffer->state.dynamic.depth_bias.clamp
      };
      GENX(3DSTATE_SF_pack)(NULL, sf_dw, &sf);

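      /* OR the dynamic fields packed above into the 3DSTATE_SF dwords that
       * were baked into the pipeline at creation time, and emit the merged
       * packet.
       */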
      anv_batch_emit_merge(&cmd_buffer->batch, sf_dw, pipeline->gen7.sf);
   }

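   /* On gen7 both the blend constants and the stencil reference values live
    * in COLOR_CALC_STATE, so a change to either requires re-emitting it.
    */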
   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_DYNAMIC_BLEND_CONSTANTS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_REFERENCE)) {
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;
      struct anv_state cc_state =
         anv_cmd_buffer_alloc_dynamic_state(cmd_buffer,
                                            GENX(COLOR_CALC_STATE_length) * 4,
                                            64);
      struct GENX(COLOR_CALC_STATE) cc = {
         .BlendConstantColorRed = d->blend_constants[0],
         .BlendConstantColorGreen = d->blend_constants[1],
         .BlendConstantColorBlue = d->blend_constants[2],
         .BlendConstantColorAlpha = d->blend_constants[3],
         .StencilReferenceValue = d->stencil_reference.front & 0xff,
         .BackfaceStencilReferenceValue = d->stencil_reference.back & 0xff,
      };
      GENX(COLOR_CALC_STATE_pack)(NULL, cc_state.map, &cc);
      if (!cmd_buffer->device->info.has_llc)
         anv_state_clflush(cc_state);

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), ccp) {
         ccp.ColorCalcStatePointer = cc_state.offset;
      }
   }

   if (cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_RENDER_TARGETS |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_COMPARE_MASK |
                                  ANV_CMD_DIRTY_DYNAMIC_STENCIL_WRITE_MASK)) {
      uint32_t depth_stencil_dw[GENX(DEPTH_STENCIL_STATE_length)];
      struct anv_dynamic_state *d = &cmd_buffer->state.dynamic;

      struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
         .StencilTestMask = d->stencil_compare_mask.front & 0xff,
         .StencilWriteMask = d->stencil_write_mask.front & 0xff,

         .BackfaceStencilTestMask = d->stencil_compare_mask.back & 0xff,
         .BackfaceStencilWriteMask = d->stencil_write_mask.back & 0xff,
      };
      GENX(DEPTH_STENCIL_STATE_pack)(NULL, depth_stencil_dw, &depth_stencil);

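      /* Merge the dynamic stencil masks packed above with the
       * DEPTH_STENCIL_STATE dwords baked into the pipeline, writing the
       * result into a fresh dynamic state allocation that the pointer packet
       * below references.
       */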
      struct anv_state ds_state =
         anv_cmd_buffer_merge_dynamic(cmd_buffer, depth_stencil_dw,
                                      pipeline->gen7.depth_stencil_state,
                                      GENX(DEPTH_STENCIL_STATE_length), 64);

      anv_batch_emit(&cmd_buffer->batch,
                     GENX(3DSTATE_DEPTH_STENCIL_STATE_POINTERS), dsp) {
         dsp.PointertoDEPTH_STENCIL_STATE = ds_state.offset;
      }
   }

   if (cmd_buffer->state.gen7.index_buffer &&
       cmd_buffer->state.dirty & (ANV_CMD_DIRTY_PIPELINE |
                                  ANV_CMD_DIRTY_INDEX_BUFFER)) {
      struct anv_buffer *buffer = cmd_buffer->state.gen7.index_buffer;
      uint32_t offset = cmd_buffer->state.gen7.index_offset;

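      /* Haswell gained a dedicated 3DSTATE_VF packet with a programmable cut
       * index; Ivybridge can only toggle CutIndexEnable in
       * 3DSTATE_INDEX_BUFFER, with the cut index fixed at all ones for the
       * bound index format.
       */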
#if GEN_IS_HASWELL
      anv_batch_emit(&cmd_buffer->batch, GEN75_3DSTATE_VF, vf) {
         vf.IndexedDrawCutIndexEnable  = pipeline->primitive_restart;
         vf.CutIndex                   = cmd_buffer->state.restart_index;
      }
#endif

      anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_INDEX_BUFFER), ib) {
#if !GEN_IS_HASWELL
         ib.CutIndexEnable             = pipeline->primitive_restart;
#endif
         ib.IndexFormat                = cmd_buffer->state.gen7.index_type;
         ib.MemoryObjectControlState   = GENX(MOCS);

         ib.BufferStartingAddress =
            (struct anv_address) { buffer->bo, buffer->offset + offset };
         ib.BufferEndingAddress =
            (struct anv_address) { buffer->bo, buffer->offset + buffer->size };
      }
   }

   cmd_buffer->state.dirty = 0;
}

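/* VkEvent support is not implemented on gen7 yet; these entry points are
 * stubs.
 */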
void genX(CmdSetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdResetEvent)(
    VkCommandBuffer                             commandBuffer,
    VkEvent                                     event,
    VkPipelineStageFlags                        stageMask)
{
   stub();
}

void genX(CmdWaitEvents)(
    VkCommandBuffer                             commandBuffer,
    uint32_t                                    eventCount,
    const VkEvent*                              pEvents,
    VkPipelineStageFlags                        srcStageMask,
    VkPipelineStageFlags                        destStageMask,
    uint32_t                                    memoryBarrierCount,
    const VkMemoryBarrier*                      pMemoryBarriers,
    uint32_t                                    bufferMemoryBarrierCount,
    const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
    uint32_t                                    imageMemoryBarrierCount,
    const VkImageMemoryBarrier*                 pImageMemoryBarriers)
{
   stub();

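   /* Without real event support, approximate the wait with a pipeline
    * barrier.  This is stronger synchronization than the application asked
    * for, but it does honor the memory barriers that were passed in.
    */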
   genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
                            false, /* byRegion */
                            memoryBarrierCount, pMemoryBarriers,
                            bufferMemoryBarrierCount, pBufferMemoryBarriers,
                            imageMemoryBarrierCount, pImageMemoryBarriers);
}