android-x86/external-mesa.git: src/intel/vulkan/anv_nir_apply_dynamic_offsets.c
/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "anv_nir.h"
#include "nir/nir_builder.h"

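/*
 * This pass handles descriptors with dynamic offsets
 * (VK_DESCRIPTOR_TYPE_{UNIFORM,STORAGE}_BUFFER_DYNAMIC).  For every
 * load_ubo/load_ssbo/store_ssbo whose resource index comes from such a
 * binding, it loads the binding's (offset, range) pair from uniforms set up
 * at the end of this file, adds the dynamic offset to the access, and, when
 * robust buffer access is enabled, predicates the access on the offset
 * being in range.
 */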
static void
apply_dynamic_offsets_block(nir_block *block, nir_builder *b,
                            const struct anv_pipeline_layout *layout,
                            bool add_bounds_checks,
                            uint32_t indices_start)
{
   struct anv_descriptor_set_layout *set_layout;

   nir_foreach_instr_safe(instr, block) {
      if (instr->type != nir_instr_type_intrinsic)
         continue;

      nir_intrinsic_instr *intrin = nir_instr_as_intrinsic(instr);

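      /* Find which source holds the buffer (block) index: src[0] for UBO
       * and SSBO loads, src[1] for SSBO stores (whose src[0] is the value
       * being written).
       */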
      unsigned block_idx_src;
      switch (intrin->intrinsic) {
      case nir_intrinsic_load_ubo:
      case nir_intrinsic_load_ssbo:
         block_idx_src = 0;
         break;
      case nir_intrinsic_store_ssbo:
         block_idx_src = 1;
         break;
      default:
         continue; /* the loop */
      }

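      /* The block index must come directly from a vulkan_resource_index
       * intrinsic, which carries the descriptor set and binding as constant
       * indices.
       */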
      nir_instr *res_instr = intrin->src[block_idx_src].ssa->parent_instr;
      assert(res_instr->type == nir_instr_type_intrinsic);
      nir_intrinsic_instr *res_intrin = nir_instr_as_intrinsic(res_instr);
      assert(res_intrin->intrinsic == nir_intrinsic_vulkan_resource_index);

      unsigned set = res_intrin->const_index[0];
      unsigned binding = res_intrin->const_index[1];

      set_layout = layout->set[set].layout;
      if (set_layout->binding[binding].dynamic_offset_index < 0)
         continue;

      b->cursor = nir_before_instr(&intrin->instr);

      /* First, we need to generate the uniform load for the buffer offset */
      uint32_t index = layout->set[set].dynamic_offset_start +
                       set_layout->binding[binding].dynamic_offset_index;
      uint32_t array_size = set_layout->binding[binding].array_size;

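      /* Each dynamic buffer occupies 8 bytes of uniform space: a 32-bit
       * offset followed by a 32-bit range (see the param setup in
       * anv_nir_apply_dynamic_offsets below), so all indexing here is
       * scaled by 8.  For arrayed bindings, src[0] of the resource-index
       * intrinsic selects the array element.
       */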
      nir_intrinsic_instr *offset_load =
         nir_intrinsic_instr_create(b->shader, nir_intrinsic_load_uniform);
      offset_load->num_components = 2;
      nir_intrinsic_set_base(offset_load, indices_start + index * 8);
      nir_intrinsic_set_range(offset_load, array_size * 8);
      offset_load->src[0] = nir_src_for_ssa(nir_imul(b, res_intrin->src[0].ssa,
                                                     nir_imm_int(b, 8)));

      nir_ssa_dest_init(&offset_load->instr, &offset_load->dest, 2, 32, NULL);
      nir_builder_instr_insert(b, &offset_load->instr);

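      /* Add the dynamic offset to the byte offset the intrinsic already
       * carries.
       */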
      nir_src *offset_src = nir_get_io_offset_src(intrin);
      nir_ssa_def *old_offset = nir_ssa_for_src(b, *offset_src, 1);
      nir_ssa_def *new_offset = nir_iadd(b, old_offset, &offset_load->dest.ssa);
      nir_instr_rewrite_src(&intrin->instr, offset_src,
                            nir_src_for_ssa(new_offset));

      if (!add_bounds_checks)
         continue;

      /* In order to avoid out-of-bounds access, we predicate the load/store
       * on the original offset lying within the buffer's dynamic range
       * (component 1 of the uniform loaded above).
       */
      nir_ssa_def *pred = nir_uge(b, nir_channel(b, &offset_load->dest.ssa, 1),
                                  old_offset);
      nir_if *if_stmt = nir_if_create(b->shader);
      if_stmt->condition = nir_src_for_ssa(pred);
      nir_cf_node_insert(b->cursor, &if_stmt->cf_node);

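      /* Move the load/store into the then-block so that it only executes
       * when the offset is in bounds.
       */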
      nir_instr_remove(&intrin->instr);
      nir_instr_insert_after_cf_list(&if_stmt->then_list, &intrin->instr);

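      /* A store can simply be skipped when out of bounds, but a load still
       * has uses to satisfy: merge the loaded value with zero from the else
       * branch through a phi, so an out-of-bounds load returns zero.
       */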
      if (intrin->intrinsic != nir_intrinsic_store_ssbo) {
         /* It's a load, we need a phi node */
         nir_phi_instr *phi = nir_phi_instr_create(b->shader);
         nir_ssa_dest_init(&phi->instr, &phi->dest,
                           intrin->num_components,
                           intrin->dest.ssa.bit_size, NULL);

         nir_phi_src *src1 = ralloc(phi, nir_phi_src);
         struct exec_node *tnode = exec_list_get_tail(&if_stmt->then_list);
         src1->pred = exec_node_data(nir_block, tnode, cf_node.node);
         src1->src = nir_src_for_ssa(&intrin->dest.ssa);
         exec_list_push_tail(&phi->srcs, &src1->node);

         b->cursor = nir_after_cf_list(&if_stmt->else_list);
         nir_const_value zero_val = { .u32 = { 0, 0, 0, 0 } };
         nir_ssa_def *zero = nir_build_imm(b, intrin->num_components,
                                           intrin->dest.ssa.bit_size, zero_val);

         nir_phi_src *src2 = ralloc(phi, nir_phi_src);
         struct exec_node *enode = exec_list_get_tail(&if_stmt->else_list);
         src2->pred = exec_node_data(nir_block, enode, cf_node.node);
         src2->src = nir_src_for_ssa(zero);
         exec_list_push_tail(&phi->srcs, &src2->node);

         assert(intrin->dest.is_ssa);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(&phi->dest.ssa));

         nir_instr_insert_after_cf(&if_stmt->cf_node, &phi->instr);
      }
   }
}

void
anv_nir_apply_dynamic_offsets(struct anv_pipeline *pipeline,
                              nir_shader *shader,
                              struct brw_stage_prog_data *prog_data)
{
   const struct anv_pipeline_layout *layout = pipeline->layout;
   if (!layout || !layout->stage[shader->stage].has_dynamic_offsets)
      return;

   const bool add_bounds_checks = pipeline->device->robust_buffer_access;

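   /* The (offset, range) uniforms are appended after the shader's existing
    * uniforms, so each block is rewritten with indices starting at
    * shader->num_uniforms.
    */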
   nir_foreach_function(function, shader) {
      if (!function->impl)
         continue;

      nir_builder builder;
      nir_builder_init(&builder, function->impl);

      nir_foreach_block(block, function->impl) {
         apply_dynamic_offsets_block(block, &builder, pipeline->layout,
                                     add_bounds_checks, shader->num_uniforms);
      }

      nir_metadata_preserve(function->impl, nir_metadata_block_index |
                                            nir_metadata_dominance);
   }

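   /* Set up the uniform data the pass reads.  Taking field addresses off a
    * NULL anv_push_constants pointer is effectively offsetof(): each param
    * entry records where the dynamic offset/range for buffer i lives in the
    * push constants, presumably resolved when the driver uploads push
    * constant data.  Two 4-byte params (8 bytes) per dynamic buffer.
    */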
   struct anv_push_constants *null_data = NULL;
   for (unsigned i = 0; i < MAX_DYNAMIC_BUFFERS; i++) {
      prog_data->param[i * 2 + shader->num_uniforms / 4] =
         (const union gl_constant_value *)&null_data->dynamic[i].offset;
      prog_data->param[i * 2 + 1 + shader->num_uniforms / 4] =
         (const union gl_constant_value *)&null_data->dynamic[i].range;
   }

   /* 8 bytes (offset + range) per dynamic buffer */
   shader->num_uniforms += MAX_DYNAMIC_BUFFERS * 8;
}