android-x86/hardware-intel-common-vaapi.git: src/gen10_hcp_common.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Peng Chen <peng.c.chen@intel.com>
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>

#include "intel_batchbuffer.h"
#include "i965_defines.h"
#include "i965_drv_video.h"
#include "gen10_hcp_common.h"

/*
 * Emit a fixed-layout HCP state command: a single header DWord carrying
 * command_flag and the DWord count (total length minus 2), followed by the
 * contents of *param copied verbatim into the batch. Both `batch` and
 * `param` come from the arguments of the enclosing wrapper function.
 */
#define HCP_WRITE_COMMANDS(command_flag)                        \
    {                                                           \
        int cmd_size = sizeof(*param) / sizeof(uint32_t);       \
        BEGIN_BCS_BATCH(batch, cmd_size + 1);                   \
        OUT_BCS_BATCH(batch, (command_flag) | (cmd_size - 1));  \
        intel_batchbuffer_data(batch, param, sizeof(*param));   \
        ADVANCE_BCS_BATCH(batch);                               \
    }

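/*
 * Usage sketch (illustrative only, not part of the original file): a caller
 * fills one of the gen10_hcp_*_param structures declared in
 * gen10_hcp_common.h and hands it to the matching helper below; the whole
 * structure is copied into the BCS batch right after the command header
 * DWord.
 *
 *     gen10_hcp_pipe_mode_select_param pipe_mode;
 *
 *     memset(&pipe_mode, 0, sizeof(pipe_mode));
 *     // ... fill in the pipe-mode fields defined in gen10_hcp_common.h ...
 *     gen10_hcp_pipe_mode_select(ctx, batch, &pipe_mode);
 */
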
void
gen10_hcp_pipe_mode_select(VADriverContextP ctx,
                           struct intel_batchbuffer *batch,
                           gen10_hcp_pipe_mode_select_param *param)
{
    HCP_WRITE_COMMANDS(HCP_PIPE_MODE_SELECT);
}

void
gen10_hcp_surface_state(VADriverContextP ctx,
                        struct intel_batchbuffer *batch,
                        gen10_hcp_surface_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_SURFACE_STATE);
}

void
gen10_hcp_pic_state(VADriverContextP ctx,
                    struct intel_batchbuffer *batch,
                    gen10_hcp_pic_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_PIC_STATE);
}

void
gen10_hcp_vp9_pic_state(VADriverContextP ctx,
                        struct intel_batchbuffer *batch,
                        gen10_hcp_vp9_pic_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_VP9_PIC_STATE);
}

void
gen10_hcp_qm_state(VADriverContextP ctx,
                   struct intel_batchbuffer *batch,
                   gen10_hcp_qm_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_QM_STATE);
}

void
gen10_hcp_fqm_state(VADriverContextP ctx,
                    struct intel_batchbuffer *batch,
                    gen10_hcp_fqm_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_FQM_STATE);
}

void
gen10_hcp_rdoq_state(VADriverContextP ctx,
                     struct intel_batchbuffer *batch,
                     gen10_hcp_rdoq_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_RDOQ_STATE);
}

void
gen10_hcp_weightoffset_state(VADriverContextP ctx,
                             struct intel_batchbuffer *batch,
                             gen10_hcp_weightoffset_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_WEIGHTOFFSET);
}

void
gen10_hcp_slice_state(VADriverContextP ctx,
                      struct intel_batchbuffer *batch,
                      gen10_hcp_slice_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_SLICE_STATE);
}

void
gen10_hcp_ref_idx_state(VADriverContextP ctx,
                        struct intel_batchbuffer *batch,
                        gen10_hcp_ref_idx_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_REF_IDX_STATE);
}

void
gen10_hcp_vp9_segment_state(VADriverContextP ctx,
                            struct intel_batchbuffer *batch,
                            gen10_hcp_vp9_segment_state_param *param)
{
    HCP_WRITE_COMMANDS(HCP_VP9_SEGMENT_STATE);
}

void
gen10_hcp_pak_insert_object(VADriverContextP ctx,
                            struct intel_batchbuffer *batch,
                            gen10_hcp_pak_insert_object_param *param)
{
    int payload_bits = param->inline_payload_bits;
    /* Round the inline bit payload up to whole DWords. */
    int cmd_size_in_dw = ALIGN(payload_bits, 32) >> 5;

    BEGIN_BCS_BATCH(batch, cmd_size_in_dw + 2);

    OUT_BCS_BATCH(batch, HCP_INSERT_PAK_OBJECT | (cmd_size_in_dw));

    OUT_BCS_BATCH(batch, param->dw1.value);
    intel_batchbuffer_data(batch, param->inline_payload_ptr,
                           cmd_size_in_dw * 4);

    ADVANCE_BCS_BATCH(batch);
}

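/*
 * Usage sketch (illustrative only; the exact dw1 bit-field layout lives in
 * gen10_hcp_common.h and is treated as an assumption here): pass a packed,
 * bit-aligned header buffer and its length in bits; the helper above pads
 * the copy out to whole DWords when writing it into the batch.
 *
 *     uint8_t packed_header[64];          // filled by the caller
 *     gen10_hcp_pak_insert_object_param insert;
 *
 *     memset(&insert, 0, sizeof(insert));
 *     insert.dw1.value = 0;               // e.g. last-header/end-of-slice flags
 *     insert.inline_payload_bits = packed_header_length_in_bits;
 *     insert.inline_payload_ptr = packed_header;
 *     gen10_hcp_pak_insert_object(ctx, batch, &insert);
 */
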
#define OUT_BUFFER_2DW(batch, gpe_res, is_target, delta)  do {              \
        if (gpe_res) {                                                      \
            struct i965_gpe_resource *res = gpe_res;                        \
            dri_bo *bo = res->bo;                                           \
            if (bo) {                                                       \
                OUT_BCS_RELOC64(batch,                                      \
                                bo,                                         \
                                I915_GEM_DOMAIN_RENDER,                     \
                                is_target ? I915_GEM_DOMAIN_RENDER : 0,     \
                                delta);                                     \
            } else {                                                        \
                OUT_BCS_BATCH(batch, 0);                                    \
                OUT_BCS_BATCH(batch, 0);                                    \
            }                                                               \
        } else {                                                            \
            OUT_BCS_BATCH(batch, 0);                                        \
            OUT_BCS_BATCH(batch, 0);                                        \
        }                                                                   \
    } while (0)

#define OUT_BUFFER_3DW(batch, gpe_res, is_target, delta)        do { \
        OUT_BUFFER_2DW(batch, gpe_res, is_target, delta);            \
        if (gpe_res)                                                 \
            OUT_BCS_BATCH(batch, i965->intel.mocs_state);            \
        else                                                         \
            OUT_BCS_BATCH(batch, 0);                                 \
    } while (0)

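/*
 * Layout sketch (descriptive, derived from the macros above): for a bound
 * buffer, OUT_BUFFER_2DW emits a 64-bit graphics address as a relocation
 * against res->bo, or two zero DWords when gpe_res is NULL or has no backing
 * bo. OUT_BUFFER_3DW appends one more DWord of memory attributes taken from
 * i965->intel.mocs_state, so it relies on an `i965` driver-data pointer
 * being in scope at the call site.
 */
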
void
gen10_hcp_pipe_buf_addr_state(VADriverContextP ctx,
                              struct intel_batchbuffer *batch,
                              gen10_hcp_pipe_buf_addr_state_param *param)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    int i = 0;

    BEGIN_BCS_BATCH(batch, 104);

    OUT_BCS_BATCH(batch, HCP_PIPE_BUF_ADDR_STATE | (104 - 2));

    /* DW 1..3 */
    OUT_BUFFER_3DW(batch, param->reconstructed,
                   1, 0);

    /* DW 4..6 */
    OUT_BUFFER_3DW(batch, param->deblocking_filter_line,
                   1, 0);

    /* DW 7..9 */
    OUT_BUFFER_3DW(batch, param->deblocking_filter_tile_line,
                   1, 0);

    /* DW 10..12 */
    OUT_BUFFER_3DW(batch, param->deblocking_filter_tile_column,
                   1, 0);

    /* DW 13..15 */
    OUT_BUFFER_3DW(batch, param->metadata_line,
                   1, 0);

    /* DW 16..18 */
    OUT_BUFFER_3DW(batch, param->metadata_tile_line,
                   1, 0);

    /* DW 19..21 */
    OUT_BUFFER_3DW(batch, param->metadata_tile_column,
                   1, 0);

    /* DW 22..24 */
    OUT_BUFFER_3DW(batch, param->sao_line,
                   1, 0);

    /* DW 25..27 */
    OUT_BUFFER_3DW(batch, param->sao_tile_line,
                   1, 0);

    /* DW 28..30 */
    OUT_BUFFER_3DW(batch, param->sao_tile_column,
                   1, 0);

    /* DW 31..33 */
    OUT_BUFFER_3DW(batch, param->current_motion_vector_temporal,
                   1, 0);

    /* DW 34..36 */
    OUT_BUFFER_3DW(batch, NULL, 0, 0);

    /* DW 37..52 */
    for (i = 0; i < 8; i++)
        OUT_BUFFER_2DW(batch, param->reference_picture[i],
                       0, 0);

    /* DW 53 */
    OUT_BCS_BATCH(batch, i965->intel.mocs_state);

    /* DW 54..56 */
    OUT_BUFFER_3DW(batch, param->uncompressed_picture,
                   0, 0);

    /* DW 57..59 */
    OUT_BUFFER_3DW(batch, param->streamout_data_destination,
                   1, 0);

    /* DW 60..62 */
    OUT_BUFFER_3DW(batch, param->picture_status,
                   1, 0);

    /* DW 63..65 */
    OUT_BUFFER_3DW(batch, param->ildb_streamout,
                   1, 0);

    /* DW 66..81 */
    for (i = 0; i < 8; i++)
        OUT_BUFFER_2DW(batch, param->collocated_motion_vector_temporal[i],
                       0, 0);

    /* DW 82 */
    OUT_BCS_BATCH(batch, i965->intel.mocs_state);

    /* DW 83..85 */
    OUT_BUFFER_3DW(batch, param->vp9_probability,
                   1, 0);

    /* DW 86..88 */
    OUT_BUFFER_3DW(batch, param->vp9_segmentid,
                   1, 0);

    /* DW 89..91 */
    OUT_BUFFER_3DW(batch, param->vp9_hvd_line_rowstore,
                   1, 0);

    /* DW 92..94 */
    OUT_BUFFER_3DW(batch, param->vp9_hvd_time_rowstore,
                   1, 0);

    /* DW 95..97 */
    OUT_BUFFER_3DW(batch, param->sao_streamout_data_destination,
                   1, 0);

    /* DW 98..100 */
    OUT_BUFFER_3DW(batch, param->frame_statics_streamout_data_destination,
                   1, 0);

    /* DW 101..103 */
    OUT_BUFFER_3DW(batch, param->sse_source_pixel_rowstore,
                   1, 0);

    ADVANCE_BCS_BATCH(batch);
}

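/*
 * Usage sketch (illustrative only; the resource names below are placeholders
 * owned by the caller): each member of gen10_hcp_pipe_buf_addr_state_param
 * points at a struct i965_gpe_resource, and members left NULL simply emit
 * zeroed address DWords through OUT_BUFFER_2DW/OUT_BUFFER_3DW.
 *
 *     gen10_hcp_pipe_buf_addr_state_param buf_addr;
 *
 *     memset(&buf_addr, 0, sizeof(buf_addr));
 *     buf_addr.reconstructed = &recon_res;
 *     buf_addr.uncompressed_picture = &source_res;
 *     buf_addr.streamout_data_destination = &streamout_res;
 *     gen10_hcp_pipe_buf_addr_state(ctx, batch, &buf_addr);
 */
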
void
gen10_hcp_ind_obj_base_addr_state(VADriverContextP ctx,
                                  struct intel_batchbuffer *batch,
                                  gen10_hcp_ind_obj_base_addr_state_param *param)
{
    struct i965_driver_data *i965 = i965_driver_data(ctx);

    BEGIN_BCS_BATCH(batch, 29);

    OUT_BCS_BATCH(batch, HCP_IND_OBJ_BASE_ADDR_STATE | (29 - 2));

    /* DW 1..5 */
    OUT_BUFFER_3DW(batch, NULL, 0, 0);
    OUT_BUFFER_2DW(batch, NULL, 0, 0);

    /* DW 6..8 */
    OUT_BUFFER_3DW(batch,
                   param->ind_cu_obj_bse,
                   0,
                   param->ind_cu_obj_bse_offset);

    /* DW 9..13 */
    OUT_BUFFER_3DW(batch,
                   param->ind_pak_bse,
                   1,
                   param->ind_pak_bse_offset);

    OUT_BUFFER_2DW(batch,
                   param->ind_pak_bse,
                   1,
                   param->ind_pak_bse_upper);

    /* DW 14..16 */
    OUT_BUFFER_3DW(batch, NULL, 0, 0);

    /* DW 17..19 */
    OUT_BUFFER_3DW(batch, NULL, 0, 0);

    /* DW 20..22 */
    OUT_BUFFER_3DW(batch, NULL, 0, 0);

    /* DW 23..28 */
    OUT_BUFFER_3DW(batch, NULL, 0, 0);
    OUT_BUFFER_3DW(batch, NULL, 0, 0);

    ADVANCE_BCS_BATCH(batch);
}
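
/*
 * Usage sketch (illustrative only; the buffer objects and the semantics of
 * the offset/upper fields are assumptions, since the parameter structure is
 * defined in gen10_hcp_common.h): ind_cu_obj_bse appears to address the CU
 * object stream read by the PAK and ind_pak_bse the output bitstream buffer,
 * with the offset/upper values passed straight through as relocation deltas
 * above.
 *
 *     gen10_hcp_ind_obj_base_addr_state_param ind_obj;
 *
 *     memset(&ind_obj, 0, sizeof(ind_obj));
 *     ind_obj.ind_cu_obj_bse = &cu_records_res;
 *     ind_obj.ind_cu_obj_bse_offset = 0;
 *     ind_obj.ind_pak_bse = &bitstream_res;
 *     ind_obj.ind_pak_bse_offset = 0;
 *     ind_obj.ind_pak_bse_upper = bitstream_upper_bound_offset;
 *     gen10_hcp_ind_obj_base_addr_state(ctx, batch, &ind_obj);
 */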