2 * Copyright © 2018 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Peng Chen <peng.c.chen@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "i965_defines.h"
37 #include "i965_drv_video.h"
38 #include "gen10_vdenc_common.h"
/*
 * VDENC_WRITE_COMMANDS: emit a fixed-size VDENC command whose payload is
 * the raw contents of *param.  The header dword ORs the command opcode
 * (command_flag) with the payload dword count minus one, then the struct
 * is copied verbatim into the batch buffer.
 * Assumes sizeof(*param) is an exact multiple of sizeof(uint32_t), which
 * holds for the hardware command structs used by the wrappers below.
 */
40 #define VDENC_WRITE_COMMANDS(command_flag, batch, param) \
42 int cmd_size = sizeof(*param) / sizeof(uint32_t); \
43 BEGIN_BCS_BATCH(batch, cmd_size + 1); \
44 OUT_BCS_BATCH(batch, (command_flag) | (cmd_size - 1)); \
45 intel_batchbuffer_data(batch, param, sizeof(*param)); \
46 ADVANCE_BCS_BATCH(batch); \
/*
 * gen10_vdenc_vd_pipeline_flush:
 * Emit a VD_PIPELINE_FLUSH command into @batch from the caller-filled
 * @param dwords.  @ctx is not used by the emission itself (the macro
 * only touches @batch and @param); it is kept for API symmetry with the
 * other emitters in this file.
 */
50 gen10_vdenc_vd_pipeline_flush(VADriverContextP ctx,
51 struct intel_batchbuffer *batch,
52 gen10_vdenc_vd_pipeline_flush_param *param)
54 VDENC_WRITE_COMMANDS(VD_PIPELINE_FLUSH, batch, param);
/*
 * gen10_vdenc_pipe_mode_select:
 * Emit the VDENC_PIPE_MODE_SELECT command into @batch from the
 * caller-filled @param dwords.  @ctx is not used by the emission itself.
 */
58 gen10_vdenc_pipe_mode_select(VADriverContextP ctx,
59 struct intel_batchbuffer *batch,
60 gen10_vdenc_pipe_mode_select_param *param)
62 VDENC_WRITE_COMMANDS(VDENC_PIPE_MODE_SELECT, batch, param);
/*
 * gen10_vdenc_surface_state:
 * Emit one of the three VDENC surface-state commands -- source,
 * reference, or downscaled-reference, selected by @type -- describing
 * @surface0.  For the downscaled-reference variant a second surface
 * (@surface1) is appended to the same command; @surface1 is ignored
 * otherwise.
 * NOTE(review): lines elided from this view between the DS_REF branch
 * open and the dw0 assignment presumably grow cmd_size for the
 * two-surface case -- confirm against the full file.
 */
66 gen10_vdenc_surface_state(VADriverContextP ctx,
67 struct intel_batchbuffer *batch,
68 enum GEN10_VDENC_SURFACE_TYPE type,
69 gen10_vdenc_surface_state_param *surface0,
70 gen10_vdenc_surface_state_param *surface1)
/* Payload: one extra dword (emitted as zero below) plus the surface struct. */
75 cmd_size = 1 + sizeof(gen10_vdenc_surface_state_param) / sizeof(uint32_t);
/* Pick the command opcode from the surface type. */
76 if (type == GEN10_VDENC_DS_REF_SURFACE) {
79 dw0 = VDENC_DS_REF_SURFACE_STATE;
80 } else if (type == GEN10_VDENC_REF_SURFACE)
81 dw0 = VDENC_REF_SURFACE_STATE;
83 dw0 = VDENC_SRC_SURFACE_STATE;
/* OR the payload dword-count-minus-one into the command header. */
85 dw0 |= (cmd_size - 1);
87 BEGIN_BCS_BATCH(batch, cmd_size + 1);
89 OUT_BCS_BATCH(batch, dw0);
/* dword1 is always emitted as zero here. */
91 OUT_BCS_BATCH(batch, 0);
93 intel_batchbuffer_data(batch, surface0, sizeof(*surface0));
/* The downscaled-reference command carries a second surface payload. */
94 if (type == GEN10_VDENC_DS_REF_SURFACE)
95 intel_batchbuffer_data(batch, surface1, sizeof(*surface1));
97 ADVANCE_BCS_BATCH(batch);
/*
 * gen10_vdenc_walker_state:
 * Emit the VDENC_WALKER_STATE command into @batch from the caller-filled
 * @param dwords.  @ctx is not used by the emission itself.
 */
101 gen10_vdenc_walker_state(VADriverContextP ctx,
102 struct intel_batchbuffer *batch,
103 gen10_vdenc_walker_state_param *param)
105 VDENC_WRITE_COMMANDS(VDENC_WALKER_STATE, batch, param);
/*
 * gen10_vdenc_weightsoffsets_state:
 * Emit the VDENC_WEIGHTSOFFSETS_STATE command into @batch from the
 * caller-filled @param dwords.  @ctx is not used by the emission itself.
 */
109 gen10_vdenc_weightsoffsets_state(VADriverContextP ctx,
110 struct intel_batchbuffer *batch,
111 gen10_vdenc_weightsoffsets_state_param *param)
113 VDENC_WRITE_COMMANDS(VDENC_WEIGHTSOFFSETS_STATE, batch, param);
/*
 * OUT_BUFFER_2DW: emit a two-dword (64-bit) graphics address for @bo as
 * a relocation; @is_target selects a writable relocation (render-domain
 * write target) vs. a read-only one.  The trailing pair of zero dwords
 * appears to be the no-buffer path of a conditional whose branch lines
 * are elided from this view (presumably `if (bo) ... else ...`) --
 * TODO(review): confirm against the full file.
 */
116 #define OUT_BUFFER_2DW(batch, bo, is_target, delta) do { \
118 OUT_BCS_RELOC64(batch, \
120 I915_GEM_DOMAIN_RENDER, \
121 is_target ? I915_GEM_DOMAIN_RENDER : 0, \
124 OUT_BCS_BATCH(batch, 0); \
125 OUT_BCS_BATCH(batch, 0); \
/*
 * OUT_BUFFER_3DW: an address pair (via OUT_BUFFER_2DW) followed by a
 * third dword carrying the driver's MOCS (cacheability control) state.
 * Expands to a use of a local `i965` driver-data pointer, which the
 * enclosing function must declare.  The alternative zero dword below
 * looks like the no-buffer branch of an elided conditional --
 * TODO(review): confirm against the full file.
 */
129 #define OUT_BUFFER_3DW(batch, bo, is_target, delta) do { \
130 OUT_BUFFER_2DW(batch, bo, is_target, delta); \
132 OUT_BCS_BATCH(batch, i965->intel.mocs_state); \
134 OUT_BCS_BATCH(batch, 0); \
/*
 * gen10_vdenc_pipe_buf_addr_state:
 * Emit the fixed-size (62-dword) VDENC_PIPE_BUF_ADDR_STATE command,
 * programming every buffer address the VDENC pipeline reads or writes:
 * downscaled and full-size forward/backward references, the uncompressed
 * source picture, stream-in data, row-store scratch, collocated MVs,
 * statistics stream-out, LCU PAK object commands, 8x/4x scaled
 * references, and the VP9 segmentation-map stream-in/out buffers.
 * Continuation lines carrying each OUT_BUFFER_3DW's is_target/delta
 * arguments are elided from this view.
 * NOTE(review): the param->...->bo pointers are dereferenced
 * unconditionally, so the caller must populate every surface/buffer
 * field -- confirm against the callers.
 */
138 gen10_vdenc_pipe_buf_addr_state(VADriverContextP ctx,
139 struct intel_batchbuffer *batch,
140 gen10_vdenc_pipe_buf_addr_state_param *param)
/* `i965` is referenced by the OUT_BUFFER_3DW expansion (MOCS dword). */
142 struct i965_driver_data *i965 = i965_driver_data(ctx);
/* Fixed-length command: header dword + 61 payload dwords. */
145 BEGIN_BCS_BATCH(batch, 62);
147 OUT_BCS_BATCH(batch, VDENC_PIPE_BUF_ADDR_STATE | (62 - 2));
150 for (i = 0; i < 2; i++)
151 OUT_BUFFER_3DW(batch, param->downscaled_fwd_ref[i]->bo,
154 OUT_BUFFER_3DW(batch, param->downscaled_bwd_ref[0]->bo,
158 OUT_BUFFER_3DW(batch, param->uncompressed_picture->bo,
162 OUT_BUFFER_3DW(batch, param->stream_data_picture->bo,
166 OUT_BUFFER_3DW(batch, param->row_store_scratch_buf->bo,
170 OUT_BUFFER_3DW(batch, param->collocated_mv_buf->bo,
174 for (i = 0; i < 3; i++)
175 OUT_BUFFER_3DW(batch, param->fwd_ref[i]->bo,
178 OUT_BUFFER_3DW(batch, param->bwd_ref[0]->bo,
182 OUT_BUFFER_3DW(batch, param->statictics_streamout_buf->bo,
186 for (i = 0; i < 2; i++)
187 OUT_BUFFER_3DW(batch, param->downscaled_fwd_ref_4x[i]->bo,
/* Unused slot: emitted as a NULL buffer (address triple of zeros). */
191 OUT_BUFFER_3DW(batch, NULL, 0, 0);
194 OUT_BUFFER_3DW(batch, param->lcu_pak_obj_cmd_buf->bo,
198 OUT_BUFFER_3DW(batch, param->scaled_ref_8x->bo,
202 OUT_BUFFER_3DW(batch, param->scaled_ref_4x->bo,
206 OUT_BUFFER_3DW(batch, param->vp9_segmentation_map_streamin_buf->bo,
208 OUT_BUFFER_3DW(batch, param->vp9_segmentation_map_streamout_buf->bo,
/* Final payload dword: plain offset, not a relocation. */
212 OUT_BCS_BATCH(batch, param->dw61.weights_histogram_streamout_offset);
214 ADVANCE_BCS_BATCH(batch);