2 * Copyright © 2014 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Zhao Yakui <yakui.zhao@intel.com>
26 * Xiang Haihao <haihao.xiang@intel.com>
35 #include "intel_batchbuffer.h"
36 #include "intel_driver.h"
38 #include "i965_defines.h"
39 #include "i965_drv_video.h"
40 #include "i965_encoder.h"
44 #include "intel_media.h"
/*
 * Per-GEN layout constants: this file targets GEN9 but reuses the GEN8
 * padded surface-state size, so any previously inherited definition is
 * replaced here.
 */
#ifdef SURFACE_STATE_PADDED_SIZE
#undef SURFACE_STATE_PADDED_SIZE
#endif

#define SURFACE_STATE_PADDED_SIZE       SURFACE_STATE_PADDED_SIZE_GEN8
/* Byte offset of surface state #index, and of its binding-table slot.
 * Arguments are parenthesized so expressions such as (base + i) expand
 * safely inside the multiplication. */
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * (index))
#define BINDING_TABLE_OFFSET(index)     (SURFACE_STATE_OFFSET(MAX_MEDIA_SURFACES_GEN6) + sizeof(unsigned int) * (index))

/* Indices into the gen9_vme_*kernels[] descriptor tables below */
#define VME_INTRA_SHADER        0
#define VME_INTER_SHADER        1
#define VME_BINTER_SHADER       2

#define CURBE_ALLOCATION_SIZE   37 /* in 256-bit */
#define CURBE_TOTAL_DATA_LENGTH (4 * 32) /* in byte, it should be less than or equal to CURBE_ALLOCATION_SIZE * 32 */
#define CURBE_URB_ENTRY_LENGTH  4 /* in 256-bit, it should be less than or equal to CURBE_TOTAL_DATA_LENGTH / 32 */

#define VME_MSG_LENGTH          32
/*
 * H.264 VME kernels: the precompiled GEN9 shader binaries (.g9b files) are
 * pulled in as array initializer data via #include.
 * NOTE(review): this excerpt is truncated — initializer braces and several
 * kernel-table fields are missing (the embedded original line numbers jump).
 */
64 static const uint32_t gen9_vme_intra_frame[][4] = {
65 #include "shaders/vme/intra_frame_gen9.g9b"
68 static const uint32_t gen9_vme_inter_frame[][4] = {
69 #include "shaders/vme/inter_frame_gen9.g9b"
72 static const uint32_t gen9_vme_inter_bframe[][4] = {
73 #include "shaders/vme/inter_bframe_gen9.g9b"
/* Kernel descriptor table, ordered by the VME_*_SHADER indices */
76 static struct i965_kernel gen9_vme_kernels[] = {
79 VME_INTRA_SHADER, /*index*/
81 sizeof(gen9_vme_intra_frame),
88 sizeof(gen9_vme_inter_frame),
94 gen9_vme_inter_bframe,
95 sizeof(gen9_vme_inter_bframe),
/*
 * MPEG-2 VME kernels. Note the intra kernel reuses the H.264 intra shader
 * binary (intra_frame_gen9.g9b); only the inter kernel is MPEG-2 specific.
 */
100 static const uint32_t gen9_vme_mpeg2_intra_frame[][4] = {
101 #include "shaders/vme/intra_frame_gen9.g9b"
104 static const uint32_t gen9_vme_mpeg2_inter_frame[][4] = {
105 #include "shaders/vme/mpeg2_inter_gen9.g9b"
/* Kernel descriptor table for the MPEG-2 encode path */
108 static struct i965_kernel gen9_vme_mpeg2_kernels[] = {
111 VME_INTRA_SHADER, /*index*/
112 gen9_vme_mpeg2_intra_frame,
113 sizeof(gen9_vme_mpeg2_intra_frame),
119 gen9_vme_mpeg2_inter_frame,
120 sizeof(gen9_vme_mpeg2_inter_frame),
/* VP8 VME kernels: VP8-specific intra and inter shader binaries. */
125 static const uint32_t gen9_vme_vp8_intra_frame[][4] = {
126 #include "shaders/vme/vp8_intra_frame_gen9.g9b"
129 static const uint32_t gen9_vme_vp8_inter_frame[][4] = {
130 #include "shaders/vme/vp8_inter_frame_gen9.g9b"
/* Kernel descriptor table for the VP8 encode path */
133 static struct i965_kernel gen9_vme_vp8_kernels[] = {
136 VME_INTRA_SHADER, /*index*/
137 gen9_vme_vp8_intra_frame,
138 sizeof(gen9_vme_vp8_intra_frame),
144 gen9_vme_vp8_inter_frame,
145 sizeof(gen9_vme_vp8_inter_frame),
/*
 * HEVC VME kernels. The include paths show HEVC reuses the H.264 shader
 * binaries (intra_frame/inter_frame/inter_bframe_gen9.g9b) for motion
 * estimation.
 */
152 static const uint32_t gen9_vme_hevc_intra_frame[][4] = {
153 #include "shaders/vme/intra_frame_gen9.g9b"
156 static const uint32_t gen9_vme_hevc_inter_frame[][4] = {
157 #include "shaders/vme/inter_frame_gen9.g9b"
160 static const uint32_t gen9_vme_hevc_inter_bframe[][4] = {
161 #include "shaders/vme/inter_bframe_gen9.g9b"
/* Kernel descriptor table for the HEVC encode path */
164 static struct i965_kernel gen9_vme_hevc_kernels[] = {
167 VME_INTRA_SHADER, /*index*/
168 gen9_vme_hevc_intra_frame,
169 sizeof(gen9_vme_hevc_intra_frame),
175 gen9_vme_hevc_inter_frame,
176 sizeof(gen9_vme_hevc_inter_frame),
182 gen9_vme_hevc_inter_bframe,
183 sizeof(gen9_vme_hevc_inter_bframe),
187 /* only used for VME source surface state */
/*
 * Bind obj_surface as a VME "surface2" source at binding-table /
 * surface-state slot 'index' (callers pass indices 0-2; see
 * gen9_vme_surface_setup). Delegates to the codec-configured
 * vme_surface2_setup hook.
 */
189 gen9_vme_source_surface_state(VADriverContextP ctx,
191 struct object_surface *obj_surface,
192 struct intel_encoder_context *encoder_context)
194 struct gen6_vme_context *vme_context = encoder_context->vme_context;
196 vme_context->vme_surface2_setup(ctx,
197 &vme_context->gpe_context,
199 BINDING_TABLE_OFFSET(index),
200 SURFACE_STATE_OFFSET(index));
/*
 * Bind obj_surface's luma plane as a media read/write surface at slot
 * 'index', via the codec-configured vme_media_rw_surface_setup hook.
 */
204 gen9_vme_media_source_surface_state(VADriverContextP ctx,
206 struct object_surface *obj_surface,
207 struct intel_encoder_context *encoder_context)
209 struct gen6_vme_context *vme_context = encoder_context->vme_context;
211 vme_context->vme_media_rw_surface_setup(ctx,
212 &vme_context->gpe_context,
214 BINDING_TABLE_OFFSET(index),
215 SURFACE_STATE_OFFSET(index),
/*
 * Bind obj_surface's chroma plane as a media surface at slot 'index',
 * via the codec-configured vme_media_chroma_surface_setup hook.
 */
220 gen9_vme_media_chroma_source_surface_state(VADriverContextP ctx,
222 struct object_surface *obj_surface,
223 struct intel_encoder_context *encoder_context)
225 struct gen6_vme_context *vme_context = encoder_context->vme_context;
227 vme_context->vme_media_chroma_surface_setup(ctx,
228 &vme_context->gpe_context,
230 BINDING_TABLE_OFFSET(index),
231 SURFACE_STATE_OFFSET(index),
/*
 * Allocate the per-MB VME output buffer (one block per macroblock) and bind
 * it at slot 'index'. Block size depends on intra-only vs inter encoding:
 * intra needs 2 "units", inter needs 24 (see the size breakdown comment
 * below).
 */
236 gen9_vme_output_buffer_setup(VADriverContextP ctx,
237 struct encode_state *encode_state,
239 struct intel_encoder_context *encoder_context,
245 struct i965_driver_data *i965 = i965_driver_data(ctx);
246 struct gen6_vme_context *vme_context = encoder_context->vme_context;
248 vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
249 vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
/* Intra-only frame: 2 units per MB */
252 vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
/* Inter frame: 24 units per MB, derived as follows */
254 vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
256 * Inter MV . 32-byte Intra search + 16 IME info + 128 IME MV + 32 IME Ref
257 * + 16 FBR Info + 128 FBR MV + 32 FBR Ref.
258 * 16 * (2 + 2 * (1 + 8 + 2))= 16 * 24.
261 vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
263 vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
265 assert(vme_context->vme_output.bo);
/* Publish the buffer to the kernel's binding table */
266 vme_context->vme_buffer_suface_setup(ctx,
267 &vme_context->gpe_context,
268 &vme_context->vme_output,
269 BINDING_TABLE_OFFSET(index),
270 SURFACE_STATE_OFFSET(index));
/*
 * H.264 wrapper: derive frame dimensions (in MBs) and intra/inter mode from
 * the sequence/slice parameter buffers, then delegate to
 * gen9_vme_output_buffer_setup.
 */
274 gen9_vme_avc_output_buffer_setup(VADriverContextP ctx,
275 struct encode_state *encode_state,
277 struct intel_encoder_context *encoder_context)
279 VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
280 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
281 int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
282 int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
283 int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
285 gen9_vme_output_buffer_setup(ctx, encode_state, index, encoder_context, is_intra, width_in_mbs, height_in_mbs);
/*
 * Allocate the second-level batch buffer the VME kernels are dispatched
 * from: one 64-byte block per macroblock plus one extra block (for the
 * trailing MI_BATCH_BUFFER_END), bound at slot 'index'.
 */
290 gen9_vme_output_vme_batchbuffer_setup(VADriverContextP ctx,
291 struct encode_state *encode_state,
293 struct intel_encoder_context *encoder_context,
297 struct i965_driver_data *i965 = i965_driver_data(ctx);
298 struct gen6_vme_context *vme_context = encoder_context->vme_context;
/* +1 block leaves room for the batch terminator */
300 vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
301 vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
302 vme_context->vme_batchbuffer.pitch = 16;
303 vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
305 vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
307 vme_context->vme_buffer_suface_setup(ctx,
308 &vme_context->gpe_context,
309 &vme_context->vme_batchbuffer,
310 BINDING_TABLE_OFFSET(index),
311 SURFACE_STATE_OFFSET(index));
/*
 * H.264 wrapper: derive frame dimensions (in MBs) from the sequence
 * parameters, then delegate to gen9_vme_output_vme_batchbuffer_setup.
 */
315 gen9_vme_avc_output_vme_batchbuffer_setup(VADriverContextP ctx,
316 struct encode_state *encode_state,
318 struct intel_encoder_context *encoder_context)
320 VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
321 int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
322 int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
324 gen9_vme_output_vme_batchbuffer_setup(ctx, encode_state, index, encoder_context, width_in_mbs, height_in_mbs);
/*
 * Bind all surfaces the H.264 VME kernel needs: source picture (slots 0/4/6),
 * L0/L1 reference pictures (slots 1-2, inter slices only), VME output (slot
 * 3), the second-level batch buffer (slot 5) and the MV/MB cost table.
 */
329 gen9_vme_surface_setup(VADriverContextP ctx,
330 struct encode_state *encode_state,
332 struct intel_encoder_context *encoder_context)
334 struct object_surface *obj_surface;
336 /*Setup surfaces state*/
337 /* current picture for encoding */
338 obj_surface = encode_state->input_yuv_object;
340 gen9_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
341 gen9_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
342 gen9_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);
/* Inter slice: bind reference frames; intra/SI must not reach this path */
345 VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
348 slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
349 assert(slice_type != SLICE_TYPE_I && slice_type != SLICE_TYPE_SI);
/* L0 references at slot 1; B slices also bind L1 at slot 2 */
351 intel_avc_vme_reference_state(ctx, encode_state, encoder_context, 0, 1, gen9_vme_source_surface_state);
353 if (slice_type == SLICE_TYPE_B)
354 intel_avc_vme_reference_state(ctx, encode_state, encoder_context, 1, 2, gen9_vme_source_surface_state);
358 gen9_vme_avc_output_buffer_setup(ctx, encode_state, 3, encoder_context);
359 gen9_vme_avc_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);
360 intel_h264_setup_cost_surface(ctx, encode_state, encoder_context,
361 BINDING_TABLE_OFFSET(INTEL_COST_TABLE_OFFSET),
362 SURFACE_STATE_OFFSET(INTEL_COST_TABLE_OFFSET));
364 return VA_STATUS_SUCCESS;
/*
 * Fill the interface descriptor remap table (IDRT) inside the dynamic-state
 * buffer: one 32-byte gen8_interface_descriptor_data entry per loaded VME
 * kernel, each pointing at its kernel start, the shared binding table and
 * the CURBE read length.
 */
367 static VAStatus gen9_vme_interface_setup(VADriverContextP ctx,
368 struct encode_state *encode_state,
369 struct intel_encoder_context *encoder_context)
371 struct gen6_vme_context *vme_context = encoder_context->vme_context;
372 struct gen8_interface_descriptor_data *desc;
375 unsigned char *desc_ptr;
377 bo = vme_context->gpe_context.dynamic_state.bo;
380 desc_ptr = (unsigned char *)bo->virtual + vme_context->gpe_context.idrt_offset;
382 desc = (struct gen8_interface_descriptor_data *)desc_ptr;
384 for (i = 0; i < vme_context->vme_kernel_sum; i++) {
385 struct i965_kernel *kernel;
386 kernel = &vme_context->gpe_context.kernels[i];
387 assert(sizeof(*desc) == 32);
/* Set up the descriptor table */
389 memset(desc, 0, sizeof(*desc));
/* kernel start pointer is in 64-byte units */
390 desc->desc0.kernel_start_pointer = kernel->kernel_offset >> 6;
391 desc->desc3.sampler_count = 0; /* FIXME: */
392 desc->desc3.sampler_state_pointer = 0;
393 desc->desc4.binding_table_entry_count = 1; /* FIXME: */
/* binding table pointer is in 32-byte units */
394 desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
395 desc->desc5.constant_urb_entry_read_offset = 0;
396 desc->desc5.constant_urb_entry_read_length = CURBE_URB_ENTRY_LENGTH;
403 return VA_STATUS_SUCCESS;
/*
 * Write the CURBE constant buffer for the VME kernels. The per-codec level
 * checks pick the allowed MV count (written into vme_state_message[31]),
 * then the first 128 bytes of vme_state_message are copied into the
 * dynamic-state buffer at curbe_offset.
 */
406 static VAStatus gen9_vme_constant_setup(VADriverContextP ctx,
407 struct encode_state *encode_state,
408 struct intel_encoder_context *encoder_context,
411 struct gen6_vme_context *vme_context = encoder_context->vme_context;
412 unsigned char *constant_buffer;
413 unsigned int *vme_state_message;
416 vme_state_message = (unsigned int *)vme_context->vme_state_message;
/* Select MV limits by codec level (H.264 levels 3.0/3.1 thresholds) */
418 if (encoder_context->codec == CODEC_H264 ||
419 encoder_context->codec == CODEC_H264_MVC) {
420 if (vme_context->h264_level >= 30) {
423 if (vme_context->h264_level >= 31)
426 } else if (encoder_context->codec == CODEC_MPEG2) {
/* HEVC levels are stored scaled by 3, hence the 30*3 / 31*3 thresholds */
428 }else if (encoder_context->codec == CODEC_HEVC) {
429 if (vme_context->hevc_level >= 30*3) {
432 if (vme_context->hevc_level >= 31*3)
434 }/* use the avc level setting */
437 vme_state_message[31] = mv_num;
439 dri_bo_map(vme_context->gpe_context.dynamic_state.bo, 1);
440 assert(vme_context->gpe_context.dynamic_state.bo->virtual);
441 constant_buffer = (unsigned char *)vme_context->gpe_context.dynamic_state.bo->virtual +
442 vme_context->gpe_context.curbe_offset;
444 /* VME MV/Mb cost table is passed by using const buffer */
445 /* Now it uses the fixed search path. So it is constructed directly
448 memcpy(constant_buffer, (char *)vme_context->vme_state_message, 128);
450 dri_bo_unmap(vme_context->gpe_context.dynamic_state.bo);
452 return VA_STATUS_SUCCESS;
/* HW scoreboard dependency bits for macroblock dispatch; usage below ties
 * A to the left neighbour, B to the top row, C to the top-right neighbour
 * (matching the INTRA_PRED_AVAIL_FLAG_* conditions). */
455 #define MB_SCOREBOARD_A (1 << 0)
456 #define MB_SCOREBOARD_B (1 << 1)
457 #define MB_SCOREBOARD_C (1 << 2)
/*
 * Check whether macroblock (x_index, y_index) lies inside the picture and
 * inside the slice that starts at macroblock 'first_mb' and spans 'num_mb'
 * macroblocks.
 *
 * Returns 0 when the MB is in bounds and -1 otherwise; callers use
 * !loop_in_bounds(...) as their "keep walking" loop condition.
 */
static inline int loop_in_bounds(int x_index, int y_index, int first_mb, int num_mb, int mb_width, int mb_height)
{
    int mb_index;

    if (x_index < 0 || x_index >= mb_width)
        return -1;

    if (y_index < 0 || y_index >= mb_height)
        return -1;

    mb_index = y_index * mb_width + x_index;

    /* NOTE(review): the upper bound uses '>', so mb_index == first_mb + num_mb
     * is still treated as in-bounds; confirm this off-by-one is intended. */
    if (mb_index < first_mb || mb_index > (first_mb + num_mb))
        return -1;

    return 0;
}
/*
 * Workaround path: fill the second-level batch with one MEDIA_OBJECT per
 * macroblock, walking each slice in a diagonal wavefront so the HW
 * scoreboard (left/top/top-right dependencies) can run MBs concurrently.
 * The first loop covers columns up to mb_width-2; the second loop drains
 * the remaining right-edge columns.
 * NOTE(review): this excerpt is truncated — loop-advance statements and
 * several braces are missing from the visible text.
 */
475 gen9wa_vme_walker_fill_vme_batchbuffer(VADriverContextP ctx,
476 struct encode_state *encode_state,
477 int mb_width, int mb_height,
479 int transform_8x8_mode_flag,
480 struct intel_encoder_context *encoder_context)
482 struct gen6_vme_context *vme_context = encoder_context->vme_context;
485 unsigned int *command_ptr;
487 #define USE_SCOREBOARD (1 << 21)
489 dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
490 command_ptr = vme_context->vme_batchbuffer.bo->virtual;
/* One wavefront walk per slice */
492 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
493 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
494 int first_mb = pSliceParameter->macroblock_address;
495 int num_mb = pSliceParameter->num_macroblocks;
496 unsigned int mb_intra_ub, score_dep;
497 int x_outer, y_outer, x_inner, y_inner;
500 x_outer = first_mb % mb_width;
501 y_outer = first_mb / mb_width;
/* Phase 1: wavefronts starting from columns [first_mb_x, mb_width-2) */
504 for (; x_outer < (mb_width -2 ) && !loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
507 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
/* Left neighbour available -> depend on scoreboard bit A */
511 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
512 score_dep |= MB_SCOREBOARD_A;
/* Not the slice's first row -> top (B) and top-left (D) available */
514 if (y_inner != mb_row) {
515 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
516 score_dep |= MB_SCOREBOARD_B;
518 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
519 if (x_inner != (mb_width -1)) {
520 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
521 score_dep |= MB_SCOREBOARD_C;
/* Emit an 8-dword MEDIA_OBJECT for this MB */
525 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
526 *command_ptr++ = kernel;
527 *command_ptr++ = USE_SCOREBOARD;
530 /* the (X, Y) term of scoreboard */
531 *command_ptr++ = ((y_inner << 16) | x_inner);
532 *command_ptr++ = score_dep;
/* Inline payload: MB position and prediction flags */
534 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
535 *command_ptr++ = ((1 << 18) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
536 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Phase 2: wavefronts draining the right edge, starting at mb_width-2 */
545 xtemp_outer = mb_width - 2;
548 x_outer = xtemp_outer;
549 y_outer = first_mb / mb_width;
550 for (;!loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
553 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
557 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
558 score_dep |= MB_SCOREBOARD_A;
560 if (y_inner != mb_row) {
561 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
562 score_dep |= MB_SCOREBOARD_B;
564 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
566 if (x_inner != (mb_width -1)) {
567 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
568 score_dep |= MB_SCOREBOARD_C;
572 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
573 *command_ptr++ = kernel;
574 *command_ptr++ = USE_SCOREBOARD;
577 /* the (X, Y) term of scoreboard */
578 *command_ptr++ = ((y_inner << 16) | x_inner);
579 *command_ptr++ = score_dep;
581 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
582 *command_ptr++ = ((1 << 18) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
584 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Wrap the walk to the next wavefront start column */
590 if (x_outer >= mb_width) {
592 x_outer = xtemp_outer;
/* Terminate the second-level batch */
597 *command_ptr++ = MI_BATCH_BUFFER_END;
600 dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/*
 * Non-walker path: fill the second-level batch with one 9-dword
 * MEDIA_OBJECT per macroblock in raster order (no HW scoreboard).
 * QP comes from the picture/slice parameters in CQP mode, otherwise from
 * the bit-rate-control context; with ROI enabled the per-MB QP map
 * overrides it.
 * NOTE(review): excerpt is truncated — some dword writes and loop-advance
 * statements are missing from the visible text.
 */
604 gen9_vme_fill_vme_batchbuffer(VADriverContextP ctx,
605 struct encode_state *encode_state,
606 int mb_width, int mb_height,
608 int transform_8x8_mode_flag,
609 struct intel_encoder_context *encoder_context)
611 struct gen6_vme_context *vme_context = encoder_context->vme_context;
612 int mb_x = 0, mb_y = 0;
614 unsigned int *command_ptr;
615 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
616 VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
617 VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
619 int slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
/* CQP: QP straight from the parameter buffers; otherwise from BRC state */
622 if (encoder_context->rate_control_mode == VA_RC_CQP)
623 qp = pic_param->pic_init_qp + slice_param->slice_qp_delta;
625 qp = mfc_context->bit_rate_control_context[slice_type].QpPrimeY;
627 dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
628 command_ptr = vme_context->vme_batchbuffer.bo->virtual;
630 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
631 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
632 int slice_mb_begin = pSliceParameter->macroblock_address;
633 int slice_mb_number = pSliceParameter->num_macroblocks;
634 unsigned int mb_intra_ub;
635 int slice_mb_x = pSliceParameter->macroblock_address % mb_width;
636 for (i = 0; i < slice_mb_number; ) {
637 int mb_count = i + slice_mb_begin;
638 mb_x = mb_count % mb_width;
639 mb_y = mb_count / mb_width;
/* Intra-prediction neighbour availability for this MB position */
642 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
645 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
647 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
648 if (mb_x != (mb_width -1))
649 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
/* First MBs of a mid-row slice: strip unavailable neighbours */
653 mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
654 mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);
655 if ((i == (mb_width - 1)) && slice_mb_x) {
656 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
660 if ((i == mb_width) && slice_mb_x) {
661 mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);
/* Emit a 9-dword MEDIA_OBJECT for this MB */
663 *command_ptr++ = (CMD_MEDIA_OBJECT | (9 - 2));
664 *command_ptr++ = kernel;
671 *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
672 *command_ptr++ = ((encoder_context->quality_level << 24) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
673 /* qp occupies one byte */
674 if (vme_context->roi_enabled) {
675 qp_index = mb_y * mb_width + mb_x;
676 qp_mb = *(vme_context->qp_per_mb + qp_index);
679 *command_ptr++ = qp_mb;
681 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Terminate the second-level batch */
687 *command_ptr++ = MI_BATCH_BUFFER_END;
690 dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/*
 * Per-frame (re)initialization: reset the GPE context and drop any buffer
 * objects from the previous frame so they are re-allocated for the new one.
 */
693 static void gen9_vme_media_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
695 struct gen6_vme_context *vme_context = encoder_context->vme_context;
697 gen8_gpe_context_init(ctx, &vme_context->gpe_context);
699 /* VME output buffer */
700 dri_bo_unreference(vme_context->vme_output.bo);
701 vme_context->vme_output.bo = NULL;
/* Second-level batch buffer */
703 dri_bo_unreference(vme_context->vme_batchbuffer.bo);
704 vme_context->vme_batchbuffer.bo = NULL;
/* VME state buffer */
707 dri_bo_unreference(vme_context->vme_state.bo);
708 vme_context->vme_state.bo = NULL;
/*
 * Build and submit the H.264 VME media pipeline: pick the kernel from the
 * slice type, fill the second-level batch (scoreboard walker when every
 * slice starts on a row boundary and quality permits, raster fill
 * otherwise), then chain to it with MI_BATCH_BUFFER_START from the main
 * batch.
 */
711 static void gen9_vme_pipeline_programing(VADriverContextP ctx,
712 struct encode_state *encode_state,
713 struct intel_encoder_context *encoder_context)
715 struct gen6_vme_context *vme_context = encoder_context->vme_context;
716 struct intel_batchbuffer *batch = encoder_context->base.batch;
717 VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
718 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
719 VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
720 int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
721 int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
723 bool allow_hwscore = true;
725 unsigned int is_low_quality = (encoder_context->quality_level == ENCODER_LOW_QUALITY);
727 allow_hwscore = false;
/* Any slice not starting at a row boundary disables the HW scoreboard walker */
730 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
731 pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
732 if ((pSliceParameter->macroblock_address % width_in_mbs)) {
733 allow_hwscore = false;
/* Kernel selection by slice type: I/SI -> intra, P/SP -> inter, B -> binter */
739 if ((pSliceParameter->slice_type == SLICE_TYPE_I) ||
740 (pSliceParameter->slice_type == SLICE_TYPE_SI)) {
741 kernel_shader = VME_INTRA_SHADER;
742 } else if ((pSliceParameter->slice_type == SLICE_TYPE_P) ||
743 (pSliceParameter->slice_type == SLICE_TYPE_SP)) {
744 kernel_shader = VME_INTER_SHADER;
746 kernel_shader = VME_BINTER_SHADER;
748 kernel_shader = VME_INTER_SHADER;
/* Scoreboard wavefront walker vs plain raster-order fill */
751 gen9wa_vme_walker_fill_vme_batchbuffer(ctx,
753 width_in_mbs, height_in_mbs,
755 pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
758 gen9_vme_fill_vme_batchbuffer(ctx,
760 width_in_mbs, height_in_mbs,
762 pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
/* Submit: set up the GPE pipeline and chain to the second-level batch */
765 intel_batchbuffer_start_atomic(batch, 0x1000);
766 gen9_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
767 BEGIN_BATCH(batch, 3);
768 OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
770 vme_context->vme_batchbuffer.bo,
771 I915_GEM_DOMAIN_COMMAND, 0,
774 ADVANCE_BATCH(batch);
776 gen9_gpe_pipeline_end(ctx, &vme_context->gpe_context, batch);
778 intel_batchbuffer_end_atomic(batch);
/*
 * Per-frame H.264 VME preparation: refresh the cached level when it
 * changes, update MV/MB cost tables and ROI config, bind surfaces,
 * descriptors and constants, then program the media pipeline.
 * Returns vaStatus (VA_STATUS_SUCCESS unless a step below overrides it).
 */
781 static VAStatus gen9_vme_prepare(VADriverContextP ctx,
782 struct encode_state *encode_state,
783 struct intel_encoder_context *encoder_context)
785 VAStatus vaStatus = VA_STATUS_SUCCESS;
786 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
787 int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
788 VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
789 struct gen6_vme_context *vme_context = encoder_context->vme_context;
/* Track level changes so level-dependent state can be refreshed */
791 if (!vme_context->h264_level ||
792 (vme_context->h264_level != pSequenceParameter->level_idc)) {
793 vme_context->h264_level = pSequenceParameter->level_idc;
796 intel_vme_update_mbmv_cost(ctx, encode_state, encoder_context);
797 intel_h264_initialize_mbmv_cost(ctx, encode_state, encoder_context);
798 intel_h264_enc_roi_config(ctx, encode_state, encoder_context);
800 /*Setup all the memory object*/
801 gen9_vme_surface_setup(ctx, encode_state, is_intra, encoder_context);
802 gen9_vme_interface_setup(ctx, encode_state, encoder_context);
803 //gen9_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);
/* B slices use two reference lists, hence the 2-vs-1 argument */
804 gen9_vme_constant_setup(ctx, encode_state, encoder_context, (pSliceParameter->slice_type == SLICE_TYPE_B) ? 2 : 1);
806 /*Programing media pipeline*/
807 gen9_vme_pipeline_programing(ctx, encode_state, encoder_context);
/*
 * Submit the prepared batch to the GPU. encode_state is unused here but
 * kept for the common pipeline-stage signature.
 */
812 static VAStatus gen9_vme_run(VADriverContextP ctx,
813 struct encode_state *encode_state,
814 struct intel_encoder_context *encoder_context)
816 struct intel_batchbuffer *batch = encoder_context->base.batch;
818 intel_batchbuffer_flush(batch);
820 return VA_STATUS_SUCCESS;
/* No-op pipeline stage kept for the common prepare/run/stop interface. */
823 static VAStatus gen9_vme_stop(VADriverContextP ctx,
824 struct encode_state *encode_state,
825 struct intel_encoder_context *encoder_context)
827 return VA_STATUS_SUCCESS;
/*
 * H.264 VME pipeline entry point: init per-frame state, then run the
 * prepare/run/stop sequence.
 */
831 gen9_vme_pipeline(VADriverContextP ctx,
833 struct encode_state *encode_state,
834 struct intel_encoder_context *encoder_context)
836 gen9_vme_media_init(ctx, encoder_context);
837 gen9_vme_prepare(ctx, encode_state, encoder_context);
838 gen9_vme_run(ctx, encode_state, encoder_context);
839 gen9_vme_stop(ctx, encode_state, encoder_context);
841 return VA_STATUS_SUCCESS;
/*
 * MPEG-2 wrapper: frame dimensions come from pixel sizes rounded up to
 * whole macroblocks, then delegate to gen9_vme_output_buffer_setup.
 */
845 gen9_vme_mpeg2_output_buffer_setup(VADriverContextP ctx,
846 struct encode_state *encode_state,
849 struct intel_encoder_context *encoder_context)
852 VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
853 int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
854 int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;
856 gen9_vme_output_buffer_setup(ctx, encode_state, index, encoder_context, is_intra, width_in_mbs, height_in_mbs);
/*
 * MPEG-2 wrapper: derive MB dimensions from pixel sizes and delegate to
 * gen9_vme_output_vme_batchbuffer_setup.
 */
860 gen9_vme_mpeg2_output_vme_batchbuffer_setup(VADriverContextP ctx,
861 struct encode_state *encode_state,
863 struct intel_encoder_context *encoder_context)
866 VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
867 int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
868 int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;
870 gen9_vme_output_vme_batchbuffer_setup(ctx, encode_state, index, encoder_context, width_in_mbs, height_in_mbs);
/*
 * Bind all surfaces for the MPEG-2 VME kernel: source picture (slots
 * 0/4/6), forward/backward reference pictures (slots 1-2), VME output
 * (slot 3) and the second-level batch buffer (slot 5).
 */
874 gen9_vme_mpeg2_surface_setup(VADriverContextP ctx,
875 struct encode_state *encode_state,
877 struct intel_encoder_context *encoder_context)
879 struct object_surface *obj_surface;
881 /*Setup surfaces state*/
882 /* current picture for encoding */
883 obj_surface = encode_state->input_yuv_object;
884 gen9_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
885 gen9_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
886 gen9_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);
/* Forward reference at slot 1.
 * NOTE(review): unlike the second reference below, obj_surface itself is
 * not NULL-checked here; a guard may exist on a line missing from this
 * excerpt — confirm against the full file. */
890 obj_surface = encode_state->reference_objects[0];
892 if (obj_surface->bo != NULL)
893 gen9_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);
/* Backward reference at slot 2 */
896 obj_surface = encode_state->reference_objects[1];
898 if (obj_surface && obj_surface->bo != NULL)
899 gen9_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);
903 gen9_vme_mpeg2_output_buffer_setup(ctx, encode_state, 3, is_intra, encoder_context);
904 gen9_vme_mpeg2_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);
906 return VA_STATUS_SUCCESS;
/*
 * MPEG-2 variant of the scoreboard wavefront walker: one 8-dword
 * MEDIA_OBJECT per macroblock with left/top/top-right scoreboard
 * dependencies. The whole frame is treated as one slice
 * (num_mb = mb_width * mb_height). Structure mirrors
 * gen9wa_vme_walker_fill_vme_batchbuffer above.
 * NOTE(review): this excerpt is truncated — loop-advance statements and
 * several braces are missing from the visible text.
 */
910 gen9wa_vme_mpeg2_walker_fill_vme_batchbuffer(VADriverContextP ctx,
911 struct encode_state *encode_state,
912 int mb_width, int mb_height,
914 struct intel_encoder_context *encoder_context)
916 struct gen6_vme_context *vme_context = encoder_context->vme_context;
917 unsigned int *command_ptr;
919 #define MPEG2_SCOREBOARD (1 << 21)
921 dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
922 command_ptr = vme_context->vme_batchbuffer.bo->virtual;
925 unsigned int mb_intra_ub, score_dep;
926 int x_outer, y_outer, x_inner, y_inner;
/* Single "slice" covering the whole picture */
929 int num_mb = mb_width * mb_height;
/* Phase 1: wavefronts starting from columns [0, mb_width-2) */
934 for (; x_outer < (mb_width -2 ) && !loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
937 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
/* Neighbour availability and matching scoreboard dependencies */
941 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
942 score_dep |= MB_SCOREBOARD_A;
945 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
946 score_dep |= MB_SCOREBOARD_B;
949 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
951 if (x_inner != (mb_width -1)) {
952 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
953 score_dep |= MB_SCOREBOARD_C;
/* Emit an 8-dword MEDIA_OBJECT for this MB */
957 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
958 *command_ptr++ = kernel;
959 *command_ptr++ = MPEG2_SCOREBOARD;
962 /* the (X, Y) term of scoreboard */
963 *command_ptr++ = ((y_inner << 16) | x_inner);
964 *command_ptr++ = score_dep;
966 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
967 *command_ptr++ = ((1 << 18) | (1 << 16) | (mb_intra_ub << 8));
968 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Phase 2: drain the right-edge columns starting at mb_width-2 */
977 xtemp_outer = mb_width - 2;
980 x_outer = xtemp_outer;
982 for (;!loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
985 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
989 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
990 score_dep |= MB_SCOREBOARD_A;
993 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
994 score_dep |= MB_SCOREBOARD_B;
997 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
999 if (x_inner != (mb_width -1)) {
1000 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
1001 score_dep |= MB_SCOREBOARD_C;
1005 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
1006 *command_ptr++ = kernel;
1007 *command_ptr++ = MPEG2_SCOREBOARD;
1010 /* the (X, Y) term of scoreboard */
1011 *command_ptr++ = ((y_inner << 16) | x_inner);
1012 *command_ptr++ = score_dep;
1014 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
1015 *command_ptr++ = ((1 << 18) | (1 << 16) | (mb_intra_ub << 8));
1017 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Wrap the walk to the next wavefront start column */
1023 if (x_outer >= mb_width) {
1025 x_outer = xtemp_outer;
/* Terminate the second-level batch */
1030 *command_ptr++ = MI_BATCH_BUFFER_END;
1033 dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/*
 * MPEG-2 raster-order fill of the second-level batch: one 8-dword
 * MEDIA_OBJECT per macroblock, iterating over each slice parameter
 * buffer element. No scoreboard dependencies are used on this path.
 * NOTE(review): excerpt is truncated — some dword writes and loop-advance
 * statements are missing from the visible text.
 */
1038 gen9_vme_mpeg2_fill_vme_batchbuffer(VADriverContextP ctx,
1039 struct encode_state *encode_state,
1040 int mb_width, int mb_height,
1042 int transform_8x8_mode_flag,
1043 struct intel_encoder_context *encoder_context)
1045 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1046 int mb_x = 0, mb_y = 0;
1048 unsigned int *command_ptr;
1051 dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
1052 command_ptr = vme_context->vme_batchbuffer.bo->virtual;
1054 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
1055 VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[s]->buffer;
/* MPEG-2 packs multiple slices per parameter buffer */
1057 for (j = 0; j < encode_state->slice_params_ext[s]->num_elements; j++) {
1058 int slice_mb_begin = slice_param->macroblock_address;
1059 int slice_mb_number = slice_param->num_macroblocks;
1060 unsigned int mb_intra_ub;
1062 for (i = 0; i < slice_mb_number;) {
1063 int mb_count = i + slice_mb_begin;
1065 mb_x = mb_count % mb_width;
1066 mb_y = mb_count / mb_width;
/* Intra-prediction neighbour availability for this MB position */
1070 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
1074 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
1077 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
1079 if (mb_x != (mb_width -1))
1080 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
/* Emit an 8-dword MEDIA_OBJECT for this MB */
1083 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
1084 *command_ptr++ = kernel;
1091 *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
1092 *command_ptr++ = ( (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
1094 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Terminate the second-level batch */
1103 *command_ptr++ = MI_BATCH_BUFFER_END;
1106 dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/*
 * Build and submit the MPEG-2 VME media pipeline: choose intra/inter
 * kernel from the picture type, fill the second-level batch (scoreboard
 * walker when all slices start on row boundaries and the picture is not
 * intra, raster fill otherwise), then chain to it from the main batch.
 */
1110 gen9_vme_mpeg2_pipeline_programing(VADriverContextP ctx,
1111 struct encode_state *encode_state,
1113 struct intel_encoder_context *encoder_context)
1115 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1116 struct intel_batchbuffer *batch = encoder_context->base.batch;
1117 VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
1118 int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
1119 int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;
1120 bool allow_hwscore = true;
1123 VAEncPictureParameterBufferMPEG2 *pic_param = NULL;
/* Any slice not starting at a row boundary disables the HW scoreboard walker */
1125 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
1127 VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[s]->buffer;
1129 for (j = 0; j < encode_state->slice_params_ext[s]->num_elements; j++) {
1130 if (slice_param->macroblock_address % width_in_mbs) {
1131 allow_hwscore = false;
/* Intra pictures always use the raster path with the intra kernel */
1137 pic_param = (VAEncPictureParameterBufferMPEG2 *)encode_state->pic_param_ext->buffer;
1138 if (pic_param->picture_type == VAEncPictureTypeIntra) {
1139 allow_hwscore = false;
1140 kernel_shader = VME_INTRA_SHADER;
1142 kernel_shader = VME_INTER_SHADER;
1146 gen9wa_vme_mpeg2_walker_fill_vme_batchbuffer(ctx,
1148 width_in_mbs, height_in_mbs,
1152 gen9_vme_mpeg2_fill_vme_batchbuffer(ctx,
1154 width_in_mbs, height_in_mbs,
1155 is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER,
/* Submit: set up the GPE pipeline and chain to the second-level batch */
1159 intel_batchbuffer_start_atomic(batch, 0x1000);
1160 gen9_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
1161 BEGIN_BATCH(batch, 4);
1162 OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1164 vme_context->vme_batchbuffer.bo,
1165 I915_GEM_DOMAIN_COMMAND, 0,
1167 OUT_BATCH(batch, 0);
1168 OUT_BATCH(batch, 0);
1169 ADVANCE_BATCH(batch);
1171 gen9_gpe_pipeline_end(ctx, &vme_context->gpe_context, batch);
1173 intel_batchbuffer_end_atomic(batch);
/*
 * Prepare the MPEG-2 VME stage for one frame: refresh the cached MPEG-2
 * level when the sequence extension changes, set up all memory objects
 * (surfaces, interface descriptors, CURBE constants), then program the
 * media pipeline according to the first slice's intra/inter mode.
 * Returns vaStatus (VA_STATUS_SUCCESS unless an elided path changes it).
 */
1177 gen9_vme_mpeg2_prepare(VADriverContextP ctx,
1178 struct encode_state *encode_state,
1179 struct intel_encoder_context *encoder_context)
1181 VAStatus vaStatus = VA_STATUS_SUCCESS;
1182 VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[0]->buffer;
1183 VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
1184 struct gen6_vme_context *vme_context = encoder_context->vme_context;
/* Re-latch the level bits whenever they differ from the cached value */
1186 if ((!vme_context->mpeg2_level) ||
1187 (vme_context->mpeg2_level != (seq_param->sequence_extension.bits.profile_and_level_indication & MPEG2_LEVEL_MASK))) {
1188 vme_context->mpeg2_level = seq_param->sequence_extension.bits.profile_and_level_indication & MPEG2_LEVEL_MASK;
1191 /*Setup all the memory object*/
1192 gen9_vme_mpeg2_surface_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
1193 gen9_vme_interface_setup(ctx, encode_state, encoder_context);
1194 //gen9_vme_vme_state_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
1195 intel_vme_mpeg2_state_setup(ctx, encode_state, encoder_context);
1196 gen9_vme_constant_setup(ctx, encode_state, encoder_context, 1);
1198 /*Programing media pipeline*/
1199 gen9_vme_mpeg2_pipeline_programing(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
/*
 * Top-level MPEG-2 VME pipeline entry point: init the media state,
 * prepare per-frame objects, kick the GPU run, then tear down.
 * Always reports success.
 */
1205 gen9_vme_mpeg2_pipeline(VADriverContextP ctx,
1207 struct encode_state *encode_state,
1208 struct intel_encoder_context *encoder_context)
1210 gen9_vme_media_init(ctx, encoder_context);
1211 gen9_vme_mpeg2_prepare(ctx, encode_state, encoder_context);
1212 gen9_vme_run(ctx, encode_state, encoder_context);
1213 gen9_vme_stop(ctx, encode_state, encoder_context);
1215 return VA_STATUS_SUCCESS;
/*
 * VP8 wrapper around the generic VME output-buffer setup: derives the
 * MB grid from the VP8 sequence parameters (frame dims rounded up to 16)
 * and forwards to gen9_vme_output_buffer_setup at the given binding index.
 */
1219 gen9_vme_vp8_output_buffer_setup(VADriverContextP ctx,
1220 struct encode_state *encode_state,
1223 struct intel_encoder_context *encoder_context)
1225 VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
1226 int width_in_mbs = ALIGN(seq_param->frame_width, 16) / 16;
1227 int height_in_mbs = ALIGN(seq_param->frame_height, 16) / 16;
1229 gen9_vme_output_buffer_setup(ctx, encode_state, index, encoder_context, is_intra, width_in_mbs, height_in_mbs);
/*
 * VP8 wrapper around the generic second-level VME batch-buffer setup:
 * computes the MB grid from the VP8 sequence parameters and forwards to
 * gen9_vme_output_vme_batchbuffer_setup at the given binding index.
 */
1233 gen9_vme_vp8_output_vme_batchbuffer_setup(VADriverContextP ctx,
1234 struct encode_state *encode_state,
1236 struct intel_encoder_context *encoder_context)
1238 VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
1239 int width_in_mbs = ALIGN(seq_param->frame_width, 16) / 16;
1240 int height_in_mbs = ALIGN(seq_param->frame_height, 16) / 16;
1242 gen9_vme_output_vme_batchbuffer_setup(ctx, encode_state, index, encoder_context, width_in_mbs, height_in_mbs);
/*
 * Bind all VP8 VME surfaces:
 *   index 0       - current source picture (VME view)
 *   index 4 / 6   - media luma / chroma views of the source
 *   index 1 / 2   - last / alternate reference pictures (when present)
 *   index 3       - VME output buffer
 *   index 5       - second-level VME batch buffer
 * Always returns VA_STATUS_SUCCESS.
 */
1246 gen9_vme_vp8_surface_setup(VADriverContextP ctx,
1247 struct encode_state *encode_state,
1249 struct intel_encoder_context *encoder_context)
1251 struct object_surface *obj_surface;
1253 /*Setup surfaces state*/
1254 /* current picture for encoding */
1255 obj_surface = encode_state->input_yuv_object;
1256 gen9_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
1257 gen9_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
1258 gen9_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);
/* first reference frame, bound only when it has a backing bo */
1262 obj_surface = encode_state->reference_objects[0];
1264 if (obj_surface->bo != NULL)
1265 gen9_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);
/* second reference frame; the object itself may be absent */
1268 obj_surface = encode_state->reference_objects[1];
1270 if (obj_surface && obj_surface->bo != NULL)
1271 gen9_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);
1275 gen9_vme_vp8_output_buffer_setup(ctx, encode_state, 3, is_intra, encoder_context);
1276 gen9_vme_vp8_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);
1278 return VA_STATUS_SUCCESS;
/*
 * Program the VP8 VME media pipeline: fill the second-level batch buffer
 * and chain to it from the primary batch.
 * NOTE(review): this path reuses gen9wa_vme_mpeg2_walker_fill_vme_batchbuffer
 * for VP8 — presumably valid because the MB-grid walking is codec-agnostic,
 * but worth confirming against the non-walker branch elided from this excerpt.
 */
1282 gen9_vme_vp8_pipeline_programing(VADriverContextP ctx,
1283 struct encode_state *encode_state,
1285 struct intel_encoder_context *encoder_context)
1287 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1288 struct intel_batchbuffer *batch = encoder_context->base.batch;
1289 VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
1290 int width_in_mbs = ALIGN(seq_param->frame_width, 16) / 16;
1291 int height_in_mbs = ALIGN(seq_param->frame_height, 16) / 16;
1292 int kernel_shader = (is_intra ? VME_INTRA_SHADER : VME_INTER_SHADER);
1294 gen9wa_vme_mpeg2_walker_fill_vme_batchbuffer(ctx,
1296 width_in_mbs, height_in_mbs,
/* Chain from the primary batch into the second-level VME batch buffer */
1300 intel_batchbuffer_start_atomic(batch, 0x1000);
1301 gen9_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
1302 BEGIN_BATCH(batch, 4);
1303 OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1305 vme_context->vme_batchbuffer.bo,
1306 I915_GEM_DOMAIN_COMMAND, 0,
1308 OUT_BATCH(batch, 0);
1309 OUT_BATCH(batch, 0);
1310 ADVANCE_BATCH(batch);
1312 gen9_gpe_pipeline_end(ctx, &vme_context->gpe_context, batch);
1314 intel_batchbuffer_end_atomic(batch);
/*
 * Prepare the VP8 VME stage for one frame: update the MB/MV cost tables,
 * set up surfaces/interface/constants, then program the media pipeline.
 * frame_type bit 0 == 0 means key frame, hence is_intra is its negation.
 */
1317 static VAStatus gen9_vme_vp8_prepare(VADriverContextP ctx,
1318 struct encode_state *encode_state,
1319 struct intel_encoder_context *encoder_context)
1321 VAStatus vaStatus = VA_STATUS_SUCCESS;
1322 VAEncPictureParameterBufferVP8 *pPicParameter = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
1323 int is_intra = !pPicParameter->pic_flags.bits.frame_type;
1325 /* update vp8 mbmv cost */
1326 intel_vme_vp8_update_mbmv_cost(ctx, encode_state, encoder_context);
1328 /*Setup all the memory object*/
1329 gen9_vme_vp8_surface_setup(ctx, encode_state, is_intra, encoder_context);
1330 gen9_vme_interface_setup(ctx, encode_state, encoder_context);
1331 gen9_vme_constant_setup(ctx, encode_state, encoder_context, 1);
1333 /*Programing media pipeline*/
1334 gen9_vme_vp8_pipeline_programing(ctx, encode_state, is_intra, encoder_context);
/*
 * Top-level VP8 VME pipeline entry point: init media state, prepare the
 * frame, run the GPU job, stop. Always reports success.
 */
1340 gen9_vme_vp8_pipeline(VADriverContextP ctx,
1342 struct encode_state *encode_state,
1343 struct intel_encoder_context *encoder_context)
1345 gen9_vme_media_init(ctx, encoder_context);
1346 gen9_vme_vp8_prepare(ctx, encode_state, encoder_context);
1347 gen9_vme_run(ctx, encode_state, encoder_context);
1348 gen9_vme_stop(ctx, encode_state, encoder_context);
1350 return VA_STATUS_SUCCESS;
/*
 * Allocate and bind the HEVC VME output buffer. The per-MB record size
 * differs by slice type: 2x INTRA_VME_OUTPUT_IN_BYTES for intra slices,
 * 24x for inter slices (see the in-code breakdown of the 16*24 layout).
 * The buffer is then exposed through the binding table at `index`.
 */
1356 gen9_vme_hevc_output_buffer_setup(VADriverContextP ctx,
1357 struct encode_state *encode_state,
1359 struct intel_encoder_context *encoder_context)
1362 struct i965_driver_data *i965 = i965_driver_data(ctx);
1363 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1364 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
1365 VAEncSliceParameterBufferHEVC *pSliceParameter = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[0]->buffer;
1366 int is_intra = pSliceParameter->slice_type == HEVC_SLICE_I;
1367 int width_in_mbs = (pSequenceParameter->pic_width_in_luma_samples + 15)/16;
1368 int height_in_mbs = (pSequenceParameter->pic_height_in_luma_samples + 15)/16;
/* One fixed-size record per 16x16 MB of the picture */
1371 vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
1372 vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
1375 vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
1377 vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
1379 * Inter MV . 32-byte Intra search + 16 IME info + 128 IME MV + 32 IME Ref
1380 * + 16 FBR Info + 128 FBR MV + 32 FBR Ref.
1381 * 16 * (2 + 2 * (1 + 8 + 2))= 16 * 24.
1384 vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
1385 "VME output buffer",
1386 vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
1388 assert(vme_context->vme_output.bo);
1389 vme_context->vme_buffer_suface_setup(ctx,
1390 &vme_context->gpe_context,
1391 &vme_context->vme_output,
1392 BINDING_TABLE_OFFSET(index),
1393 SURFACE_STATE_OFFSET(index));
/*
 * Allocate the second-level HEVC VME batch buffer: one 64-byte (4-OWORD)
 * command block per MB, plus one extra block (for the trailing
 * MI_BATCH_BUFFER_END written by the fill routines).
 */
1397 gen9_vme_hevc_output_vme_batchbuffer_setup(VADriverContextP ctx,
1398 struct encode_state *encode_state,
1400 struct intel_encoder_context *encoder_context)
1403 struct i965_driver_data *i965 = i965_driver_data(ctx);
1404 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1405 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
1406 int width_in_mbs = (pSequenceParameter->pic_width_in_luma_samples + 15)/16;
1407 int height_in_mbs = (pSequenceParameter->pic_height_in_luma_samples + 15)/16;
1409 vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
1410 vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
1411 vme_context->vme_batchbuffer.pitch = 16;
1412 vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
1414 vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
/*
 * Bind all HEVC VME surfaces. For >8-bit content (main10) the VME engine
 * reads the NV12 shadow surface produced by the P010->NV12 conversion
 * (hevc_encoder_surface->nv12_surface_obj) instead of the raw input.
 * Binding layout mirrors the other codecs: 0 source, 4/6 media luma/chroma,
 * reference list 0 (and list 1 for B slices) via
 * intel_hevc_vme_reference_state, 3 output buffer, 5 batch buffer.
 */
1418 gen9_vme_hevc_surface_setup(VADriverContextP ctx,
1419 struct encode_state *encode_state,
1421 struct intel_encoder_context *encoder_context)
1423 struct object_surface *obj_surface;
1424 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
1425 GenHevcSurface *hevc_encoder_surface = NULL;
1427 /*Setup surfaces state*/
1428 /* current picture for encoding */
1429 obj_surface = encode_state->input_yuv_object;
/* main10 input: switch to the 8-bit NV12 shadow surface for VME */
1431 if((pSequenceParameter->seq_fields.bits.bit_depth_luma_minus8 > 0)
1432 || (pSequenceParameter->seq_fields.bits.bit_depth_chroma_minus8 > 0)) {
1433 hevc_encoder_surface = (GenHevcSurface *) obj_surface->private_data;
1434 assert(hevc_encoder_surface);
1435 obj_surface = hevc_encoder_surface->nv12_surface_obj;
1437 gen9_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
1438 gen9_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
1439 gen9_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);
1442 VAEncSliceParameterBufferHEVC *slice_param = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[0]->buffer;
1445 slice_type = slice_param->slice_type;
1446 assert(slice_type != HEVC_SLICE_I);
/* reference list 0 at binding index 1; list 1 (B slices only) at index 2 */
1449 intel_hevc_vme_reference_state(ctx, encode_state, encoder_context, 0, 1, gen9_vme_source_surface_state);
1451 if (slice_type == HEVC_SLICE_B)
1452 intel_hevc_vme_reference_state(ctx, encode_state, encoder_context, 1, 2, gen9_vme_source_surface_state);
1456 gen9_vme_hevc_output_buffer_setup(ctx, encode_state, 3, encoder_context);
1457 gen9_vme_hevc_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);
1459 return VA_STATUS_SUCCESS;
/*
 * Fill the second-level HEVC VME batch buffer in HW-scoreboard walking
 * order. Slices are addressed in CTBs, so slice_segment_address and
 * num_ctu_in_slice are scaled by num_mb_in_ctb (MBs per CTB, squared from
 * the per-dimension count) to get 16x16-MB units.
 *
 * Each emitted MEDIA_OBJECT carries: the kernel id, USE_SCOREBOARD, the
 * (x, y) scoreboard term, the dependency mask (score_dep), the MB position
 * packed as (mb_width << 16 | y << 8 | x), and per-MB intra-availability
 * flags (mb_intra_ub), followed by a MEDIA_STATE_FLUSH.
 *
 * NOTE(review): the two outer loops appear to implement a diagonal
 * dependency-respecting walk over the MB grid (first phase up to
 * mb_width - 2, second phase for the remaining columns); several interior
 * lines (inner-position updates, loop steps) are elided from this excerpt,
 * so the exact traversal should be confirmed against the full source.
 */
1462 gen9wa_vme_hevc_walker_fill_vme_batchbuffer(VADriverContextP ctx,
1463 struct encode_state *encode_state,
1464 int mb_width, int mb_height,
1466 int transform_8x8_mode_flag,
1467 struct intel_encoder_context *encoder_context)
1469 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1472 unsigned int *command_ptr;
1473 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
1474 int log2_cu_size = pSequenceParameter->log2_min_luma_coding_block_size_minus3 + 3;
1475 int log2_ctb_size = pSequenceParameter->log2_diff_max_min_luma_coding_block_size + log2_cu_size;
1476 int ctb_size = 1 << log2_ctb_size;
1477 int num_mb_in_ctb = (ctb_size + 15)/16;
1478 num_mb_in_ctb = num_mb_in_ctb * num_mb_in_ctb;
1480 #define USE_SCOREBOARD (1 << 21)
1482 dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
1483 command_ptr = vme_context->vme_batchbuffer.bo->virtual;
1485 /*slice_segment_address must picture_width_in_ctb alainment */
1486 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
1487 VAEncSliceParameterBufferHEVC *pSliceParameter = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[s]->buffer;
1488 int first_mb = pSliceParameter->slice_segment_address * num_mb_in_ctb;
1489 int num_mb = pSliceParameter->num_ctu_in_slice * num_mb_in_ctb;
1490 unsigned int mb_intra_ub, score_dep;
1491 int x_outer, y_outer, x_inner, y_inner;
1492 int xtemp_outer = 0;
1494 x_outer = first_mb % mb_width;
1495 y_outer = first_mb / mb_width;
/* Phase 1: walk columns up to mb_width - 2 */
1498 for (; x_outer < (mb_width -2 ) && !loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
1501 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
/* left neighbor available: intra flag + scoreboard-A dependency */
1505 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
1506 score_dep |= MB_SCOREBOARD_A;
/* above row present: top (B), top-left (D), and top-right (C) neighbors */
1508 if (y_inner != mb_row) {
1509 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
1510 score_dep |= MB_SCOREBOARD_B;
1512 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
1513 if (x_inner != (mb_width -1)) {
1514 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
1515 score_dep |= MB_SCOREBOARD_C;
1519 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
1520 *command_ptr++ = kernel;
1521 *command_ptr++ = USE_SCOREBOARD;
1524 /* the (X, Y) term of scoreboard */
1525 *command_ptr++ = ((y_inner << 16) | x_inner);
1526 *command_ptr++ = score_dep;
1528 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
1529 *command_ptr++ = ((1 << 18) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
1530 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Phase 2: remaining columns starting at mb_width - 2 */
1539 xtemp_outer = mb_width - 2;
1540 if (xtemp_outer < 0)
1542 x_outer = xtemp_outer;
1543 y_outer = first_mb / mb_width;
1544 for (;!loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
1547 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
1551 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
1552 score_dep |= MB_SCOREBOARD_A;
1554 if (y_inner != mb_row) {
1555 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
1556 score_dep |= MB_SCOREBOARD_B;
1558 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
1560 if (x_inner != (mb_width -1)) {
1561 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
1562 score_dep |= MB_SCOREBOARD_C;
1566 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
1567 *command_ptr++ = kernel;
1568 *command_ptr++ = USE_SCOREBOARD;
1571 /* the (X, Y) term of scoreboard */
1572 *command_ptr++ = ((y_inner << 16) | x_inner);
1573 *command_ptr++ = score_dep;
1575 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
1576 *command_ptr++ = ((1 << 18) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
1578 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
1584 if (x_outer >= mb_width) {
1586 x_outer = xtemp_outer;
/* Terminate the second-level batch */
1591 *command_ptr++ = MI_BATCH_BUFFER_END;
1594 dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/*
 * Fill the second-level HEVC VME batch buffer in plain raster (linear MB)
 * order — the non-scoreboard path. Slice CTB addresses/counts are scaled
 * by num_mb_in_ctb to 16x16-MB units. For each MB one MEDIA_OBJECT is
 * emitted with the packed MB position, intra-availability flags, and the
 * transform_8x8 flag, followed by a MEDIA_STATE_FLUSH.
 * NOTE(review): interior lines are elided in this excerpt; comments
 * describe only the visible code.
 */
1598 gen9_vme_hevc_fill_vme_batchbuffer(VADriverContextP ctx,
1599 struct encode_state *encode_state,
1600 int mb_width, int mb_height,
1602 int transform_8x8_mode_flag,
1603 struct intel_encoder_context *encoder_context)
1605 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1606 int mb_x = 0, mb_y = 0;
1608 unsigned int *command_ptr;
1609 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
1610 int log2_cu_size = pSequenceParameter->log2_min_luma_coding_block_size_minus3 + 3;
1611 int log2_ctb_size = pSequenceParameter->log2_diff_max_min_luma_coding_block_size + log2_cu_size;
1613 int ctb_size = 1 << log2_ctb_size;
1614 int num_mb_in_ctb = (ctb_size + 15)/16;
1615 num_mb_in_ctb = num_mb_in_ctb * num_mb_in_ctb;
1617 dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
1618 command_ptr = vme_context->vme_batchbuffer.bo->virtual;
1620 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
1621 VAEncSliceParameterBufferHEVC *pSliceParameter = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[s]->buffer;
/* convert CTB-addressed slice start/length into MB units */
1622 int slice_mb_begin = pSliceParameter->slice_segment_address * num_mb_in_ctb;
1623 int slice_mb_number = pSliceParameter->num_ctu_in_slice * num_mb_in_ctb;
1625 unsigned int mb_intra_ub;
1626 int slice_mb_x = slice_mb_begin % mb_width;
1627 for (i = 0; i < slice_mb_number; ) {
1628 int mb_count = i + slice_mb_begin;
1629 mb_x = mb_count % mb_width;
1630 mb_y = mb_count / mb_width;
/* neighbor availability for intra prediction: left (AE), top (B),
 * top-left (D), top-right (C, absent in the last column) */
1634 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
1637 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
1639 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
1640 if (mb_x != (mb_width -1))
1641 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
/* slices that start mid-row lose cross-slice-boundary neighbors */
1645 mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
1646 mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);
1647 if ((i == (mb_width - 1)) && slice_mb_x) {
1648 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
1652 if ((i == mb_width) && slice_mb_x) {
1653 mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);
1656 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
1657 *command_ptr++ = kernel;
1664 *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
1665 *command_ptr++ = ( (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
1667 *command_ptr++ = CMD_MEDIA_STATE_FLUSH;
/* Terminate the second-level batch */
1673 *command_ptr++ = MI_BATCH_BUFFER_END;
1676 dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/*
 * Program the HEVC VME media pipeline: pick the kernel from the slice type
 * (I -> intra, P -> inter, otherwise B-inter with an inter fallback branch
 * visible below), disable HW scoreboarding when any slice does not start
 * on a MB-row boundary, fill the second-level batch buffer, then chain to
 * it from the primary batch.
 * NOTE(review): this path uses BEGIN_BATCH(batch, 3) with a single zero
 * dword after the relocation, whereas the MPEG-2/VP8 paths use 4 dwords
 * with two zeros — confirm against the elided OUT_RELOC variant which
 * chaining form is intended here.
 */
1679 static void gen9_vme_hevc_pipeline_programing(VADriverContextP ctx,
1680 struct encode_state *encode_state,
1681 struct intel_encoder_context *encoder_context)
1683 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1684 struct intel_batchbuffer *batch = encoder_context->base.batch;
1685 VAEncSliceParameterBufferHEVC *pSliceParameter = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[0]->buffer;
1686 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
1687 int width_in_mbs = (pSequenceParameter->pic_width_in_luma_samples + 15)/16;
1688 int height_in_mbs = (pSequenceParameter->pic_height_in_luma_samples + 15)/16;
1690 bool allow_hwscore = true;
1693 int log2_cu_size = pSequenceParameter->log2_min_luma_coding_block_size_minus3 + 3;
1694 int log2_ctb_size = pSequenceParameter->log2_diff_max_min_luma_coding_block_size + log2_cu_size;
1696 int ctb_size = 1 << log2_ctb_size;
1697 int num_mb_in_ctb = (ctb_size + 15)/16;
1698 int transform_8x8_mode_flag = 1;
1699 num_mb_in_ctb = num_mb_in_ctb * num_mb_in_ctb;
/* HW scoreboarding requires every slice to start on a MB-row boundary */
1701 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
1702 pSliceParameter = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[s]->buffer;
1703 int slice_mb_begin = pSliceParameter->slice_segment_address * num_mb_in_ctb;
1704 if ((slice_mb_begin % width_in_mbs)) {
1705 allow_hwscore = false;
1710 if (pSliceParameter->slice_type == HEVC_SLICE_I) {
1711 kernel_shader = VME_INTRA_SHADER;
1712 } else if (pSliceParameter->slice_type == HEVC_SLICE_P) {
1713 kernel_shader = VME_INTER_SHADER;
1715 kernel_shader = VME_BINTER_SHADER;
1717 kernel_shader = VME_INTER_SHADER;
1720 gen9wa_vme_hevc_walker_fill_vme_batchbuffer(ctx,
1722 width_in_mbs, height_in_mbs,
1724 transform_8x8_mode_flag,
1727 gen9_vme_hevc_fill_vme_batchbuffer(ctx,
1729 width_in_mbs, height_in_mbs,
1731 transform_8x8_mode_flag,
/* Chain from the primary batch into the second-level VME batch buffer */
1734 intel_batchbuffer_start_atomic(batch, 0x1000);
1735 gen9_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
1736 BEGIN_BATCH(batch, 3);
1737 OUT_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
1739 vme_context->vme_batchbuffer.bo,
1740 I915_GEM_DOMAIN_COMMAND, 0,
1742 OUT_BATCH(batch, 0);
1743 ADVANCE_BATCH(batch);
1745 gen9_gpe_pipeline_end(ctx, &vme_context->gpe_context, batch);
1747 intel_batchbuffer_end_atomic(batch);
/*
 * Lazily attach encoder-private HEVC state to a surface:
 *  - allocate the motion-vector temporal buffer sized by CTB granularity
 *    (16 vs larger CTBs use different divisors), in 64-byte units;
 *  - install gen_free_hevc_surface as the destructor for the private data;
 *  - for P010 input not yet converted, create/reuse an NV12 shadow surface
 *    and run the P010->NV12 blit through i965_image_processing, then sync.
 * Returns VA_STATUS_SUCCESS (an elided early-return exists on surface
 * creation failure).
 */
1750 static VAStatus gen9_intel_init_hevc_surface(VADriverContextP ctx,
1751 struct intel_encoder_context *encoder_context,
1752 struct encode_state *encode_state,
1753 struct object_surface *input_obj_surface)
1755 struct i965_driver_data *i965 = i965_driver_data(ctx);
1756 struct gen9_hcpe_context *mfc_context = encoder_context->mfc_context;
1757 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
1758 GenHevcSurface *hevc_encoder_surface;
1759 struct i965_surface src_surface, dst_surface;
1760 struct object_surface *obj_surface;
1766 obj_surface = input_obj_surface;
1767 assert(obj_surface && obj_surface->bo);
/* First touch of this surface: allocate per-surface private state */
1769 if (obj_surface->private_data == NULL) {
/* MV temporal buffer size depends on CTB size; value is in 64-byte units */
1771 if (mfc_context->pic_size.ctb_size == 16)
1772 size = ((pSequenceParameter->pic_width_in_luma_samples + 63) >> 6) *
1773 ((pSequenceParameter->pic_height_in_luma_samples + 15) >> 4);
1775 size = ((pSequenceParameter->pic_width_in_luma_samples + 31) >> 5) *
1776 ((pSequenceParameter->pic_height_in_luma_samples + 31) >> 5);
1777 size <<= 6; /* in unit of 64bytes */
/* NOTE(review): calloc args are conventionally (nmemb, size); the swapped
 * order here is harmless but worth normalizing. */
1779 hevc_encoder_surface = calloc(sizeof(GenHevcSurface), 1);
1781 assert(hevc_encoder_surface);
1782 hevc_encoder_surface->motion_vector_temporal_bo =
1783 dri_bo_alloc(i965->intel.bufmgr,
1784 "motion vector temporal buffer",
1787 assert(hevc_encoder_surface->motion_vector_temporal_bo);
1789 hevc_encoder_surface->ctx = ctx;
1790 hevc_encoder_surface->nv12_surface_obj = NULL;
1791 hevc_encoder_surface->nv12_surface_id = VA_INVALID_SURFACE;
1792 hevc_encoder_surface->has_p010_to_nv12_done = 0;
1794 obj_surface->private_data = (void *)hevc_encoder_surface;
1795 obj_surface->free_private_data = (void *)gen_free_hevc_surface;
1798 hevc_encoder_surface = (GenHevcSurface *) obj_surface->private_data;
/* P010 input that has not yet been down-converted: produce NV12 shadow */
1800 if(!hevc_encoder_surface->has_p010_to_nv12_done && obj_surface->fourcc == VA_FOURCC_P010)
1805 rect.width = obj_surface->orig_width;
1806 rect.height = obj_surface->orig_height;
1808 src_surface.base = (struct object_base *)obj_surface;
1809 src_surface.type = I965_SURFACE_TYPE_SURFACE;
1810 src_surface.flags = I965_SURFACE_FLAG_FRAME;
/* Create the NV12 shadow surface only once per source surface */
1812 if(SURFACE(hevc_encoder_surface->nv12_surface_id) == NULL)
1814 status = i965_CreateSurfaces(ctx,
1815 obj_surface->orig_width,
1816 obj_surface->orig_height,
1817 VA_RT_FORMAT_YUV420,
1819 &hevc_encoder_surface->nv12_surface_id);
1820 assert(status == VA_STATUS_SUCCESS);
1822 if (status != VA_STATUS_SUCCESS)
1826 obj_surface = SURFACE(hevc_encoder_surface->nv12_surface_id);
1827 hevc_encoder_surface->nv12_surface_obj = obj_surface;
1828 assert(obj_surface);
1829 i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
1831 dst_surface.base = (struct object_base *)obj_surface;
1832 dst_surface.type = I965_SURFACE_TYPE_SURFACE;
1833 dst_surface.flags = I965_SURFACE_FLAG_FRAME;
/* Blit P010 -> NV12, then wait for completion before VME reads it */
1835 status = i965_image_processing(ctx,
1840 assert(status == VA_STATUS_SUCCESS);
1841 hevc_encoder_surface->has_p010_to_nv12_done = 1;
1842 i965_SyncSurface(ctx,hevc_encoder_surface->nv12_surface_id);
1844 return VA_STATUS_SUCCESS;
/*
 * Ensure every surface touched by this HEVC encode has its private state
 * initialized (main10 NV12 shadow, MV temporal buffer):
 *  - current render target: force a fresh P010->NV12 conversion
 *    (has_p010_to_nv12_done cleared) since its pixels may have changed;
 *  - reconstructed surface: mark conversion done (output is written by HW,
 *    no conversion needed);
 *  - each reference: take a bo reference into mfc_context and init it.
 * Always returns VA_STATUS_SUCCESS.
 */
1847 static VAStatus gen9_intel_hevc_input_check(VADriverContextP ctx,
1848 struct encode_state *encode_state,
1849 struct intel_encoder_context *encoder_context)
1851 struct i965_driver_data *i965 = i965_driver_data(ctx);
1852 struct gen9_hcpe_context *mfc_context = encoder_context->mfc_context;
1853 struct object_surface *obj_surface;
1854 GenHevcSurface *hevc_encoder_surface = NULL;
1857 obj_surface = SURFACE(encode_state->current_render_target);
1858 assert(obj_surface && obj_surface->bo);
1859 hevc_encoder_surface = (GenHevcSurface *) obj_surface->private_data;
/* input pixels may be new -> redo the P010->NV12 conversion */
1860 if(hevc_encoder_surface)
1861 hevc_encoder_surface->has_p010_to_nv12_done = 0;
1862 gen9_intel_init_hevc_surface(ctx,encoder_context,encode_state,obj_surface);
1864 /* Setup current frame and current direct mv buffer*/
1865 obj_surface = encode_state->reconstructed_object;
1866 hevc_encoder_surface = NULL;
1867 hevc_encoder_surface = (GenHevcSurface *) obj_surface->private_data;
/* recon surface is HW-written; skip re-conversion */
1868 if(hevc_encoder_surface)
1869 hevc_encoder_surface->has_p010_to_nv12_done = 1;
1870 gen9_intel_init_hevc_surface(ctx,encoder_context,encode_state,obj_surface);
1872 /* Setup reference frames and direct mv buffers*/
1873 for (i = 0; i < MAX_HCP_REFERENCE_SURFACES; i++) {
1874 obj_surface = encode_state->reference_objects[i];
1876 if (obj_surface && obj_surface->bo) {
1877 mfc_context->reference_surfaces[i].bo = obj_surface->bo;
1878 dri_bo_reference(obj_surface->bo);
1880 gen9_intel_init_hevc_surface(ctx,encoder_context,encode_state,obj_surface);
1886 return VA_STATUS_SUCCESS;
/*
 * Prepare the HEVC VME stage for one frame: refresh the cached level idc
 * when the sequence changes (the AVC-style level is reused for HEVC VME),
 * validate/convert input surfaces for main10, update MB/MV cost tables,
 * set up surfaces/interface/constants, then program the media pipeline.
 */
1889 static VAStatus gen9_vme_hevc_prepare(VADriverContextP ctx,
1890 struct encode_state *encode_state,
1891 struct intel_encoder_context *encoder_context)
1893 VAStatus vaStatus = VA_STATUS_SUCCESS;
1894 VAEncSliceParameterBufferHEVC *pSliceParameter = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[0]->buffer;
1895 int is_intra = pSliceParameter->slice_type == HEVC_SLICE_I;
1896 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
1897 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1899 /* here use the avc level for hevc vme */
1900 if (!vme_context->hevc_level ||
1901 (vme_context->hevc_level != pSequenceParameter->general_level_idc)) {
1902 vme_context->hevc_level = pSequenceParameter->general_level_idc;
1905 //internal input check for main10
1906 gen9_intel_hevc_input_check(ctx,encode_state,encoder_context);
1908 intel_vme_hevc_update_mbmv_cost(ctx, encode_state, encoder_context);
1910 /*Setup all the memory object*/
1911 gen9_vme_hevc_surface_setup(ctx, encode_state, is_intra, encoder_context);
1912 gen9_vme_interface_setup(ctx, encode_state, encoder_context);
1913 //gen9_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);
1914 gen9_vme_constant_setup(ctx, encode_state, encoder_context, 1);
1916 /*Programing media pipeline*/
1917 gen9_vme_hevc_pipeline_programing(ctx, encode_state, encoder_context);
/*
 * Top-level HEVC VME pipeline entry point: init media state, prepare the
 * frame, run the GPU job, stop. Always reports success.
 */
1924 gen9_vme_hevc_pipeline(VADriverContextP ctx,
1926 struct encode_state *encode_state,
1927 struct intel_encoder_context *encoder_context)
1929 gen9_vme_media_init(ctx, encoder_context);
1930 gen9_vme_hevc_prepare(ctx, encode_state, encoder_context);
1931 gen9_vme_run(ctx, encode_state, encoder_context);
1932 gen9_vme_stop(ctx, encode_state, encoder_context);
1934 return VA_STATUS_SUCCESS;
/*
 * Destructor for the VME context: tear down the GPE context, drop every
 * buffer-object reference (dri_bo_unreference handles NULL), free heap
 * allocations, and NULL each pointer to guard against double-free.
 * `context` is the gen6_vme_context installed by gen9_vme_context_init.
 */
1939 gen9_vme_context_destroy(void *context)
1941 struct gen6_vme_context *vme_context = context;
1943 gen8_gpe_context_destroy(&vme_context->gpe_context);
1945 dri_bo_unreference(vme_context->vme_output.bo);
1946 vme_context->vme_output.bo = NULL;
1948 dri_bo_unreference(vme_context->vme_state.bo);
1949 vme_context->vme_state.bo = NULL;
1951 dri_bo_unreference(vme_context->vme_batchbuffer.bo);
1952 vme_context->vme_batchbuffer.bo = NULL;
1954 free(vme_context->vme_state_message);
1955 vme_context->vme_state_message = NULL;
1957 dri_bo_unreference(vme_context->i_qp_cost_table);
1958 vme_context->i_qp_cost_table = NULL;
1960 dri_bo_unreference(vme_context->p_qp_cost_table);
1961 vme_context->p_qp_cost_table = NULL;
1963 dri_bo_unreference(vme_context->b_qp_cost_table);
1964 vme_context->b_qp_cost_table = NULL;
1966 free(vme_context->qp_per_mb);
1967 vme_context->qp_per_mb = NULL;
1972 Bool gen9_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
1974 struct gen6_vme_context *vme_context = calloc(1, sizeof(struct gen6_vme_context));
1975 struct i965_kernel *vme_kernel_list = NULL;
1976 int i965_kernel_num;
1978 switch (encoder_context->codec) {
1980 case CODEC_H264_MVC:
1981 vme_kernel_list = gen9_vme_kernels;
1982 encoder_context->vme_pipeline = gen9_vme_pipeline;
1983 i965_kernel_num = sizeof(gen9_vme_kernels) / sizeof(struct i965_kernel);
1987 vme_kernel_list = gen9_vme_mpeg2_kernels;
1988 encoder_context->vme_pipeline = gen9_vme_mpeg2_pipeline;
1989 i965_kernel_num = sizeof(gen9_vme_mpeg2_kernels) / sizeof(struct i965_kernel);
1993 vme_kernel_list = gen9_vme_vp8_kernels;
1994 encoder_context->vme_pipeline = gen9_vme_vp8_pipeline;
1995 i965_kernel_num = sizeof(gen9_vme_vp8_kernels) / sizeof(struct i965_kernel);
1999 vme_kernel_list = gen9_vme_hevc_kernels;
2000 encoder_context->vme_pipeline = gen9_vme_hevc_pipeline;
2001 i965_kernel_num = sizeof(gen9_vme_hevc_kernels) / sizeof(struct i965_kernel);
2005 /* never get here */
2011 assert(vme_context);
2012 vme_context->vme_kernel_sum = i965_kernel_num;
2013 vme_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;
2015 vme_context->gpe_context.idrt_size = sizeof(struct gen8_interface_descriptor_data) * MAX_INTERFACE_DESC_GEN6;
2016 vme_context->gpe_context.curbe_size = CURBE_TOTAL_DATA_LENGTH;
2017 vme_context->gpe_context.sampler_size = 0;
2020 vme_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
2021 vme_context->gpe_context.vfe_state.num_urb_entries = 64;
2022 vme_context->gpe_context.vfe_state.gpgpu_mode = 0;
2023 vme_context->gpe_context.vfe_state.urb_entry_size = 16;
2024 vme_context->gpe_context.vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;
2026 gen7_vme_scoreboard_init(ctx, vme_context);
2028 gen8_gpe_load_kernels(ctx,
2029 &vme_context->gpe_context,
2032 vme_context->vme_surface2_setup = gen8_gpe_surface2_setup;
2033 vme_context->vme_media_rw_surface_setup = gen8_gpe_media_rw_surface_setup;
2034 vme_context->vme_buffer_suface_setup = gen8_gpe_buffer_suface_setup;
2035 vme_context->vme_media_chroma_surface_setup = gen8_gpe_media_chroma_surface_setup;
2037 encoder_context->vme_context = vme_context;
2038 encoder_context->vme_context_destroy = gen9_vme_context_destroy;
2040 vme_context->vme_state_message = malloc(VME_MSG_LENGTH * sizeof(int));