/*
 * Copyright © 2010-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhao Yakui <yakui.zhao@intel.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 */

#include "intel_batchbuffer.h"
#include "intel_driver.h"

#include "i965_defines.h"
#include "i965_drv_video.h"
#include "i965_encoder.h"

#define SURFACE_STATE_PADDED_SIZE       MAX(SURFACE_STATE_PADDED_SIZE_GEN6, SURFACE_STATE_PADDED_SIZE_GEN7)
#define SURFACE_STATE_OFFSET(index)     (SURFACE_STATE_PADDED_SIZE * index)
#define BINDING_TABLE_OFFSET(index)     (SURFACE_STATE_OFFSET(MAX_MEDIA_SURFACES_GEN6) + sizeof(unsigned int) * index)
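
/*
 * Surface states and the binding table share one buffer: MAX_MEDIA_SURFACES_GEN6
 * padded surface states come first, followed by one 32-bit binding table entry
 * per surface, which is what the two offset macros above compute.
 */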
#define VME_INTRA_SHADER        0
#define VME_INTER_SHADER        1
#define VME_BINTER_SHADER       3
#define VME_BATCHBUFFER         2
#define CURBE_ALLOCATION_SIZE   37      /* in 256-bit units */
#define CURBE_TOTAL_DATA_LENGTH (4 * 32) /* in bytes; must not exceed CURBE_ALLOCATION_SIZE * 32 */
#define CURBE_URB_ENTRY_LENGTH  4       /* in 256-bit units; must not exceed CURBE_TOTAL_DATA_LENGTH / 32 */

#define VME_MSG_LENGTH          32
static const uint32_t gen75_vme_intra_frame[][4] = {
#include "shaders/vme/intra_frame_haswell.g75b"

static const uint32_t gen75_vme_inter_frame[][4] = {
#include "shaders/vme/inter_frame_haswell.g75b"

static const uint32_t gen75_vme_inter_bframe[][4] = {
#include "shaders/vme/inter_bframe_haswell.g75b"

static const uint32_t gen75_vme_batchbuffer[][4] = {
#include "shaders/vme/batchbuffer.g75b"

static struct i965_kernel gen75_vme_kernels[] = {
    VME_INTRA_SHADER, /*index*/
    gen75_vme_intra_frame,
    sizeof(gen75_vme_intra_frame),
    gen75_vme_inter_frame,
    sizeof(gen75_vme_inter_frame),
    gen75_vme_batchbuffer,
    sizeof(gen75_vme_batchbuffer),
    gen75_vme_inter_bframe,
    sizeof(gen75_vme_inter_bframe),

static const uint32_t gen75_vme_mpeg2_intra_frame[][4] = {
#include "shaders/vme/intra_frame_haswell.g75b"

static const uint32_t gen75_vme_mpeg2_inter_frame[][4] = {
#include "shaders/vme/mpeg2_inter_haswell.g75b"

static const uint32_t gen75_vme_mpeg2_batchbuffer[][4] = {
#include "shaders/vme/batchbuffer.g75b"

static struct i965_kernel gen75_vme_mpeg2_kernels[] = {
    VME_INTRA_SHADER, /*index*/
    gen75_vme_mpeg2_intra_frame,
    sizeof(gen75_vme_mpeg2_intra_frame),
    gen75_vme_mpeg2_inter_frame,
    sizeof(gen75_vme_mpeg2_inter_frame),
    gen75_vme_mpeg2_batchbuffer,
    sizeof(gen75_vme_mpeg2_batchbuffer),

/* only used for VME source surface state */
gen75_vme_source_surface_state(VADriverContextP ctx,
                               struct object_surface *obj_surface,
                               struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_surface2_setup(ctx,
                                    &vme_context->gpe_context,
                                    BINDING_TABLE_OFFSET(index),
                                    SURFACE_STATE_OFFSET(index));

gen75_vme_media_source_surface_state(VADriverContextP ctx,
                                     struct object_surface *obj_surface,
                                     struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_media_rw_surface_setup(ctx,
                                            &vme_context->gpe_context,
                                            BINDING_TABLE_OFFSET(index),
                                            SURFACE_STATE_OFFSET(index));

gen75_vme_media_chroma_source_surface_state(VADriverContextP ctx,
                                            struct object_surface *obj_surface,
                                            struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    vme_context->vme_media_chroma_surface_setup(ctx,
                                                &vme_context->gpe_context,
                                                BINDING_TABLE_OFFSET(index),
                                                SURFACE_STATE_OFFSET(index));
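
/*
 * The VME output buffer holds one fixed-size record per macroblock
 * (pitch is always 16 bytes): 2 * INTRA_VME_OUTPUT_IN_BYTES per MB for
 * intra-only frames, and 24 * INTRA_VME_OUTPUT_IN_BYTES when inter search
 * results (IME/FBR MVs and references) are also written.
 */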
gen75_vme_output_buffer_setup(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
    if (is_intra)
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
    else
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
    /*
     * Inter MV output: 32 bytes of intra search + 16 bytes IME info +
     * 128 bytes IME MVs + 32 bytes IME refs + 16 bytes FBR info +
     * 128 bytes FBR MVs + 32 bytes FBR refs,
     * i.e. 16 * (2 + 2 * (1 + 8 + 2)) = 16 * 24 bytes.
     */
    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
    assert(vme_context->vme_output.bo);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_output,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));

gen75_vme_output_vme_batchbuffer_setup(VADriverContextP ctx,
                                       struct encode_state *encode_state,
                                       struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;

    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
    vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
    vme_context->vme_batchbuffer.pitch = 16;
    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_batchbuffer,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));
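
/*
 * Binding table slots used by the H.264 VME kernels, as set up below:
 * 0 = VME view of the input picture, 3 = VME output, 4 = media read/write
 * view of the input, 5 = the second-level batch buffer, 6 = chroma plane of
 * the input; reference pictures are added by intel_avc_vme_reference_state().
 */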
gen75_vme_surface_setup(VADriverContextP ctx,
                        struct encode_state *encode_state,
                        struct intel_encoder_context *encoder_context)
    struct object_surface *obj_surface;
    /* Set up surface states */
    /* current picture for encoding */
    obj_surface = encode_state->input_yuv_object;
    gen75_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
    gen75_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
    gen75_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);
    VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;

    slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
    assert(slice_type != SLICE_TYPE_I && slice_type != SLICE_TYPE_SI);

    intel_avc_vme_reference_state(ctx, encode_state, encoder_context, 0, 1, gen75_vme_source_surface_state);

    if (slice_type == SLICE_TYPE_B)
        intel_avc_vme_reference_state(ctx, encode_state, encoder_context, 1, 2, gen75_vme_source_surface_state);

    gen75_vme_output_buffer_setup(ctx, encode_state, 3, encoder_context);
    gen75_vme_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);

    return VA_STATUS_SUCCESS;

static VAStatus gen75_vme_interface_setup(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct gen6_interface_descriptor_data *desc;

    bo = vme_context->gpe_context.idrt.bo;

    for (i = 0; i < vme_context->vme_kernel_sum; i++) {
        struct i965_kernel *kernel;

        kernel = &vme_context->gpe_context.kernels[i];
        assert(sizeof(*desc) == 32);
        /* Set up the descriptor table */
        memset(desc, 0, sizeof(*desc));
        desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
        desc->desc2.sampler_count = 0; /* FIXME: */
        desc->desc2.sampler_state_pointer = 0;
        desc->desc3.binding_table_entry_count = 1; /* FIXME: */
        desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
        desc->desc4.constant_urb_entry_read_offset = 0;
        desc->desc4.constant_urb_entry_read_length = CURBE_URB_ENTRY_LENGTH;

        dri_bo_emit_reloc(bo,
                          I915_GEM_DOMAIN_INSTRUCTION, 0,
                          i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),

    return VA_STATUS_SUCCESS;
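
/*
 * The MV/MB cost table is handed to the kernels through the constant (CURBE)
 * buffer: the first 128 bytes of vme_state_message are copied verbatim into
 * gpe_context.curbe.bo below.
 */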
static VAStatus gen75_vme_constant_setup(VADriverContextP ctx,
                                         struct encode_state *encode_state,
                                         struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    unsigned char *constant_buffer;
    unsigned int *vme_state_message;

    vme_state_message = (unsigned int *)vme_context->vme_state_message;

    if (encoder_context->codec == CODEC_H264) {
        if (vme_context->h264_level >= 30) {
            if (vme_context->h264_level >= 31)
    } else if (encoder_context->codec == CODEC_MPEG2) {

    vme_state_message[31] = mv_num;

    dri_bo_map(vme_context->gpe_context.curbe.bo, 1);
    assert(vme_context->gpe_context.curbe.bo->virtual);
    constant_buffer = vme_context->gpe_context.curbe.bo->virtual;
    /* The VME MV/MB cost table is passed to the kernel via the constant buffer. */
    /* A fixed search path is used for now, so the message is constructed directly. */
    memcpy(constant_buffer, (char *)vme_context->vme_state_message, 128);
    dri_bo_unmap(vme_context->gpe_context.curbe.bo);

    return VA_STATUS_SUCCESS;

static const unsigned int intra_mb_mode_cost_table[] = {
    0x31110001, // for qp0
    0x09110001, // for qp1
    0x15030001, // for qp2
    0x0b030001, // for qp3
    0x0d030011, // for qp4
    0x17210011, // for qp5
    0x41210011, // for qp6
    0x19210011, // for qp7
    0x25050003, // for qp8
    0x1b130003, // for qp9
    0x1d130003, // for qp10
    0x27070021, // for qp11
    0x51310021, // for qp12
    0x29090021, // for qp13
    0x35150005, // for qp14
    0x2b0b0013, // for qp15
    0x2d0d0013, // for qp16
    0x37170007, // for qp17
    0x61410031, // for qp18
    0x39190009, // for qp19
    0x45250015, // for qp20
    0x3b1b000b, // for qp21
    0x3d1d000d, // for qp22
    0x47270017, // for qp23
    0x71510041, // for qp24 ! center for qp=0..30
    0x49290019, // for qp25
    0x55350025, // for qp26
    0x4b2b001b, // for qp27
    0x4d2d001d, // for qp28
    0x57370027, // for qp29
    0x81610051, // for qp30
    0x57270017, // for qp31
    0x81510041, // for qp32 ! center for qp=31..51
    0x59290019, // for qp33
    0x65350025, // for qp34
    0x5b2b001b, // for qp35
    0x5d2d001d, // for qp36
    0x67370027, // for qp37
    0x91610051, // for qp38
    0x69390029, // for qp39
    0x75450035, // for qp40
    0x6b3b002b, // for qp41
    0x6d3d002d, // for qp42
    0x77470037, // for qp43
    0xa1710061, // for qp44
    0x79490039, // for qp45
    0x85550045, // for qp46
    0x7b4b003b, // for qp47
    0x7d4d003d, // for qp48
    0x87570047, // for qp49
    0xb1810071, // for qp50
    0x89590049  // for qp51
};
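
/*
 * For the intra MB mode cost, the table above is indexed by the effective
 * slice QP: pic_init_qp + slice_qp_delta under CQP, or the bit-rate
 * controller's QpPrimeY for the I-slice context otherwise (see the fixup
 * below).
 */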
static void gen75_vme_state_setup_fixup(VADriverContextP ctx,
                                        struct encode_state *encode_state,
                                        struct intel_encoder_context *encoder_context,
                                        unsigned int *vme_state_message)
    struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
    VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
    VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;

    if (slice_param->slice_type != SLICE_TYPE_I &&
        slice_param->slice_type != SLICE_TYPE_SI)
        return;

    if (encoder_context->rate_control_mode == VA_RC_CQP)
        vme_state_message[0] = intra_mb_mode_cost_table[pic_param->pic_init_qp + slice_param->slice_qp_delta];
    else
        vme_state_message[0] = intra_mb_mode_cost_table[mfc_context->bit_rate_control_context[SLICE_TYPE_I].QpPrimeY];
static VAStatus gen75_vme_vme_state_setup(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    unsigned int *vme_state_message;
    /* Pass the MV/MB costs into the VME message on Haswell */
    assert(vme_context->vme_state_message);
    vme_state_message = (unsigned int *)vme_context->vme_state_message;

    vme_state_message[0] = 0x4a4a4a4a;
    vme_state_message[1] = 0x4a4a4a4a;
    vme_state_message[2] = 0x4a4a4a4a;
    vme_state_message[3] = 0x22120200;
    vme_state_message[4] = 0x62524232;

    for (i = 5; i < 8; i++) {
        vme_state_message[i] = 0;

    switch (encoder_context->codec) {
        gen75_vme_state_setup_fixup(ctx, encode_state, encoder_context, vme_state_message);

    return VA_STATUS_SUCCESS;
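
/*
 * Build the second-level batch buffer: one MEDIA_OBJECT command per
 * macroblock, carrying the kernel index, the MB position within the picture,
 * and the intra-prediction availability flags derived from the MB's position
 * inside its slice.
 */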
gen75_vme_fill_vme_batchbuffer(VADriverContextP ctx,
                               struct encode_state *encode_state,
                               int mb_width, int mb_height,
                               int transform_8x8_mode_flag,
                               struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    int mb_x = 0, mb_y = 0;
    unsigned int *command_ptr;

    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
    command_ptr = vme_context->vme_batchbuffer.bo->virtual;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
        int slice_mb_begin = pSliceParameter->macroblock_address;
        int slice_mb_number = pSliceParameter->num_macroblocks;
        unsigned int mb_intra_ub;
        int slice_mb_x = pSliceParameter->macroblock_address % mb_width;

        for (i = 0; i < slice_mb_number; ) {
            int mb_count = i + slice_mb_begin;

            mb_x = mb_count % mb_width;
            mb_y = mb_count / mb_width;
                mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
                mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
                mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
                if (mb_x != (mb_width - 1))
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;

                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
                mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);
                if ((i == (mb_width - 1)) && slice_mb_x) {
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;

            if ((i == mb_width) && slice_mb_x) {
                mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);

            *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
            *command_ptr++ = kernel;
            *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
            *command_ptr++ = ((1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));

    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vme_context->vme_batchbuffer.bo);

static void gen75_vme_media_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    i965_gpe_context_init(ctx, &vme_context->gpe_context);

    /* VME output buffer */
    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
    vme_context->vme_batchbuffer.bo = NULL;

    dri_bo_unreference(vme_context->vme_state.bo);
    vme_context->vme_state.bo = NULL;
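
/*
 * Pick the VME kernel per slice (intra, P/SP inter, or B inter). The
 * hardware-scoreboard walker fill is only used when every slice starts at
 * the beginning of a macroblock row; otherwise the plain per-MB fill above
 * is used.
 */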
static void gen75_vme_pipeline_programing(VADriverContextP ctx,
                                          struct encode_state *encode_state,
                                          struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct intel_batchbuffer *batch = encoder_context->base.batch;
    VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
    int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
    bool allow_hwscore = true;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
        if ((pSliceParameter->macroblock_address % width_in_mbs)) {
            allow_hwscore = false;
        if ((pSliceParameter->slice_type == SLICE_TYPE_I) ||
            (pSliceParameter->slice_type == SLICE_TYPE_SI)) {
            kernel_shader = VME_INTRA_SHADER;
        } else if ((pSliceParameter->slice_type == SLICE_TYPE_P) ||
                   (pSliceParameter->slice_type == SLICE_TYPE_SP)) {
            kernel_shader = VME_INTER_SHADER;
            kernel_shader = VME_BINTER_SHADER;
            kernel_shader = VME_INTER_SHADER;

        gen7_vme_walker_fill_vme_batchbuffer(ctx,
                                             width_in_mbs, height_in_mbs,
                                             pPicParameter->pic_fields.bits.transform_8x8_mode_flag,
        gen75_vme_fill_vme_batchbuffer(ctx,
                                       width_in_mbs, height_in_mbs,
                                       pPicParameter->pic_fields.bits.transform_8x8_mode_flag,

    intel_batchbuffer_start_atomic(batch, 0x1000);
    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
              vme_context->vme_batchbuffer.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
    ADVANCE_BATCH(batch);

    intel_batchbuffer_end_atomic(batch);

static VAStatus gen75_vme_prepare(VADriverContextP ctx,
                                  struct encode_state *encode_state,
                                  struct intel_encoder_context *encoder_context)
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
    int is_intra = pSliceParameter->slice_type == SLICE_TYPE_I;
    VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    if (!vme_context->h264_level ||
        (vme_context->h264_level != pSequenceParameter->level_idc)) {
        vme_context->h264_level = pSequenceParameter->level_idc;

    intel_vme_update_mbmv_cost(ctx, encode_state, encoder_context);
    /* Set up all the memory objects */
    gen75_vme_surface_setup(ctx, encode_state, is_intra, encoder_context);
    gen75_vme_interface_setup(ctx, encode_state, encoder_context);
    //gen75_vme_vme_state_setup(ctx, encode_state, is_intra, encoder_context);
    gen75_vme_constant_setup(ctx, encode_state, encoder_context);

    /* Program the media pipeline */
    gen75_vme_pipeline_programing(ctx, encode_state, encoder_context);
static VAStatus gen75_vme_run(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct intel_encoder_context *encoder_context)
    struct intel_batchbuffer *batch = encoder_context->base.batch;

    intel_batchbuffer_flush(batch);

    return VA_STATUS_SUCCESS;

static VAStatus gen75_vme_stop(VADriverContextP ctx,
                               struct encode_state *encode_state,
                               struct intel_encoder_context *encoder_context)
    return VA_STATUS_SUCCESS;

gen75_vme_pipeline(VADriverContextP ctx,
                   struct encode_state *encode_state,
                   struct intel_encoder_context *encoder_context)
    gen75_vme_media_init(ctx, encoder_context);
    gen75_vme_prepare(ctx, encode_state, encoder_context);
    gen75_vme_run(ctx, encode_state, encoder_context);
    gen75_vme_stop(ctx, encode_state, encoder_context);

    return VA_STATUS_SUCCESS;
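
/*
 * The MPEG-2 path below mirrors the H.264 setup, except that the macroblock
 * counts are derived from the pixel dimensions in the MPEG-2 sequence
 * parameters rather than taken from the H.264 SPS fields.
 */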
gen75_vme_mpeg2_output_buffer_setup(VADriverContextP ctx,
                                    struct encode_state *encode_state,
                                    struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;

    vme_context->vme_output.num_blocks = width_in_mbs * height_in_mbs;
    vme_context->vme_output.pitch = 16; /* in bytes, always 16 */
    if (is_intra)
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 2;
    else
        vme_context->vme_output.size_block = INTRA_VME_OUTPUT_IN_BYTES * 24;
    /*
     * Inter MV output: 32 bytes of intra search + 16 bytes IME info +
     * 128 bytes IME MVs + 32 bytes IME refs + 16 bytes FBR info +
     * 128 bytes FBR MVs + 32 bytes FBR refs,
     * i.e. 16 * (2 + 2 * (1 + 8 + 2)) = 16 * 24 bytes.
     */
    vme_context->vme_output.bo = dri_bo_alloc(i965->intel.bufmgr,
                                              vme_context->vme_output.num_blocks * vme_context->vme_output.size_block,
    assert(vme_context->vme_output.bo);
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_output,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));

gen75_vme_mpeg2_output_vme_batchbuffer_setup(VADriverContextP ctx,
                                             struct encode_state *encode_state,
                                             struct intel_encoder_context *encoder_context)
    struct i965_driver_data *i965 = i965_driver_data(ctx);
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;

    vme_context->vme_batchbuffer.num_blocks = width_in_mbs * height_in_mbs + 1;
    vme_context->vme_batchbuffer.size_block = 64; /* 4 OWORDs */
    vme_context->vme_batchbuffer.pitch = 16;
    vme_context->vme_batchbuffer.bo = dri_bo_alloc(i965->intel.bufmgr,
                                                   vme_context->vme_batchbuffer.num_blocks * vme_context->vme_batchbuffer.size_block,
    vme_context->vme_buffer_suface_setup(ctx,
                                         &vme_context->gpe_context,
                                         &vme_context->vme_batchbuffer,
                                         BINDING_TABLE_OFFSET(index),
                                         SURFACE_STATE_OFFSET(index));

gen75_vme_mpeg2_surface_setup(VADriverContextP ctx,
                              struct encode_state *encode_state,
                              struct intel_encoder_context *encoder_context)
    struct object_surface *obj_surface;
    /* Set up surface states */
    /* current picture for encoding */
    obj_surface = encode_state->input_yuv_object;
    gen75_vme_source_surface_state(ctx, 0, obj_surface, encoder_context);
    gen75_vme_media_source_surface_state(ctx, 4, obj_surface, encoder_context);
    gen75_vme_media_chroma_source_surface_state(ctx, 6, obj_surface, encoder_context);
    obj_surface = encode_state->reference_objects[0];
    if (obj_surface->bo != NULL)
        gen75_vme_source_surface_state(ctx, 1, obj_surface, encoder_context);

    obj_surface = encode_state->reference_objects[1];
    if (obj_surface && obj_surface->bo != NULL)
        gen75_vme_source_surface_state(ctx, 2, obj_surface, encoder_context);

    gen75_vme_mpeg2_output_buffer_setup(ctx, encode_state, 3, is_intra, encoder_context);
    gen75_vme_mpeg2_output_vme_batchbuffer_setup(ctx, encode_state, 5, encoder_context);

    return VA_STATUS_SUCCESS;
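
/*
 * Same per-MB MEDIA_OBJECT emission as the H.264 fill above, but MPEG-2 slice
 * parameter buffers may carry several slices per buffer, so this variant also
 * walks slice_params_ext[s]->num_elements.
 */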
gen75_vme_mpeg2_fill_vme_batchbuffer(VADriverContextP ctx,
                                     struct encode_state *encode_state,
                                     int mb_width, int mb_height,
                                     int transform_8x8_mode_flag,
                                     struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    int mb_x = 0, mb_y = 0;
    unsigned int *command_ptr;

    dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
    command_ptr = vme_context->vme_batchbuffer.bo->virtual;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[s]->buffer;

        for (j = 0; j < encode_state->slice_params_ext[s]->num_elements; j++) {
            int slice_mb_begin = slice_param->macroblock_address;
            int slice_mb_number = slice_param->num_macroblocks;
            unsigned int mb_intra_ub;
            int slice_mb_x = slice_param->macroblock_address % mb_width;

            for (i = 0; i < slice_mb_number;) {
                int mb_count = i + slice_mb_begin;

                mb_x = mb_count % mb_width;
                mb_y = mb_count / mb_width;
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
                    mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
                    if (mb_x != (mb_width - 1))
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;

                        mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_AE);
                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_BCD_MASK);
                    if ((i == (mb_width - 1)) && slice_mb_x) {
                        mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;

                if ((i == mb_width) && slice_mb_x) {
                    mb_intra_ub &= ~(INTRA_PRED_AVAIL_FLAG_D);

                *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
                *command_ptr++ = kernel;
                *command_ptr++ = (mb_width << 16 | mb_y << 8 | mb_x);
                *command_ptr++ = ((1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));

    *command_ptr++ = MI_BATCH_BUFFER_END;

    dri_bo_unmap(vme_context->vme_batchbuffer.bo);
gen75_vme_mpeg2_pipeline_programing(VADriverContextP ctx,
                                    struct encode_state *encode_state,
                                    struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = encoder_context->vme_context;
    struct intel_batchbuffer *batch = encoder_context->base.batch;
    VAEncPictureParameterBufferMPEG2 *pic_param = NULL;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
    int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;
    bool allow_hwscore = true;

    pic_param = (VAEncPictureParameterBufferMPEG2 *)encode_state->pic_param_ext->buffer;

    for (s = 0; s < encode_state->num_slice_params_ext; s++) {
        VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[s]->buffer;

        for (j = 0; j < encode_state->slice_params_ext[s]->num_elements; j++) {
            if (slice_param->macroblock_address % width_in_mbs) {
                allow_hwscore = false;

    pic_param = (VAEncPictureParameterBufferMPEG2 *)encode_state->pic_param_ext->buffer;
    if (pic_param->picture_type == VAEncPictureTypeIntra) {
        allow_hwscore = false;
        kernel_shader = VME_INTRA_SHADER;
        kernel_shader = VME_INTER_SHADER;

        gen7_vme_mpeg2_walker_fill_vme_batchbuffer(ctx,
                                                   width_in_mbs, height_in_mbs,
        gen75_vme_mpeg2_fill_vme_batchbuffer(ctx,
                                             width_in_mbs, height_in_mbs,

    intel_batchbuffer_start_atomic(batch, 0x1000);
    gen6_gpe_pipeline_setup(ctx, &vme_context->gpe_context, batch);
    BEGIN_BATCH(batch, 2);
    OUT_BATCH(batch, MI_BATCH_BUFFER_START | (2 << 6));
              vme_context->vme_batchbuffer.bo,
              I915_GEM_DOMAIN_COMMAND, 0,
    ADVANCE_BATCH(batch);

    intel_batchbuffer_end_atomic(batch);
gen75_vme_mpeg2_prepare(VADriverContextP ctx,
                        struct encode_state *encode_state,
                        struct intel_encoder_context *encoder_context)
    VAStatus vaStatus = VA_STATUS_SUCCESS;
    VAEncSliceParameterBufferMPEG2 *slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[0]->buffer;
    VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
    struct gen6_vme_context *vme_context = encoder_context->vme_context;

    if ((!vme_context->mpeg2_level) ||
        (vme_context->mpeg2_level != (seq_param->sequence_extension.bits.profile_and_level_indication & MPEG2_LEVEL_MASK))) {
        vme_context->mpeg2_level = seq_param->sequence_extension.bits.profile_and_level_indication & MPEG2_LEVEL_MASK;
    /* Set up all the memory objects */
    gen75_vme_mpeg2_surface_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
    gen75_vme_interface_setup(ctx, encode_state, encoder_context);
    gen75_vme_vme_state_setup(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
    intel_vme_mpeg2_state_setup(ctx, encode_state, encoder_context);
    gen75_vme_constant_setup(ctx, encode_state, encoder_context);

    /* Program the media pipeline */
    gen75_vme_mpeg2_pipeline_programing(ctx, encode_state, slice_param->is_intra_slice, encoder_context);
gen75_vme_mpeg2_pipeline(VADriverContextP ctx,
                         struct encode_state *encode_state,
                         struct intel_encoder_context *encoder_context)
    gen75_vme_media_init(ctx, encoder_context);
    gen75_vme_mpeg2_prepare(ctx, encode_state, encoder_context);
    gen75_vme_run(ctx, encode_state, encoder_context);
    gen75_vme_stop(ctx, encode_state, encoder_context);

    return VA_STATUS_SUCCESS;

gen75_vme_context_destroy(void *context)
    struct gen6_vme_context *vme_context = context;

    i965_gpe_context_destroy(&vme_context->gpe_context);

    dri_bo_unreference(vme_context->vme_output.bo);
    vme_context->vme_output.bo = NULL;

    dri_bo_unreference(vme_context->vme_state.bo);
    vme_context->vme_state.bo = NULL;

    dri_bo_unreference(vme_context->vme_batchbuffer.bo);
    vme_context->vme_batchbuffer.bo = NULL;

    if (vme_context->vme_state_message) {
        free(vme_context->vme_state_message);
        vme_context->vme_state_message = NULL;
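
/*
 * Context creation: select the kernel set and pipeline entry point per codec,
 * size the GPE context (binding table, interface descriptors, CURBE, VFE
 * state), load the kernels, and plug in the gen7/gen75 surface setup helpers.
 */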
Bool gen75_vme_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
    struct gen6_vme_context *vme_context = calloc(1, sizeof(struct gen6_vme_context));
    struct i965_kernel *vme_kernel_list = NULL;
    int i965_kernel_num;

    switch (encoder_context->codec) {
        vme_kernel_list = gen75_vme_kernels;
        encoder_context->vme_pipeline = gen75_vme_pipeline;
        i965_kernel_num = sizeof(gen75_vme_kernels) / sizeof(struct i965_kernel);

        vme_kernel_list = gen75_vme_mpeg2_kernels;
        encoder_context->vme_pipeline = gen75_vme_mpeg2_pipeline;
        i965_kernel_num = sizeof(gen75_vme_mpeg2_kernels) / sizeof(struct i965_kernel);

        /* never get here */

    vme_context->vme_kernel_sum = i965_kernel_num;
    vme_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;

    vme_context->gpe_context.idrt.max_entries = MAX_INTERFACE_DESC_GEN6;
    vme_context->gpe_context.idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);

    vme_context->gpe_context.curbe.length = CURBE_TOTAL_DATA_LENGTH;

    vme_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
    vme_context->gpe_context.vfe_state.num_urb_entries = 16;
    vme_context->gpe_context.vfe_state.gpgpu_mode = 0;
    vme_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
    vme_context->gpe_context.vfe_state.curbe_allocation_size = CURBE_ALLOCATION_SIZE - 1;

    gen7_vme_scoreboard_init(ctx, vme_context);

    i965_gpe_load_kernels(ctx,
                          &vme_context->gpe_context,
    vme_context->vme_surface2_setup = gen7_gpe_surface2_setup;
    vme_context->vme_media_rw_surface_setup = gen7_gpe_media_rw_surface_setup;
    vme_context->vme_buffer_suface_setup = gen7_gpe_buffer_suface_setup;
    vme_context->vme_media_chroma_surface_setup = gen75_gpe_media_chroma_surface_setup;

    encoder_context->vme_context = vme_context;
    encoder_context->vme_context_destroy = gen75_vme_context_destroy;

    vme_context->vme_state_message = malloc(VME_MSG_LENGTH * sizeof(int));