#include "gen6_mfc.h"
#include "gen6_vme.h"
#include "intel_media.h"
+#include <va/va_enc_jpeg.h>
+#include "vp8_probs.h"
#define SURFACE_STATE_PADDED_SIZE SURFACE_STATE_PADDED_SIZE_GEN8
#define SURFACE_STATE_OFFSET(index) (SURFACE_STATE_PADDED_SIZE * index)
#define BINDING_TABLE_OFFSET(index) (SURFACE_STATE_OFFSET(MAX_MEDIA_SURFACES_GEN6) + sizeof(unsigned int) * index)
-#define MFC_SOFTWARE_HASWELL 1
+#define MFC_SOFTWARE_BATCH 0
#define B0_STEP_REV 2
#define IS_STEPPING_BPLUS(i965) ((i965->intel.revision) >= B0_STEP_REV)
-static const uint32_t gen8_mfc_batchbuffer_avc_intra[][4] = {
-#include "shaders/utils/mfc_batchbuffer_avc_intra.g7b"
+//Zigzag scan order of the Luma and Chroma components
+//Note: Jpeg Spec ISO/IEC 10918-1, Figure A.6 shows the zigzag order differently.
+//The Spec illustrates the zigzag pattern with numbered positions; the table below
+//follows the pattern of A.6 and maps each position to its index in the raster-order array.
+static const uint32_t zigzag_direct[64] = {
+ 0, 1, 8, 16, 9, 2, 3, 10,
+ 17, 24, 32, 25, 18, 11, 4, 5,
+ 12, 19, 26, 33, 40, 48, 41, 34,
+ 27, 20, 13, 6, 7, 14, 21, 28,
+ 35, 42, 49, 56, 57, 50, 43, 36,
+ 29, 22, 15, 23, 30, 37, 44, 51,
+ 58, 59, 52, 45, 38, 31, 39, 46,
+ 53, 60, 61, 54, 47, 55, 62, 63
};
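+//Usage: raster_qm[zigzag_direct[j]] = zigzag_qm[j] converts a zigzag-ordered
+//table (as received in VAQMatrixBufferJPEG) to raster order.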
-static const uint32_t gen8_mfc_batchbuffer_avc_inter[][4] = {
-#include "shaders/utils/mfc_batchbuffer_avc_inter.g7b"
+//Default Luminance quantization table
+//Source: Jpeg Spec ISO/IEC 10918-1, Annex K, Table K.1
+static const uint8_t jpeg_luma_quant[64] = {
+ 16, 11, 10, 16, 24, 40, 51, 61,
+ 12, 12, 14, 19, 26, 58, 60, 55,
+ 14, 13, 16, 24, 40, 57, 69, 56,
+ 14, 17, 22, 29, 51, 87, 80, 62,
+ 18, 22, 37, 56, 68, 109, 103, 77,
+ 24, 35, 55, 64, 81, 104, 113, 92,
+ 49, 64, 78, 87, 103, 121, 120, 101,
+ 72, 92, 95, 98, 112, 100, 103, 99
+};
+
+//Default Chroma quantization table
+//Source: Jpeg Spec ISO/IEC 10918-1, Annex K, Table K.2
+static const uint8_t jpeg_chroma_quant[64] = {
+ 17, 18, 24, 47, 99, 99, 99, 99,
+ 18, 21, 26, 66, 99, 99, 99, 99,
+ 24, 26, 56, 99, 99, 99, 99, 99,
+ 47, 66, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99,
+ 99, 99, 99, 99, 99, 99, 99, 99
+};
+
+
+static const int va_to_gen7_jpeg_hufftable[2] = {
+ MFX_HUFFTABLE_ID_Y,
+ MFX_HUFFTABLE_ID_UV
+};
+
+static const uint32_t gen8_mfc_batchbuffer_avc[][4] = {
+#include "shaders/utils/mfc_batchbuffer_hsw.g8b"
+};
+
+static const uint32_t gen9_mfc_batchbuffer_avc[][4] = {
+#include "shaders/utils/mfc_batchbuffer_hsw.g9b"
};
static struct i965_kernel gen8_mfc_kernels[] = {
{
"MFC AVC INTRA BATCHBUFFER ",
MFC_BATCHBUFFER_AVC_INTRA,
- gen8_mfc_batchbuffer_avc_intra,
- sizeof(gen8_mfc_batchbuffer_avc_intra),
+ gen8_mfc_batchbuffer_avc,
+ sizeof(gen8_mfc_batchbuffer_avc),
NULL
},
+};
+static struct i965_kernel gen9_mfc_kernels[] = {
{
- "MFC AVC INTER BATCHBUFFER ",
- MFC_BATCHBUFFER_AVC_INTER,
- gen8_mfc_batchbuffer_avc_inter,
- sizeof(gen8_mfc_batchbuffer_avc_inter),
+ "MFC AVC INTRA BATCHBUFFER ",
+ MFC_BATCHBUFFER_AVC_INTRA,
+ gen9_mfc_batchbuffer_avc,
+ sizeof(gen9_mfc_batchbuffer_avc),
NULL
},
};
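+/* Flat (default) AVC scaling matrices: every QM entry is 16 (four bytes of 0x10
+ * per DWord) and every FQM entry is the 16-bit fixed-point reciprocal
+ * 65536 / 16 = 0x1000, packed two per DWord. */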
+static const uint32_t qm_flat[16] = {
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010,
+ 0x10101010, 0x10101010, 0x10101010, 0x10101010
+};
+
+static const uint32_t fqm_flat[32] = {
+ 0x10001000, 0x10001000, 0x10001000, 0x10001000,
+ 0x10001000, 0x10001000, 0x10001000, 0x10001000,
+ 0x10001000, 0x10001000, 0x10001000, 0x10001000,
+ 0x10001000, 0x10001000, 0x10001000, 0x10001000,
+ 0x10001000, 0x10001000, 0x10001000, 0x10001000,
+ 0x10001000, 0x10001000, 0x10001000, 0x10001000,
+ 0x10001000, 0x10001000, 0x10001000, 0x10001000,
+ 0x10001000, 0x10001000, 0x10001000, 0x10001000
+};
+
#define INTER_MODE_MASK 0x03
#define INTER_8X8 0x03
#define INTER_16X8 0x01
#define INTER_8X16 0x02
#define SUBMB_SHAPE_MASK 0x00FF00
+#define INTER_16X16 0x00
#define INTER_MV8 (4 << 20)
#define INTER_MV32 (6 << 20)
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
assert(standard_select == MFX_FORMAT_MPEG2 ||
- standard_select == MFX_FORMAT_AVC);
+ standard_select == MFX_FORMAT_AVC ||
+ standard_select == MFX_FORMAT_JPEG ||
+ standard_select == MFX_FORMAT_VP8);
BEGIN_BCS_BATCH(batch, 5);
(0 << 10) | /* Stream-Out Enable */
((!!mfc_context->post_deblocking_output.bo) << 9) | /* Post Deblocking Output */
((!!mfc_context->pre_deblocking_output.bo) << 8) | /* Pre Deblocking Output */
+ (0 << 6) | /* frame statistics stream-out enable*/
(0 << 5) | /* not in stitch mode */
(1 << 4) | /* encoding mode */
- (standard_select << 0)); /* standard select: avc or mpeg2 */
+ (standard_select << 0)); /* standard select: avc or mpeg2 or jpeg*/
OUT_BCS_BATCH(batch,
(0 << 7) | /* expand NOA bus flag */
(0 << 6) | /* disable slice-level clock gating */
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
struct gen6_vme_context *vme_context = encoder_context->vme_context;
int vme_size;
+ unsigned int bse_offset;
BEGIN_BCS_BATCH(batch, 26);
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
+
/* the DW4-5 is the MFX upper bound */
- OUT_BCS_BATCH(batch, 0);
- OUT_BCS_BATCH(batch, 0);
+ if (encoder_context->codec == CODEC_VP8) {
+ OUT_BCS_RELOC(batch,
+ mfc_context->mfc_indirect_pak_bse_object.bo,
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
+ mfc_context->mfc_indirect_pak_bse_object.end_offset);
+ OUT_BCS_BATCH(batch, 0);
+ } else {
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ }
- vme_size = vme_context->vme_output.size_block * vme_context->vme_output.num_blocks;
- /* the DW6-10 is for MFX Indirect MV Object Base Address */
- OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
- OUT_BCS_BATCH(batch, 0);
- OUT_BCS_BATCH(batch, 0);
- OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, vme_size);
- OUT_BCS_BATCH(batch, 0);
+ if(encoder_context->codec != CODEC_JPEG) {
+ vme_size = vme_context->vme_output.size_block * vme_context->vme_output.num_blocks;
+ /* the DW6-10 is for MFX Indirect MV Object Base Address */
+ OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_RELOC(batch, vme_context->vme_output.bo, I915_GEM_DOMAIN_INSTRUCTION, 0, vme_size);
+ OUT_BCS_BATCH(batch, 0);
+ } else {
+ /* No VME for JPEG */
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ }
/* the DW11-15 is for MFX IT-COFF. Not used on encoder */
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
/* the DW21-25 is for MFC Indirect PAK-BSE Object Base Address for Encoder*/
+ bse_offset = (encoder_context->codec == CODEC_JPEG) ? (mfc_context->mfc_indirect_pak_bse_object.offset) : 0;
OUT_BCS_RELOC(batch,
mfc_context->mfc_indirect_pak_bse_object.bo,
I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
- 0);
+ bse_offset);
OUT_BCS_BATCH(batch, 0);
OUT_BCS_BATCH(batch, 0);
static void
gen8_mfc_qm_state(VADriverContextP ctx,
int qm_type,
- unsigned int *qm,
+ const uint32_t *qm,
int qm_length,
struct intel_encoder_context *encoder_context)
{
}
static void
-gen8_mfc_avc_qm_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+gen8_mfc_avc_qm_state(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
- unsigned int qm[16] = {
- 0x10101010, 0x10101010, 0x10101010, 0x10101010,
- 0x10101010, 0x10101010, 0x10101010, 0x10101010,
- 0x10101010, 0x10101010, 0x10101010, 0x10101010,
- 0x10101010, 0x10101010, 0x10101010, 0x10101010
- };
+ const unsigned int *qm_4x4_intra;
+ const unsigned int *qm_4x4_inter;
+ const unsigned int *qm_8x8_intra;
+ const unsigned int *qm_8x8_inter;
+ VAEncSequenceParameterBufferH264 *pSeqParameter =
+ (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+ VAEncPictureParameterBufferH264 *pPicParameter =
+ (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+
+ if (!pSeqParameter->seq_fields.bits.seq_scaling_matrix_present_flag
+ && !pPicParameter->pic_fields.bits.pic_scaling_matrix_present_flag) {
+ qm_4x4_intra = qm_4x4_inter = qm_8x8_intra = qm_8x8_inter = qm_flat;
+ } else {
+ VAIQMatrixBufferH264 *qm;
+ assert(encode_state->q_matrix && encode_state->q_matrix->buffer);
+ qm = (VAIQMatrixBufferH264 *)encode_state->q_matrix->buffer;
+ qm_4x4_intra = (unsigned int *)qm->ScalingList4x4[0];
+ qm_4x4_inter = (unsigned int *)qm->ScalingList4x4[3];
+ qm_8x8_intra = (unsigned int *)qm->ScalingList8x8[0];
+ qm_8x8_inter = (unsigned int *)qm->ScalingList8x8[1];
+ }
- gen8_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm, 12, encoder_context);
- gen8_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm, 12, encoder_context);
- gen8_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm, 16, encoder_context);
- gen8_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm, 16, encoder_context);
+ gen8_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm_4x4_intra, 12, encoder_context);
+ gen8_mfc_qm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm_4x4_inter, 12, encoder_context);
+ gen8_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm_8x8_intra, 16, encoder_context);
+ gen8_mfc_qm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm_8x8_inter, 16, encoder_context);
}
static void
gen8_mfc_fqm_state(VADriverContextP ctx,
int fqm_type,
- unsigned int *fqm,
+ const uint32_t *fqm,
int fqm_length,
struct intel_encoder_context *encoder_context)
{
}
static void
-gen8_mfc_avc_fqm_state(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
-{
- unsigned int qm[32] = {
- 0x10001000, 0x10001000, 0x10001000, 0x10001000,
- 0x10001000, 0x10001000, 0x10001000, 0x10001000,
- 0x10001000, 0x10001000, 0x10001000, 0x10001000,
- 0x10001000, 0x10001000, 0x10001000, 0x10001000,
- 0x10001000, 0x10001000, 0x10001000, 0x10001000,
- 0x10001000, 0x10001000, 0x10001000, 0x10001000,
- 0x10001000, 0x10001000, 0x10001000, 0x10001000,
- 0x10001000, 0x10001000, 0x10001000, 0x10001000
- };
+gen8_mfc_avc_fill_fqm(uint8_t *qm, uint16_t *fqm, int len)
+{
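+ /* Fill the FQM with 16-bit fixed-point reciprocals ((1 << 16) / qm) of the
+ * QM entries, transposing the matrix (qm[j][i] -> fqm[i][j]) on the way. */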
+ int i, j;
+ for (i = 0; i < len; i++)
+ for (j = 0; j < len; j++)
+ fqm[i * len + j] = (1 << 16) / qm[j * len + i];
+}
+
+static void
+gen8_mfc_avc_fqm_state(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ VAEncSequenceParameterBufferH264 *pSeqParameter =
+ (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
+ VAEncPictureParameterBufferH264 *pPicParameter =
+ (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
+
+ if (!pSeqParameter->seq_fields.bits.seq_scaling_matrix_present_flag
+ && !pPicParameter->pic_fields.bits.pic_scaling_matrix_present_flag) {
+ gen8_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, fqm_flat, 24, encoder_context);
+ gen8_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, fqm_flat, 24, encoder_context);
+ gen8_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, fqm_flat, 32, encoder_context);
+ gen8_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, fqm_flat, 32, encoder_context);
+ } else {
+ int i;
+ uint32_t fqm[32];
+ VAIQMatrixBufferH264 *qm;
+ assert(encode_state->q_matrix && encode_state->q_matrix->buffer);
+ qm = (VAIQMatrixBufferH264 *)encode_state->q_matrix->buffer;
+
+ for (i = 0; i < 3; i++)
+ gen8_mfc_avc_fill_fqm(qm->ScalingList4x4[i], (uint16_t *)fqm + 16 * i, 4);
+ gen8_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, fqm, 24, encoder_context);
+
+ for (i = 3; i < 6; i++)
+ gen8_mfc_avc_fill_fqm(qm->ScalingList4x4[i], (uint16_t *)fqm + 16 * (i - 3), 4);
+ gen8_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, fqm, 24, encoder_context);
- gen8_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTRA_MATRIX, qm, 24, encoder_context);
- gen8_mfc_fqm_state(ctx, MFX_QM_AVC_4X4_INTER_MATRIX, qm, 24, encoder_context);
- gen8_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, qm, 32, encoder_context);
- gen8_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, qm, 32, encoder_context);
+ gen8_mfc_avc_fill_fqm(qm->ScalingList8x8[0], (uint16_t *)fqm, 8);
+ gen8_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTRA_MATRIX, fqm, 32, encoder_context);
+
+ gen8_mfc_avc_fill_fqm(qm->ScalingList8x8[1], (uint16_t *)fqm, 8);
+ gen8_mfc_fqm_state(ctx, MFX_QM_AVC_8x8_INTER_MATRIX, fqm, 32, encoder_context);
+ }
}
static void
VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
width_in_mbs = pSequenceParameter->picture_width_in_mbs;
height_in_mbs = pSequenceParameter->picture_height_in_mbs;
- } else {
+ } else if (encoder_context->codec == CODEC_MPEG2) {
VAEncSequenceParameterBufferMPEG2 *pSequenceParameter = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
assert(encoder_context->codec == CODEC_MPEG2);
width_in_mbs = ALIGN(pSequenceParameter->picture_width, 16) / 16;
height_in_mbs = ALIGN(pSequenceParameter->picture_height, 16) / 16;
+ } else {
+ assert(encoder_context->codec == CODEC_JPEG);
+ VAEncPictureParameterBufferJPEG *pic_param = (VAEncPictureParameterBufferJPEG *)encode_state->pic_param_ext->buffer;
+
+ width_in_mbs = ALIGN(pic_param->picture_width, 16) / 16;
+ height_in_mbs = ALIGN(pic_param->picture_height, 16) / 16;
}
slice_batchbuffer_size = 64 * width_in_mbs * height_in_mbs + 4096 +
mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
- if ( mfc_context->direct_mv_buffers[i].bo != NULL);
- dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
+ if (mfc_context->direct_mv_buffers[i].bo != NULL)
+ dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
mfc_context->direct_mv_buffers[i].bo = NULL;
}
mfc_context->aux_batchbuffer_surface.num_blocks = mfc_context->aux_batchbuffer->size / 16;
mfc_context->aux_batchbuffer_surface.size_block = 16;
- i965_gpe_context_init(ctx, &mfc_context->gpe_context);
+ gen8_gpe_context_init(ctx, &mfc_context->gpe_context);
}
static void
gen8_mfc_pipe_buf_addr_state(ctx, encoder_context);
gen8_mfc_bsp_buf_base_addr_state(ctx, encoder_context);
mfc_context->avc_img_state(ctx, encode_state, encoder_context);
- mfc_context->avc_qm_state(ctx, encoder_context);
- mfc_context->avc_fqm_state(ctx, encoder_context);
+ mfc_context->avc_qm_state(ctx, encode_state, encoder_context);
+ mfc_context->avc_fqm_state(ctx, encode_state, encoder_context);
gen8_mfc_avc_directmode_state(ctx, encoder_context);
intel_mfc_avc_ref_idx_state(ctx, encode_state, encoder_context);
}
ADVANCE_BCS_BATCH(batch);
}
-
-#ifdef MFC_SOFTWARE_HASWELL
+#define AVC_INTRA_RDO_OFFSET 4
+#define AVC_INTER_RDO_OFFSET 10
+#define AVC_INTER_MSG_OFFSET 8
+#define AVC_INTER_MV_OFFSET 48
+#define AVC_RDO_MASK 0xFFFF
static int
gen8_mfc_avc_pak_object_intra(VADriverContextP ctx, int x, int y, int end_mb,
return len_in_dwords;
}
-#define AVC_INTRA_RDO_OFFSET 4
-#define AVC_INTER_RDO_OFFSET 10
-#define AVC_INTER_MSG_OFFSET 8
-#define AVC_INTER_MV_OFFSET 48
-#define AVC_RDO_MASK 0xFFFF
-
static void
gen8_mfc_avc_pipeline_slice_programing(VADriverContextP ctx,
struct encode_state *encode_state,
int i,x,y;
int qp = pPicParameter->pic_init_qp + pSliceParameter->slice_qp_delta;
unsigned int rate_control_mode = encoder_context->rate_control_mode;
- unsigned char *slice_header = NULL;
- int slice_header_length_in_bits = 0;
unsigned int tail_data[] = { 0x0, 0x0 };
int slice_type = intel_avc_enc_slice_type_fixup(pSliceParameter->slice_type);
int is_intra = slice_type == SLICE_TYPE_I;
+ int qp_slice;
+ int qp_mb;
-
+ qp_slice = qp;
if (rate_control_mode == VA_RC_CBR) {
- qp = mfc_context->bit_rate_control_context[slice_type].QpPrimeY;
- pSliceParameter->slice_qp_delta = qp - pPicParameter->pic_init_qp;
+ qp = mfc_context->brc.qp_prime_y[0][slice_type];
+ if (encode_state->slice_header_index[slice_index] == 0) {
+ pSliceParameter->slice_qp_delta = qp - pPicParameter->pic_init_qp;
+ qp_slice = qp;
+ }
}
/* only support for 8-bit pixel bit-depth */
pPicParameter,
pSliceParameter,
encode_state, encoder_context,
- (rate_control_mode == VA_RC_CBR), qp, slice_batch);
+ (rate_control_mode == VA_RC_CBR), qp_slice, slice_batch);
if ( slice_index == 0)
intel_mfc_avc_pipeline_header_programing(ctx, encode_state, encoder_context, slice_batch);
- slice_header_length_in_bits = build_avc_slice_header(pSequenceParameter, pPicParameter, pSliceParameter, &slice_header);
-
- // slice hander
- mfc_context->insert_object(ctx, encoder_context,
- (unsigned int *)slice_header, ALIGN(slice_header_length_in_bits, 32) >> 5, slice_header_length_in_bits & 0x1f,
- 5, /* first 5 bytes are start code + nal unit type */
- 1, 0, 1, slice_batch);
-
- free(slice_header);
+ intel_avc_slice_insert_packed_data(ctx, encode_state, encoder_context, slice_index, slice_batch);
dri_bo_map(vme_context->vme_output.bo , 1);
msg_ptr = (unsigned char *)vme_context->vme_output.bo->virtual;
x = i % width_in_mbs;
y = i / width_in_mbs;
msg = (unsigned int *) (msg_ptr + i * vme_context->vme_output.size_block);
+ if (vme_context->roi_enabled) {
+ qp_mb = *(vme_context->qp_per_mb + i);
+ } else
+ qp_mb = qp;
if (is_intra) {
assert(msg);
- gen8_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp, msg, encoder_context, 0, 0, slice_batch);
+ gen8_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp_mb, msg, encoder_context, 0, 0, slice_batch);
} else {
int inter_rdo, intra_rdo;
inter_rdo = msg[AVC_INTER_RDO_OFFSET] & AVC_RDO_MASK;
intra_rdo = msg[AVC_INTRA_RDO_OFFSET] & AVC_RDO_MASK;
offset = i * vme_context->vme_output.size_block + AVC_INTER_MV_OFFSET;
if (intra_rdo < inter_rdo) {
- gen8_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp, msg, encoder_context, 0, 0, slice_batch);
+ gen8_mfc_avc_pak_object_intra(ctx, x, y, last_mb, qp_mb, msg, encoder_context, 0, 0, slice_batch);
} else {
msg += AVC_INTER_MSG_OFFSET;
- gen8_mfc_avc_pak_object_inter(ctx, x, y, last_mb, qp, msg, offset, encoder_context, 0, 0, pSliceParameter->slice_type, slice_batch);
+ gen8_mfc_avc_pak_object_inter(ctx, x, y, last_mb, qp_mb, msg, offset, encoder_context, 0, 0, pSliceParameter->slice_type, slice_batch);
}
}
}
tail_data, 1, 8,
1, 1, 1, 0, slice_batch);
}
-
-
}
static dri_bo *
struct intel_encoder_context *encoder_context)
{
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
- struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch;
dri_bo *batch_bo;
int i;
return batch_bo;
}
-#else
static void
gen8_mfc_batchbuffer_surfaces_input(VADriverContextP ctx,
struct encode_state *encode_state,
struct intel_encoder_context *encoder_context)
-
{
struct gen6_vme_context *vme_context = encoder_context->vme_context;
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
&vme_context->vme_output,
BINDING_TABLE_OFFSET(BIND_IDX_VME_OUTPUT),
SURFACE_STATE_OFFSET(BIND_IDX_VME_OUTPUT));
- assert(mfc_context->aux_batchbuffer_surface.bo);
- mfc_context->buffer_suface_setup(ctx,
- &mfc_context->gpe_context,
- &mfc_context->aux_batchbuffer_surface,
- BINDING_TABLE_OFFSET(BIND_IDX_MFC_SLICE_HEADER),
- SURFACE_STATE_OFFSET(BIND_IDX_MFC_SLICE_HEADER));
}
static void
gen8_mfc_batchbuffer_surfaces_output(VADriverContextP ctx,
struct encode_state *encode_state,
struct intel_encoder_context *encoder_context)
-
{
- struct i965_driver_data *i965 = i965_driver_data(ctx);
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
- VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
- int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
- int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
- mfc_context->mfc_batchbuffer_surface.num_blocks = width_in_mbs * height_in_mbs + encode_state->num_slice_params_ext * 8 + 1;
- mfc_context->mfc_batchbuffer_surface.size_block = 16 * CMD_LEN_IN_OWORD; /* 3 OWORDs */
- mfc_context->mfc_batchbuffer_surface.pitch = 16;
- mfc_context->mfc_batchbuffer_surface.bo = dri_bo_alloc(i965->intel.bufmgr,
- "MFC batchbuffer",
- mfc_context->mfc_batchbuffer_surface.num_blocks * mfc_context->mfc_batchbuffer_surface.size_block,
- 0x1000);
+ assert(mfc_context->aux_batchbuffer_surface.bo);
mfc_context->buffer_suface_setup(ctx,
&mfc_context->gpe_context,
- &mfc_context->mfc_batchbuffer_surface,
+ &mfc_context->aux_batchbuffer_surface,
BINDING_TABLE_OFFSET(BIND_IDX_MFC_BATCHBUFFER),
SURFACE_STATE_OFFSET(BIND_IDX_MFC_BATCHBUFFER));
}
struct intel_encoder_context *encoder_context)
{
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
- struct gen6_interface_descriptor_data *desc;
+ struct gen8_interface_descriptor_data *desc;
int i;
dri_bo *bo;
+ unsigned char *desc_ptr;
- bo = mfc_context->gpe_context.idrt.bo;
+ bo = mfc_context->gpe_context.dynamic_state.bo;
dri_bo_map(bo, 1);
assert(bo->virtual);
- desc = bo->virtual;
+ desc_ptr = (unsigned char *)bo->virtual + mfc_context->gpe_context.idrt_offset;
+
+ desc = (struct gen8_interface_descriptor_data *)desc_ptr;
for (i = 0; i < mfc_context->gpe_context.num_kernels; i++) {
struct i965_kernel *kernel;
-
kernel = &mfc_context->gpe_context.kernels[i];
assert(sizeof(*desc) == 32);
-
/*Setup the descritor table*/
memset(desc, 0, sizeof(*desc));
- desc->desc0.kernel_start_pointer = (kernel->bo->offset >> 6);
- desc->desc2.sampler_count = 0;
- desc->desc2.sampler_state_pointer = 0;
- desc->desc3.binding_table_entry_count = 2;
- desc->desc3.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
- desc->desc4.constant_urb_entry_read_offset = 0;
- desc->desc4.constant_urb_entry_read_length = 4;
+ desc->desc0.kernel_start_pointer = kernel->kernel_offset >> 6;
+ desc->desc3.sampler_count = 0;
+ desc->desc3.sampler_state_pointer = 0;
+ desc->desc4.binding_table_entry_count = 1;
+ desc->desc4.binding_table_pointer = (BINDING_TABLE_OFFSET(0) >> 5);
+ desc->desc5.constant_urb_entry_read_offset = 0;
+ desc->desc5.constant_urb_entry_read_length = 4;
+
- /*kernel start*/
- dri_bo_emit_reloc(bo,
- I915_GEM_DOMAIN_INSTRUCTION, 0,
- 0,
- i * sizeof(*desc) + offsetof(struct gen6_interface_descriptor_data, desc0),
- kernel->bo);
desc++;
}
dri_bo_unmap(bo);
+
+ return;
}
static void
(void)mfc_context;
}
+#define AVC_PAK_LEN_IN_BYTE 48
+#define AVC_PAK_LEN_IN_OWORD 3
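+/* Each per-MB AVC PAK object occupies 3 OWords (48 bytes, i.e. 12 DWords) in the
+ * slice batch; the batch reserves num_macroblocks * AVC_PAK_LEN_IN_BYTE bytes per
+ * slice for the media kernel to fill in. */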
+
static void
gen8_mfc_batchbuffer_emit_object_command(struct intel_batchbuffer *batch,
- int index,
- int head_offset,
- int batchbuffer_offset,
- int head_size,
- int tail_size,
- int number_mb_cmds,
- int first_object,
- int last_object,
- int last_slice,
- int mb_x,
- int mb_y,
- int width_in_mbs,
- int qp)
-{
- BEGIN_BATCH(batch, 12);
+ uint32_t intra_flag,
+ int head_offset,
+ int number_mb_cmds,
+ int slice_end_x,
+ int slice_end_y,
+ int mb_x,
+ int mb_y,
+ int width_in_mbs,
+ int qp,
+ uint32_t fwd_ref,
+ uint32_t bwd_ref)
+{
+ uint32_t temp_value;
+ BEGIN_BATCH(batch, 14);
- OUT_BATCH(batch, CMD_MEDIA_OBJECT | (12 - 2));
- OUT_BATCH(batch, index);
+ OUT_BATCH(batch, CMD_MEDIA_OBJECT | (14 - 2));
+ OUT_BATCH(batch, 0);
OUT_BATCH(batch, 0);
OUT_BATCH(batch, 0);
OUT_BATCH(batch, 0);
OUT_BATCH(batch, 0);
/*inline data */
- OUT_BATCH(batch, head_offset);
- OUT_BATCH(batch, batchbuffer_offset);
- OUT_BATCH(batch,
- head_size << 16 |
- tail_size);
- OUT_BATCH(batch,
- number_mb_cmds << 16 |
- first_object << 2 |
- last_object << 1 |
- last_slice);
- OUT_BATCH(batch,
- mb_y << 8 |
- mb_x);
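+ /* Inline data consumed by the MFC batchbuffer media kernel: PAK destination
+ * offset in OWords, intra flag and QP, MB position and frame width in MBs,
+ * MB count, slice end position, and forward/backward reference indices. */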
+ OUT_BATCH(batch, head_offset / 16);
+ OUT_BATCH(batch, (intra_flag) | (qp << 16));
+ temp_value = (mb_x | (mb_y << 8) | (width_in_mbs << 16));
+ OUT_BATCH(batch, temp_value);
+
+ OUT_BATCH(batch, number_mb_cmds);
+
OUT_BATCH(batch,
- qp << 16 |
- width_in_mbs);
+ ((slice_end_y << 8) | (slice_end_x)));
+ OUT_BATCH(batch, fwd_ref);
+ OUT_BATCH(batch, bwd_ref);
+
+ OUT_BATCH(batch, MI_NOOP);
ADVANCE_BATCH(batch);
}
static void
gen8_mfc_avc_batchbuffer_slice_command(VADriverContextP ctx,
- struct intel_encoder_context *encoder_context,
- VAEncSliceParameterBufferH264 *slice_param,
- int head_offset,
- unsigned short head_size,
- unsigned short tail_size,
- int batchbuffer_offset,
- int qp,
- int last_slice)
+ struct intel_encoder_context *encoder_context,
+ VAEncSliceParameterBufferH264 *slice_param,
+ int head_offset,
+ int qp,
+ int last_slice)
{
struct intel_batchbuffer *batch = encoder_context->base.batch;
+ struct gen6_vme_context *vme_context = encoder_context->vme_context;
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
int total_mbs = slice_param->num_macroblocks;
+ int slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
int number_mb_cmds = 128;
- int starting_mb = 0;
- int last_object = 0;
- int first_object = 1;
- int i;
+ int starting_offset = 0;
int mb_x, mb_y;
- int index = (slice_param->slice_type == SLICE_TYPE_I) ? MFC_BATCHBUFFER_AVC_INTRA : MFC_BATCHBUFFER_AVC_INTER;
+ int last_mb, slice_end_x, slice_end_y;
+ int remaining_mb = total_mbs;
+ uint32_t fwd_ref , bwd_ref, mb_flag;
+ char tmp_qp;
+ int number_roi_mbs, max_mb_cmds, i;
- for (i = 0; i < total_mbs / number_mb_cmds; i++) {
- last_object = (total_mbs - starting_mb) == number_mb_cmds;
- mb_x = (slice_param->macroblock_address + starting_mb) % width_in_mbs;
- mb_y = (slice_param->macroblock_address + starting_mb) / width_in_mbs;
- assert(mb_x <= 255 && mb_y <= 255);
+ last_mb = slice_param->macroblock_address + total_mbs - 1;
+ slice_end_x = last_mb % width_in_mbs;
+ slice_end_y = last_mb / width_in_mbs;
- starting_mb += number_mb_cmds;
+ if (slice_type == SLICE_TYPE_I) {
+ fwd_ref = 0;
+ bwd_ref = 0;
+ mb_flag = 1;
+ } else {
+ fwd_ref = vme_context->ref_index_in_mb[0];
+ bwd_ref = vme_context->ref_index_in_mb[1];
+ mb_flag = 0;
+ }
- gen8_mfc_batchbuffer_emit_object_command(batch,
- index,
- head_offset,
- batchbuffer_offset,
- head_size,
- tail_size,
- number_mb_cmds,
- first_object,
- last_object,
- last_slice,
- mb_x,
- mb_y,
- width_in_mbs,
- qp);
-
- if (first_object) {
- head_offset += head_size;
- batchbuffer_offset += head_size;
- }
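+ /* Choose how many MBs each MEDIA_OBJECT command covers: narrow frames use a
+ * full MB row, wider frames use a fraction of a row so that every command
+ * handles a similar-sized chunk of macroblocks. */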
+ if (width_in_mbs >= 100) {
+ number_mb_cmds = width_in_mbs / 5;
+ } else if (width_in_mbs >= 80) {
+ number_mb_cmds = width_in_mbs / 4;
+ } else if (width_in_mbs >= 60) {
+ number_mb_cmds = width_in_mbs / 3;
+ } else if (width_in_mbs >= 40) {
+ number_mb_cmds = width_in_mbs / 2;
+ } else {
+ number_mb_cmds = width_in_mbs;
+ }
- if (last_object) {
- head_offset += tail_size;
- batchbuffer_offset += tail_size;
- }
+ max_mb_cmds = number_mb_cmds;
- batchbuffer_offset += number_mb_cmds * CMD_LEN_IN_OWORD;
+ do {
+ mb_x = (slice_param->macroblock_address + starting_offset) % width_in_mbs;
+ mb_y = (slice_param->macroblock_address + starting_offset) / width_in_mbs;
- first_object = 0;
- }
+ number_mb_cmds = max_mb_cmds;
+ if (vme_context->roi_enabled) {
+
+ number_roi_mbs = 1;
+ tmp_qp = *(vme_context->qp_per_mb + starting_offset);
+ for (i = 1; i < max_mb_cmds; i++) {
+ if (tmp_qp != *(vme_context->qp_per_mb + starting_offset + i))
+ break;
+
+ number_roi_mbs++;
+ }
- if (!last_object) {
- last_object = 1;
- number_mb_cmds = total_mbs % number_mb_cmds;
- mb_x = (slice_param->macroblock_address + starting_mb) % width_in_mbs;
- mb_y = (slice_param->macroblock_address + starting_mb) / width_in_mbs;
- assert(mb_x <= 255 && mb_y <= 255);
- starting_mb += number_mb_cmds;
+ number_mb_cmds = number_roi_mbs;
+ qp = tmp_qp;
+ }
+
+ if (number_mb_cmds >= remaining_mb) {
+ number_mb_cmds = remaining_mb;
+ }
gen8_mfc_batchbuffer_emit_object_command(batch,
- index,
- head_offset,
- batchbuffer_offset,
- head_size,
- tail_size,
- number_mb_cmds,
- first_object,
- last_object,
- last_slice,
- mb_x,
- mb_y,
- width_in_mbs,
- qp);
- }
-}
-
-/*
- * return size in Owords (16bytes)
- */
-static int
+ mb_flag,
+ head_offset,
+ number_mb_cmds,
+ slice_end_x,
+ slice_end_y,
+ mb_x,
+ mb_y,
+ width_in_mbs,
+ qp,
+ fwd_ref,
+ bwd_ref);
+
+ head_offset += (number_mb_cmds * AVC_PAK_LEN_IN_BYTE);
+ remaining_mb -= number_mb_cmds;
+ starting_offset += number_mb_cmds;
+ } while (remaining_mb > 0);
+}
+
+static void
gen8_mfc_avc_batchbuffer_slice(VADriverContextP ctx,
- struct encode_state *encode_state,
- struct intel_encoder_context *encoder_context,
- int slice_index,
- int batchbuffer_offset)
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ int slice_index)
{
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
struct intel_batchbuffer *slice_batch = mfc_context->aux_batchbuffer;
int last_slice = (pSliceParameter->macroblock_address + pSliceParameter->num_macroblocks) == (width_in_mbs * height_in_mbs);
int qp = pPicParameter->pic_init_qp + pSliceParameter->slice_qp_delta;
unsigned int rate_control_mode = encoder_context->rate_control_mode;
- unsigned char *slice_header = NULL;
- int slice_header_length_in_bits = 0;
unsigned int tail_data[] = { 0x0, 0x0 };
long head_offset;
- int old_used = intel_batchbuffer_used_size(slice_batch), used;
- unsigned short head_size, tail_size;
int slice_type = intel_avc_enc_slice_type_fixup(pSliceParameter->slice_type);
+ int qp_slice;
+ qp_slice = qp;
if (rate_control_mode == VA_RC_CBR) {
- qp = mfc_context->bit_rate_control_context[slice_type].QpPrimeY;
- pSliceParameter->slice_qp_delta = qp - pPicParameter->pic_init_qp;
+ qp = mfc_context->brc.qp_prime_y[0][slice_type];
+ if (encode_state->slice_header_index[slice_index] == 0) {
+ pSliceParameter->slice_qp_delta = qp - pPicParameter->pic_init_qp;
+ qp_slice = qp;
+ }
}
/* only support for 8-bit pixel bit-depth */
assert(pPicParameter->pic_init_qp >= 0 && pPicParameter->pic_init_qp < 52);
assert(qp >= 0 && qp < 52);
- head_offset = old_used / 16;
gen8_mfc_avc_slice_state(ctx,
- pPicParameter,
- pSliceParameter,
- encode_state,
- encoder_context,
- (rate_control_mode == VA_RC_CBR),
- qp,
- slice_batch);
+ pPicParameter,
+ pSliceParameter,
+ encode_state,
+ encoder_context,
+ (rate_control_mode == VA_RC_CBR),
+ qp_slice,
+ slice_batch);
if (slice_index == 0)
intel_mfc_avc_pipeline_header_programing(ctx, encode_state, encoder_context, slice_batch);
- slice_header_length_in_bits = build_avc_slice_header(pSequenceParameter, pPicParameter, pSliceParameter, &slice_header);
+ intel_avc_slice_insert_packed_data(ctx, encode_state, encoder_context, slice_index, slice_batch);
- // slice hander
- mfc_context->insert_object(ctx,
- encoder_context,
- (unsigned int *)slice_header,
- ALIGN(slice_header_length_in_bits, 32) >> 5,
- slice_header_length_in_bits & 0x1f,
- 5, /* first 5 bytes are start code + nal unit type */
- 1,
- 0,
- 1,
- slice_batch);
+ intel_batchbuffer_align(slice_batch, 16); /* aligned by an Oword */
+ head_offset = intel_batchbuffer_used_size(slice_batch);
- free(slice_header);
+ slice_batch->ptr += pSliceParameter->num_macroblocks * AVC_PAK_LEN_IN_BYTE;
+
+ gen8_mfc_avc_batchbuffer_slice_command(ctx,
+ encoder_context,
+ pSliceParameter,
+ head_offset,
+ qp,
+ last_slice);
- intel_batchbuffer_align(slice_batch, 16); /* aligned by an Oword */
- used = intel_batchbuffer_used_size(slice_batch);
- head_size = (used - old_used) / 16;
- old_used = used;
- /* tail */
+ /* Aligned for tail */
+ intel_batchbuffer_align(slice_batch, 16); /* aligned by an Oword */
if (last_slice) {
mfc_context->insert_object(ctx,
encoder_context,
slice_batch);
}
- intel_batchbuffer_align(slice_batch, 16); /* aligned by an Oword */
- used = intel_batchbuffer_used_size(slice_batch);
- tail_size = (used - old_used) / 16;
-
-
- gen8_mfc_avc_batchbuffer_slice_command(ctx,
- encoder_context,
- pSliceParameter,
- head_offset,
- head_size,
- tail_size,
- batchbuffer_offset,
- qp,
- last_slice);
-
- return head_size + tail_size + pSliceParameter->num_macroblocks * CMD_LEN_IN_OWORD;
+ return;
}
static void
struct encode_state *encode_state,
struct intel_encoder_context *encoder_context)
{
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
struct intel_batchbuffer *batch = encoder_context->base.batch;
- int i, size, offset = 0;
- intel_batchbuffer_start_atomic(batch, 0x4000);
- gen6_gpe_pipeline_setup(ctx, &mfc_context->gpe_context, batch);
+ int i;
+
+ intel_batchbuffer_start_atomic(batch, 0x4000);
+
+ if (IS_GEN9(i965->intel.device_info))
+ gen9_gpe_pipeline_setup(ctx, &mfc_context->gpe_context, batch);
+ else
+ gen8_gpe_pipeline_setup(ctx, &mfc_context->gpe_context, batch);
for ( i = 0; i < encode_state->num_slice_params_ext; i++) {
- size = gen8_mfc_avc_batchbuffer_slice(ctx, encode_state, encoder_context, i, offset);
- offset += size;
+ gen8_mfc_avc_batchbuffer_slice(ctx, encode_state, encoder_context, i);
+ }
+ {
+ struct intel_batchbuffer *slice_batch = mfc_context->aux_batchbuffer;
+
+ intel_batchbuffer_align(slice_batch, 8);
+ BEGIN_BCS_BATCH(slice_batch, 2);
+ OUT_BCS_BATCH(slice_batch, 0);
+ OUT_BCS_BATCH(slice_batch, MI_BATCH_BUFFER_END);
+ ADVANCE_BCS_BATCH(slice_batch);
+
+ BEGIN_BATCH(batch, 2);
+ OUT_BATCH(batch, CMD_MEDIA_STATE_FLUSH);
+ OUT_BATCH(batch, 0);
+ ADVANCE_BATCH(batch);
}
intel_batchbuffer_end_atomic(batch);
intel_batchbuffer_flush(batch);
+
+ if (IS_GEN9(i965->intel.device_info))
+ gen9_gpe_pipeline_end(ctx, &mfc_context->gpe_context, batch);
}
static void
{
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ dri_bo_reference(mfc_context->aux_batchbuffer_surface.bo);
gen8_mfc_build_avc_batchbuffer(ctx, encode_state, encoder_context);
- dri_bo_reference(mfc_context->mfc_batchbuffer_surface.bo);
- return mfc_context->mfc_batchbuffer_surface.bo;
+ return mfc_context->aux_batchbuffer_surface.bo;
}
-#endif
-
static void
gen8_mfc_avc_pipeline_programing(VADriverContextP ctx,
struct encode_state *encode_state,
return;
}
-#ifdef MFC_SOFTWARE_HASWELL
- slice_batch_bo = gen8_mfc_avc_software_batchbuffer(ctx, encode_state, encoder_context);
-#else
- slice_batch_bo = gen8_mfc_avc_hardware_batchbuffer(ctx, encode_state, encoder_context);
-#endif
+ if (encoder_context->soft_batch_force)
+ slice_batch_bo = gen8_mfc_avc_software_batchbuffer(ctx, encode_state, encoder_context);
+ else
+ slice_batch_bo = gen8_mfc_avc_hardware_batchbuffer(ctx, encode_state, encoder_context);
+
// begin programing
intel_batchbuffer_start_atomic_bcs(batch, 0x4000);
gen8_mfc_run(ctx, encode_state, encoder_context);
if (rate_control_mode == VA_RC_CBR /*|| rate_control_mode == VA_RC_VBR*/) {
gen8_mfc_stop(ctx, encode_state, encoder_context, ¤t_frame_bits_size);
- sts = intel_mfc_brc_postpack(encode_state, mfc_context, current_frame_bits_size);
+ sts = intel_mfc_brc_postpack(encode_state, encoder_context, current_frame_bits_size);
if (sts == BRC_NO_HRD_VIOLATION) {
intel_mfc_hrd_context_update(encode_state, mfc_context);
break;
struct intel_encoder_context *encoder_context)
{
struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
- struct i965_driver_data *i965 = i965_driver_data(ctx);
struct intel_batchbuffer *batch;
VAEncSliceParameterBufferMPEG2 *next_slice_group_param = NULL;
dri_bo *batch_bo;
return VA_STATUS_SUCCESS;
}
-static void
-gen8_mfc_context_destroy(void *context)
+/* JPEG encode methods */
+
+static VAStatus
+intel_mfc_jpeg_prepare(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
{
- struct gen6_mfc_context *mfc_context = context;
- int i;
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ struct object_surface *obj_surface;
+ struct object_buffer *obj_buffer;
+ struct i965_coded_buffer_segment *coded_buffer_segment;
+ VAStatus vaStatus = VA_STATUS_SUCCESS;
+ dri_bo *bo;
+
+ /* input YUV surface */
+ obj_surface = encode_state->input_yuv_object;
+ mfc_context->uncompressed_picture_source.bo = obj_surface->bo;
+ dri_bo_reference(mfc_context->uncompressed_picture_source.bo);
- dri_bo_unreference(mfc_context->post_deblocking_output.bo);
- mfc_context->post_deblocking_output.bo = NULL;
+ /* coded buffer */
+ obj_buffer = encode_state->coded_buf_object;
+ bo = obj_buffer->buffer_store->bo;
+ mfc_context->mfc_indirect_pak_bse_object.bo = bo;
+ mfc_context->mfc_indirect_pak_bse_object.offset = I965_CODEDBUFFER_HEADER_SIZE;
+ mfc_context->mfc_indirect_pak_bse_object.end_offset = ALIGN(obj_buffer->size_element - 0x1000, 0x1000);
+ dri_bo_reference(mfc_context->mfc_indirect_pak_bse_object.bo);
- dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
- mfc_context->pre_deblocking_output.bo = NULL;
+ /* set the internal flag to 0 to indicate the coded size is unknown */
+ dri_bo_map(bo, 1);
+ coded_buffer_segment = (struct i965_coded_buffer_segment *)bo->virtual;
+ coded_buffer_segment->mapped = 0;
+ coded_buffer_segment->codec = encoder_context->codec;
+ dri_bo_unmap(bo);
- dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
- mfc_context->uncompressed_picture_source.bo = NULL;
+ return vaStatus;
+}
- dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo);
- mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
- for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
- dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
- mfc_context->direct_mv_buffers[i].bo = NULL;
+static void
+gen8_mfc_jpeg_set_surface_state(VADriverContextP ctx,
+ struct intel_encoder_context *encoder_context,
+ struct encode_state *encode_state)
+{
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ struct object_surface *obj_surface = encode_state->input_yuv_object;
+ unsigned int input_fourcc;
+ unsigned int y_cb_offset;
+ unsigned int y_cr_offset;
+ unsigned int surface_format;
+
+ assert(obj_surface);
+
+ y_cb_offset = obj_surface->y_cb_offset;
+ y_cr_offset = obj_surface->y_cr_offset;
+ input_fourcc = obj_surface->fourcc;
+
+ surface_format = (obj_surface->fourcc == VA_FOURCC_Y800) ?
+ MFX_SURFACE_MONOCHROME : MFX_SURFACE_PLANAR_420_8;
+
+
+ switch (input_fourcc) {
+ case VA_FOURCC_Y800: {
+ surface_format = MFX_SURFACE_MONOCHROME;
+ break;
+ }
+ case VA_FOURCC_NV12: {
+ surface_format = MFX_SURFACE_PLANAR_420_8;
+ break;
+ }
+ case VA_FOURCC_UYVY: {
+ surface_format = MFX_SURFACE_YCRCB_SWAPY;
+ break;
+ }
+ case VA_FOURCC_YUY2: {
+ surface_format = MFX_SURFACE_YCRCB_NORMAL;
+ break;
+ }
+ case VA_FOURCC_RGBA:
+ case VA_FOURCC_444P: {
+ surface_format = MFX_SURFACE_R8G8B8A8_UNORM;
+ break;
+ }
}
- dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
- mfc_context->intra_row_store_scratch_buffer.bo = NULL;
+ BEGIN_BCS_BATCH(batch, 6);
- dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
- mfc_context->macroblock_status_buffer.bo = NULL;
+ OUT_BCS_BATCH(batch, MFX_SURFACE_STATE | (6 - 2));
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch,
+ ((obj_surface->orig_height - 1) << 18) |
+ ((obj_surface->orig_width - 1) << 4));
+ OUT_BCS_BATCH(batch,
+ (surface_format << 28) | /* Surface Format */
+ (0 << 27) | /* must be 1 for interleave U/V, hardware requirement for AVC/VC1/MPEG and 0 for JPEG */
+ (0 << 22) | /* surface object control state, FIXME??? */
+ ((obj_surface->width - 1) << 3) | /* pitch */
+ (0 << 2) | /* must be 0 for interleave U/V */
+ (1 << 1) | /* must be tiled */
+ (I965_TILEWALK_YMAJOR << 0)); /* tile walk, TILEWALK_YMAJOR */
+ OUT_BCS_BATCH(batch,
+ (0 << 16) | /* X offset for U(Cb), must be 0 */
+ (y_cb_offset << 0)); /* Y offset for U(Cb) */
+ OUT_BCS_BATCH(batch,
+ (0 << 16) | /* X offset for V(Cr), must be 0 */
+ (y_cr_offset << 0)); /* Y offset for V(Cr), must be 0 for video codec, non-zero for JPEG */
+
- dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
- mfc_context->deblocking_filter_row_store_scratch_buffer.bo = NULL;
+ ADVANCE_BCS_BATCH(batch);
+}
- dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
- mfc_context->bsd_mpc_row_store_scratch_buffer.bo = NULL;
+static void
+gen8_mfc_jpeg_pic_state(VADriverContextP ctx,
+ struct intel_encoder_context *encoder_context,
+ struct encode_state *encode_state)
+{
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ struct object_surface *obj_surface = encode_state->input_yuv_object;
+ VAEncPictureParameterBufferJPEG *pic_param;
+ unsigned int surface_format;
+ unsigned int frame_width_in_blks;
+ unsigned int frame_height_in_blks;
+ unsigned int pixels_in_horizontal_lastMCU;
+ unsigned int pixels_in_vertical_lastMCU;
+ unsigned int input_surface_format;
+ unsigned int output_mcu_format;
+ unsigned int picture_width;
+ unsigned int picture_height;
+ assert(encode_state->pic_param_ext && encode_state->pic_param_ext->buffer);
+ assert(obj_surface);
+ pic_param = (VAEncPictureParameterBufferJPEG *)encode_state->pic_param_ext->buffer;
+ surface_format = obj_surface->fourcc;
+ picture_width = pic_param->picture_width;
+ picture_height = pic_param->picture_height;
+
+ switch (surface_format) {
+ case VA_FOURCC_Y800: {
+ input_surface_format = JPEG_ENC_SURFACE_Y8;
+ output_mcu_format = JPEG_ENC_MCU_YUV400;
+ break;
+ }
+ case VA_FOURCC_NV12: {
+ input_surface_format = JPEG_ENC_SURFACE_NV12;
+ output_mcu_format = JPEG_ENC_MCU_YUV420;
+ break;
+ }
+ case VA_FOURCC_UYVY: {
+ input_surface_format = JPEG_ENC_SURFACE_UYVY;
+ output_mcu_format = JPEG_ENC_MCU_YUV422H_2Y;
+ break;
+ }
+ case VA_FOURCC_YUY2: {
+ input_surface_format = JPEG_ENC_SURFACE_YUY2;
+ output_mcu_format = JPEG_ENC_MCU_YUV422H_2Y;
+ break;
+ }
- for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
- dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
- mfc_context->reference_surfaces[i].bo = NULL;
+ case VA_FOURCC_RGBA:
+ case VA_FOURCC_444P: {
+ input_surface_format = JPEG_ENC_SURFACE_RGB;
+ output_mcu_format = JPEG_ENC_MCU_RGB;
+ break;
+ }
+ default : {
+ input_surface_format = JPEG_ENC_SURFACE_NV12;
+ output_mcu_format = JPEG_ENC_MCU_YUV420;
+ break;
+ }
}
- i965_gpe_context_destroy(&mfc_context->gpe_context);
-
- dri_bo_unreference(mfc_context->mfc_batchbuffer_surface.bo);
- mfc_context->mfc_batchbuffer_surface.bo = NULL;
-
- dri_bo_unreference(mfc_context->aux_batchbuffer_surface.bo);
- mfc_context->aux_batchbuffer_surface.bo = NULL;
+
+ switch (output_mcu_format) {
+
+ case JPEG_ENC_MCU_YUV400:
+ case JPEG_ENC_MCU_RGB: {
+ pixels_in_horizontal_lastMCU = (picture_width % 8);
+ pixels_in_vertical_lastMCU = (picture_height % 8);
+
+ //H1=1,V1=1 for YUV400 and YUV444. So, compute these values accordingly
+ frame_width_in_blks = ((picture_width + 7) / 8);
+ frame_height_in_blks = ((picture_height + 7) / 8);
+ break;
+ }
+
+ case JPEG_ENC_MCU_YUV420: {
+ if((picture_width % 2) == 0)
+ pixels_in_horizontal_lastMCU = picture_width % 16;
+ else
+ pixels_in_horizontal_lastMCU = ((picture_width % 16) + 1) % 16;
+
+ if((picture_height % 2) == 0)
+ pixels_in_vertical_lastMCU = picture_height % 16;
+ else
+ pixels_in_vertical_lastMCU = ((picture_height % 16) + 1) % 16;
+
+ //H1=2,V1=2 for YUV420. So, compute these values accordingly
+ frame_width_in_blks = ((picture_width + 15) / 16) * 2;
+ frame_height_in_blks = ((picture_height + 15) / 16) * 2;
+ break;
+ }
+
+ case JPEG_ENC_MCU_YUV422H_2Y: {
+ if(picture_width % 2 == 0)
+ pixels_in_horizontal_lastMCU = picture_width % 16;
+ else
+ pixels_in_horizontal_lastMCU = ((picture_width % 16) + 1) % 16;
+
+ pixels_in_vertical_lastMCU = picture_height % 8;
+
+ //H1=2,V1=1 for YUV422H_2Y. So, compute these values accordingly
+ frame_width_in_blks = ((picture_width + 15) / 16) * 2;
+ frame_height_in_blks = ((picture_height + 7) / 8);
+ break;
+ }
+ } //end of switch
+
+ BEGIN_BCS_BATCH(batch, 3);
+ /* DWORD 0 */
+ OUT_BCS_BATCH(batch, MFX_JPEG_PIC_STATE | (3 - 2));
+ /* DWORD 1 */
+ OUT_BCS_BATCH(batch,
+ ( pixels_in_horizontal_lastMCU << 26) | /* Pixels In Horizontal Last MCU */
+ ( pixels_in_vertical_lastMCU << 21) | /* Pixels In Vertical Last MCU */
+ ( input_surface_format << 8) | /* Input Surface format */
+ ( output_mcu_format << 0)); /* Output MCU Structure */
+ /* DWORD 2 */
+ OUT_BCS_BATCH(batch,
+ ((frame_height_in_blks - 1) << 16) | /* Frame Height In Blks Minus 1 */
+ (JPEG_ENC_ROUND_QUANT_DEFAULT << 13) | /* Rounding Quant set to default value 0 */
+ ((frame_width_in_blks - 1) << 0)); /* Frame Width In Blks Minus 1 */
+ ADVANCE_BCS_BATCH(batch);
+}
+
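+//Pack the 64 quantizer reciprocals (65535/Q) into 32 DWords, two 16-bit values
+//per DWord with the lower-indexed element in the low halfword; this is the
+//layout gen8_mfc_fqm_state sends to the hardware.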
+static void
+get_reciprocal_dword_qm(unsigned char *raster_qm, uint32_t *dword_qm)
+{
+ int i = 0, j = 0;
+ short reciprocal_qm[64];
+
+ for(i=0; i<64; i++) {
+ reciprocal_qm[i] = 65535/(raster_qm[i]);
+ }
+
+ for(i=0; i<64; i++) {
+ dword_qm[j] = ((reciprocal_qm[i+1] <<16) | (reciprocal_qm[i]));
+ j++;
+ i++;
+ }
+
+}
+
+
+static void
+gen8_mfc_jpeg_fqm_state(VADriverContextP ctx,
+ struct intel_encoder_context *encoder_context,
+ struct encode_state *encode_state)
+{
+ unsigned int quality = 0;
+ uint32_t temp, i = 0, j = 0, dword_qm[32];
+ VAEncPictureParameterBufferJPEG *pic_param;
+ VAQMatrixBufferJPEG *qmatrix;
+ unsigned char raster_qm[64], column_raster_qm[64];
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+ assert(encode_state->pic_param_ext && encode_state->pic_param_ext->buffer);
+ pic_param = (VAEncPictureParameterBufferJPEG *)encode_state->pic_param_ext->buffer;
+ quality = pic_param->quality;
+
+ //If the app sends the qmatrix, use it and buffer it for use with subsequent frames.
+ //The app may send the qmatrix for the first frame only and omit it for subsequent frames.
+ if(encode_state->q_matrix && encode_state->q_matrix->buffer) {
+ qmatrix = (VAQMatrixBufferJPEG *)encode_state->q_matrix->buffer;
+
+ mfc_context->buffered_qmatrix.load_lum_quantiser_matrix = 1;
+ memcpy(mfc_context->buffered_qmatrix.lum_quantiser_matrix, qmatrix->lum_quantiser_matrix, 64 * (sizeof(unsigned char)));
+
+ if(pic_param->num_components > 1) {
+ mfc_context->buffered_qmatrix.load_chroma_quantiser_matrix = 1;
+ memcpy(mfc_context->buffered_qmatrix.chroma_quantiser_matrix, qmatrix->chroma_quantiser_matrix, 64 * (sizeof(unsigned char)));
+ } else {
+ mfc_context->buffered_qmatrix.load_chroma_quantiser_matrix = 0;
+ }
+
+ } else {
+ //If the app doesn't send the qmatrix, use the buffered/default qmatrix
+ qmatrix = &mfc_context->buffered_qmatrix;
+ qmatrix->load_lum_quantiser_matrix = 1;
+ qmatrix->load_chroma_quantiser_matrix = (pic_param->num_components > 1) ? 1 : 0;
+ }
+
+
+ //As per the design, normalization of the quality factor and scaling of the Quantization tables
+ //based on the quality factor needs to be done in the driver before sending the values to the HW.
+ //But note, the driver expects the scaled quantization tables (as per below logic) to be sent as
+ //packed header information. The packed header is written as the header of the jpeg file. This
+ //header information is used to decode the jpeg file. So, it is the app's responsibility to send
+ //the correct header information (See build_packed_jpeg_header_buffer() in jpegenc.c in LibVa on
+ //how to do this). QTables can be different for different applications. If no tables are provided,
+ //the default tables in the driver are used.
+
+ //Normalization of the quality factor
+ if (quality > 100) quality=100;
+ if (quality == 0) quality=1;
+ quality = (quality < 50) ? (5000/quality) : (200 - (quality*2));
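+ //e.g. quality=75 maps to a scale of 50, so each table entry becomes Q[i]/2;
+ //quality=25 maps to a scale of 200, doubling each entry (clamped to [1, 255] below).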
+
+ //Step 1. Apply Quality factor and clip to range [1, 255] for luma and chroma Quantization matrices
+ //Step 2. HW expects the 1/Q[i] values in the qm sent, so get reciprocals
+ //Step 3. HW also expects 32 dwords, hence combine 2 (1/Q) values into 1 dword
+ //Step 4. Send the Quantization matrix to the HW, use gen8_mfc_fqm_state
+
+ //For luma (Y or R)
+ if(qmatrix->load_lum_quantiser_matrix) {
+ //apply quality to lum_quantiser_matrix
+ for(i=0; i < 64; i++) {
+ temp = (qmatrix->lum_quantiser_matrix[i] * quality)/100;
+ //clamp to range [1,255]
+ temp = (temp > 255) ? 255 : temp;
+ temp = (temp < 1) ? 1 : temp;
+ qmatrix->lum_quantiser_matrix[i] = (unsigned char)temp;
+ }
+
+ //For VAAPI, the VAQMatrixBuffer needs to be in zigzag order.
+ //The App should send it in zigzag order; the driver then converts it to raster order.
+ for (j = 0; j < 64; j++)
+ raster_qm[zigzag_direct[j]] = qmatrix->lum_quantiser_matrix[j];
+
+ //Convert the raster order (row-ordered) to column-raster order (column by column).
+ //To be consistent with the other encoders, send it in column order.
+ //Need to double check if our HW expects col or row raster.
+ for (j = 0; j < 64; j++) {
+ int row = j / 8, col = j % 8;
+ column_raster_qm[col * 8 + row] = raster_qm[j];
+ }
+
+ //Convert the raster QM to reciprocals. HW expects the values as reciprocals.
+ get_reciprocal_dword_qm(column_raster_qm, dword_qm);
+
+ //send the luma qm to the command buffer
+ gen8_mfc_fqm_state(ctx, MFX_QM_JPEG_LUMA_Y_QUANTIZER_MATRIX, dword_qm, 32, encoder_context);
+ }
+
+ //For Chroma, if chroma exists (Cb, Cr or G, B)
+ if(qmatrix->load_chroma_quantiser_matrix) {
+ //apply quality to chroma_quantiser_matrix
+ for(i=0; i < 64; i++) {
+ temp = (qmatrix->chroma_quantiser_matrix[i] * quality)/100;
+ //clamp to range [1,255]
+ temp = (temp > 255) ? 255 : temp;
+ temp = (temp < 1) ? 1 : temp;
+ qmatrix->chroma_quantiser_matrix[i] = (unsigned char)temp;
+ }
+
+ //For VAAPI, the VAQMatrixBuffer needs to be in zigzag order.
+ //The App should send it in zigzag order; the driver then converts it to raster order.
+ for (j = 0; j < 64; j++)
+ raster_qm[zigzag_direct[j]] = qmatrix->chroma_quantiser_matrix[j];
+
+ //Convert the raster order (row-ordered) to column-raster order (column by column).
+ //To be consistent with the other encoders, send it in column order.
+ //Need to double check if our HW expects col or row raster.
+ for (j = 0; j < 64; j++) {
+ int row = j / 8, col = j % 8;
+ column_raster_qm[col * 8 + row] = raster_qm[j];
+ }
+
+
+ //Convert the raster QM to reciprocals. HW expects the values as reciprocals.
+ get_reciprocal_dword_qm(column_raster_qm, dword_qm);
+
+ //send the same chroma qm to the command buffer (for both U,V or G,B)
+ gen8_mfc_fqm_state(ctx, MFX_QM_JPEG_CHROMA_CB_QUANTIZER_MATRIX, dword_qm, 32, encoder_context);
+ gen8_mfc_fqm_state(ctx, MFX_QM_JPEG_CHROMA_CR_QUANTIZER_MATRIX, dword_qm, 32, encoder_context);
+ }
+}
+
+
+//Translation of Table K.5 into code: This method takes the huffval from the
+//Huffmantable buffer and converts into index for the coefficients and size tables
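+//e.g. huff_val 0x23 (run=2, size=3) maps to index 2*10 + 3 = 23, while the
+//special value 0xF0 (ZRL) maps to index 1 + 15*10 + 0 = 151.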
+uint8_t map_huffval_to_index(uint8_t huff_val)
+{
+ uint8_t index = 0;
+
+ if(huff_val < 0xF0) {
+ index = (((huff_val >> 4) & 0x0F) * 0xA) + (huff_val & 0x0F);
+ } else {
+ index = 1 + (((huff_val >> 4) & 0x0F) * 0xA) + (huff_val & 0x0F);
+ }
+
+ return index;
+}
+
+
+//Implementation of Flow chart Annex C - Figure C.1
+static void
+generate_huffman_codesizes_table(uint8_t *bits, uint8_t *huff_size_table, uint8_t *lastK)
+{
+ uint8_t i=1, j=1, k=0;
+
+ while(i <= 16) {
+ while(j <= (uint8_t)bits[i-1]) {
+ huff_size_table[k] = i;
+ k = k+1;
+ j = j+1;
+ }
+
+ i = i+1;
+ j = 1;
+ }
+ huff_size_table[k] = 0;
+ (*lastK) = k;
+}
+
+//Implementation of Flow chart Annex C - Figure C.2
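+//Assigns canonical Huffman codes in order of increasing code length. Continuing
+//the example above, sizes {2, 2, 3} yield the codes {0b00, 0b01, 0b100}.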
+static void
+generate_huffman_codes_table(uint8_t *huff_size_table, uint16_t *huff_code_table)
+{
+ uint8_t k=0;
+ uint16_t code=0;
+ uint8_t si=huff_size_table[k];
+
+ while(huff_size_table[k] != 0) {
+
+ while(huff_size_table[k] == si) {
+
+ // A Huffman code can never be 0xFFFF. Replace it with 0 if it would be 0xFFFF
+ if(code == 0xFFFF) {
+ code = 0x0000;
+ }
+
+ huff_code_table[k] = code;
+ code = code+1;
+ k = k+1;
+ }
+
+ code <<= 1;
+ si = si+1;
+ }
+
+}
+
+//Implementation of Flow chart Annex C - Figure C.3
+static void
+generate_ordered_codes_table(uint8_t *huff_vals, uint8_t *huff_size_table, uint16_t *huff_code_table, uint8_t type, uint8_t lastK)
+{
+ uint8_t huff_val_size=0, i=0, k=0;
+
+ huff_val_size = (type == 0) ? 12 : 162;
+ uint8_t huff_si_table[huff_val_size];
+ uint16_t huff_co_table[huff_val_size];
+
+ memset(huff_si_table, 0, sizeof(huff_si_table));
+ memset(huff_co_table, 0, sizeof(huff_co_table));
+
+ do {
+ i = map_huffval_to_index(huff_vals[k]);
+ huff_co_table[i] = huff_code_table[k];
+ huff_si_table[i] = huff_size_table[k];
+ k++;
+ } while(k < lastK);
+
+ memcpy(huff_size_table, huff_si_table, sizeof(uint8_t)*huff_val_size);
+ memcpy(huff_code_table, huff_co_table, sizeof(uint16_t)*huff_val_size);
+}
+
+
+//This method converts the huffman table to code words which is needed by the HW
+//Flowcharts from Jpeg Spec Annex C - Figure C.1, Figure C.2, Figure C.3 are used here
+static void
+convert_hufftable_to_codes(VAHuffmanTableBufferJPEGBaseline *huff_buffer, uint32_t *table, uint8_t type, uint8_t index)
+{
+ uint8_t lastK = 0, i=0;
+ uint8_t huff_val_size = 0;
+ uint8_t *huff_bits, *huff_vals;
+
+ huff_val_size = (type == 0) ? 12 : 162;
+ uint8_t huff_size_table[huff_val_size+1]; //The +1 for adding 0 at the end of huff_val_size
+ uint16_t huff_code_table[huff_val_size];
+
+ memset(huff_size_table, 0, sizeof(huff_size_table));
+ memset(huff_code_table, 0, sizeof(huff_code_table));
+
+ huff_bits = (type == 0) ? (huff_buffer->huffman_table[index].num_dc_codes) : (huff_buffer->huffman_table[index].num_ac_codes);
+ huff_vals = (type == 0) ? (huff_buffer->huffman_table[index].dc_values) : (huff_buffer->huffman_table[index].ac_values);
+
+
+ //Generation of table of Huffman code sizes
+ generate_huffman_codesizes_table(huff_bits, huff_size_table, &lastK);
+
+ //Generation of table of Huffman codes
+ generate_huffman_codes_table(huff_size_table, huff_code_table);
+
+ //Ordering procedure for encoding procedure code tables
+ generate_ordered_codes_table(huff_vals, huff_size_table, huff_code_table, type, lastK);
+
+ //HW expects Byte0: Code length; Byte1,Byte2: Code Word; Byte3: Dummy
+ //Since IA is little-endian, use &, | and << accordingly to store the values in the DWord.
+ for(i=0; i<huff_val_size; i++) {
+ table[i] = 0;
+ table[i] = ((huff_size_table[i] & 0xFF) | ((huff_code_table[i] & 0xFFFF) << 8));
+ }
+
+}
+
+//send the huffman table using MFC_JPEG_HUFF_TABLE_STATE
+static void
+gen8_mfc_jpeg_huff_table_state(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ int num_tables)
+{
+ VAHuffmanTableBufferJPEGBaseline *huff_buffer;
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ uint8_t index;
+ uint32_t dc_table[12], ac_table[162];
+
+ assert(encode_state->huffman_table && encode_state->huffman_table->buffer);
+ huff_buffer = (VAHuffmanTableBufferJPEGBaseline *)encode_state->huffman_table->buffer;
+
+ memset(dc_table, 0, sizeof(dc_table));
+ memset(ac_table, 0, sizeof(ac_table));
+
+ for (index = 0; index < num_tables; index++) {
+ int id = va_to_gen7_jpeg_hufftable[index];
+
+ if (!huff_buffer->load_huffman_table[index])
+ continue;
+
+ //load DC table with 12 DWords
+ convert_hufftable_to_codes(huff_buffer, dc_table, 0, index); //0 for Dc
+
+ //load AC table with 162 DWords
+ convert_hufftable_to_codes(huff_buffer, ac_table, 1, index); //1 for AC
+
+ BEGIN_BCS_BATCH(batch, 176);
+ OUT_BCS_BATCH(batch, MFC_JPEG_HUFF_TABLE_STATE | (176 - 2));
+ OUT_BCS_BATCH(batch, id); //Huff table id
+
+ //DWord 2 - 13 has DC_TABLE
+ intel_batchbuffer_data(batch, dc_table, 12*4);
+
+ //Dword 14 -175 has AC_TABLE
+ intel_batchbuffer_data(batch, ac_table, 162*4);
+ ADVANCE_BCS_BATCH(batch);
+ }
+}
+
+
+//This method returns the luma sampling factors used to compute the MCU count for MFC_JPEG_SCAN_OBJECT
+static void get_Y_sampling_factors(uint32_t surface_format, uint8_t *h_factor, uint8_t *v_factor)
+{
+ switch (surface_format) {
+ case VA_FOURCC_Y800: {
+ (* h_factor) = 1;
+ (* v_factor) = 1;
+ break;
+ }
+ case VA_FOURCC_NV12: {
+ (* h_factor) = 2;
+ (* v_factor) = 2;
+ break;
+ }
+ case VA_FOURCC_UYVY: {
+ (* h_factor) = 2;
+ (* v_factor) = 1;
+ break;
+ }
+ case VA_FOURCC_YUY2: {
+ (* h_factor) = 2;
+ (* v_factor) = 1;
+ break;
+ }
+ case VA_FOURCC_RGBA:
+ case VA_FOURCC_444P: {
+ (* h_factor) = 1;
+ (* v_factor) = 1;
+ break;
+ }
+ default : { //We may have to insert error handling here. For now just use the values below
+ (* h_factor) = 1;
+ (* v_factor) = 1;
+ break;
+ }
+ }
+}
+
+//set MFC_JPEG_SCAN_OBJECT
+static void
+gen8_mfc_jpeg_scan_object(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ uint32_t mcu_count, surface_format, Mx, My;
+ uint8_t i, horizontal_sampling_factor, vertical_sampling_factor, huff_ac_table=0, huff_dc_table=0;
+ uint8_t is_last_scan = 1; //Jpeg has only 1 scan per frame. When last scan, HW inserts EOI code.
+ uint8_t head_present_flag=1; //Header has tables and app data
+ uint16_t num_components, restart_interval; //Specifies number of MCUs in an ECS.
+ VAEncSliceParameterBufferJPEG *slice_param;
+ VAEncPictureParameterBufferJPEG *pic_param;
+
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ struct object_surface *obj_surface = encode_state->input_yuv_object;
+
+ assert(encode_state->slice_params_ext[0] && encode_state->slice_params_ext[0]->buffer);
+ assert(encode_state->pic_param_ext && encode_state->pic_param_ext->buffer);
+ assert(obj_surface);
+ pic_param = (VAEncPictureParameterBufferJPEG *)encode_state->pic_param_ext->buffer;
+ slice_param = (VAEncSliceParameterBufferJPEG *)encode_state->slice_params_ext[0]->buffer;
+ surface_format = obj_surface->fourcc;
+
+ get_Y_sampling_factors(surface_format, &horizontal_sampling_factor, &vertical_sampling_factor);
+
+ // Mx = #MCUs in a row, My = #MCUs in a column
+ Mx = (pic_param->picture_width + (horizontal_sampling_factor*8 -1))/(horizontal_sampling_factor*8);
+ My = (pic_param->picture_height + (vertical_sampling_factor*8 -1))/(vertical_sampling_factor*8);
+ mcu_count = (Mx * My);
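+ //Illustrative example: a 1920x1080 NV12 surface has 2x2 sampling factors, so
+ //Mx = ceil(1920/16) = 120, My = ceil(1080/16) = 68 and mcu_count = 120 * 68 = 8160.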
+
+ num_components = pic_param->num_components;
+ restart_interval = slice_param->restart_interval;
+
+ //Depending on number of components and values set for table selectors,
+ //only those bits are set in 24:22 for AC table, 20:18 for DC table
+ for(i=0; i<num_components; i++) {
+ huff_ac_table |= ((slice_param->components[i].ac_table_selector)<<i);
+ huff_dc_table |= ((slice_param->components[i].dc_table_selector)<<i);
+ }
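+ //Illustrative example: with 3 components where component 0 selects table 0 and
+ //components 1 and 2 select table 1, huff_ac_table = huff_dc_table = 0b110.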
+
+
+ BEGIN_BCS_BATCH(batch, 3);
+ /* DWORD 0 */
+ OUT_BCS_BATCH(batch, MFC_JPEG_SCAN_OBJECT | (3 - 2));
+ /* DWORD 1 */
+ OUT_BCS_BATCH(batch, mcu_count << 0); //MCU Count
+ /* DWORD 2 */
+ OUT_BCS_BATCH(batch,
+ (huff_ac_table << 22) | //Huffman AC Table
+ (huff_dc_table << 18) | //Huffman DC Table
+ (head_present_flag << 17) | //Head present flag
+ (is_last_scan << 16) | //Is last scan
+ (restart_interval << 0)); //Restart Interval
+ ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen8_mfc_jpeg_pak_insert_object(struct intel_encoder_context *encoder_context, unsigned int *insert_data,
+ int length_in_dws, int data_bits_in_last_dw, int is_last_header,
+ int is_end_of_slice)
+{
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ assert(batch);
+
+ if (data_bits_in_last_dw == 0)
+ data_bits_in_last_dw = 32;
+
+ BEGIN_BCS_BATCH(batch, length_in_dws + 2);
+
+ OUT_BCS_BATCH(batch, MFX_INSERT_OBJECT | (length_in_dws + 2 - 2));
+ //DWord 1
+ OUT_BCS_BATCH(batch,
+ (0 << 16) | //DataByteOffset 0 for JPEG Encoder
+ (0 << 15) | //HeaderLengthExcludeFrmSize 0 for JPEG Encoder
+ (data_bits_in_last_dw << 8) | //DataBitsInLastDW
+ (0 << 4) | //SkipEmulByteCount 0 for JPEG Encoder
+ (0 << 3) | //EmulationFlag 0 for JPEG Encoder
+ ((!!is_last_header) << 2) | //LastHeaderFlag
+ ((!!is_end_of_slice) << 1) | //EndOfSliceFlag
+ (1 << 0)); //BitstreamStartReset 1 for JPEG Encoder
+ //Data Payload
+ intel_batchbuffer_data(batch, insert_data, length_in_dws*4);
+
+ ADVANCE_BCS_BATCH(batch);
+}
+
+
+//send the jpeg headers to HW using MFX_PAK_INSERT_OBJECT
+static void
+gen8_mfc_jpeg_add_headers(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ if (encode_state->packed_header_data_ext) {
+ VAEncPackedHeaderParameterBuffer *param = NULL;
+ unsigned int *header_data = (unsigned int *)(*encode_state->packed_header_data_ext)->buffer;
+ unsigned int length_in_bits;
+
+ param = (VAEncPackedHeaderParameterBuffer *)(*encode_state->packed_header_params_ext)->buffer;
+ length_in_bits = param->bit_length;
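+ //Illustrative example: a packed header with length_in_bits = 100 is sent as
+ //ALIGN(100, 32) >> 5 = 4 DWords, with 100 & 0x1f = 4 valid bits in the last DWord.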
+
+ gen8_mfc_jpeg_pak_insert_object(encoder_context,
+ header_data,
+ ALIGN(length_in_bits, 32) >> 5,
+ length_in_bits & 0x1f,
+ 1,
+ 1);
+ }
+}
+
+//Initialize the buffered_qmatrix with the default qmatrix in the driver.
+//If the app sends the qmatrix, this will be replaced with the one app sends.
+static void
+jpeg_init_default_qmatrix(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
+{
+ int i=0;
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+ //Load the QM in zigzag order. If the app sends a QM, it is always in zigzag order.
+ for(i=0; i<64; i++)
+ mfc_context->buffered_qmatrix.lum_quantiser_matrix[i] = jpeg_luma_quant[zigzag_direct[i]];
+
+ for(i=0; i<64; i++)
+ mfc_context->buffered_qmatrix.chroma_quantiser_matrix[i] = jpeg_chroma_quant[zigzag_direct[i]];
+}
+
+/* This is at the picture level */
+static void
+gen8_mfc_jpeg_pipeline_picture_programing(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ int i, j, component, max_selector = 0;
+ VAEncSliceParameterBufferJPEG *slice_param;
+
+ gen8_mfc_pipe_mode_select(ctx, MFX_FORMAT_JPEG, encoder_context);
+ gen8_mfc_jpeg_set_surface_state(ctx, encoder_context, encode_state);
+ gen8_mfc_pipe_buf_addr_state(ctx, encoder_context);
+ gen8_mfc_ind_obj_base_addr_state(ctx, encoder_context);
+ gen8_mfc_bsp_buf_base_addr_state(ctx, encoder_context);
+ gen8_mfc_jpeg_pic_state(ctx, encoder_context, encode_state);
+
+ //do the slice level encoding here
+ gen8_mfc_jpeg_fqm_state(ctx, encoder_context, encode_state);
+
+ //This for loop may not be strictly needed; it is kept for consistency with the other encoding logic.
+ for(i = 0; i < encode_state->num_slice_params_ext; i++) {
+ assert(encode_state->slice_params_ext && encode_state->slice_params_ext[i]->buffer);
+ slice_param = (VAEncSliceParameterBufferJPEG *)encode_state->slice_params_ext[i]->buffer;
+
+ for(j = 0; j < encode_state->slice_params_ext[i]->num_elements; j++) {
+
+ for(component = 0; component < slice_param->num_components; component++) {
+ if(max_selector < slice_param->components[component].dc_table_selector)
+ max_selector = slice_param->components[component].dc_table_selector;
+
+ if (max_selector < slice_param->components[component].ac_table_selector)
+ max_selector = slice_param->components[component].ac_table_selector;
+ }
+
+ slice_param++;
+ }
+ }
+
+ assert(max_selector < 2);
+ //send the huffman table using MFC_JPEG_HUFF_TABLE
+ gen8_mfc_jpeg_huff_table_state(ctx, encode_state, encoder_context, max_selector+1);
+ //set MFC_JPEG_SCAN_OBJECT
+ gen8_mfc_jpeg_scan_object(ctx, encode_state, encoder_context);
+ //add headers using MFX_PAK_INSERT_OBJECT (referred to as MFX_INSERT_OBJECT in this driver code)
+ gen8_mfc_jpeg_add_headers(ctx, encode_state, encoder_context);
+
+}
+
+static void
+gen8_mfc_jpeg_pipeline_programing(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+
+ // begin programming
+ intel_batchbuffer_start_atomic_bcs(batch, 0x4000);
+ intel_batchbuffer_emit_mi_flush(batch);
+
+ // picture level programming
+ gen8_mfc_jpeg_pipeline_picture_programing(ctx, encode_state, encoder_context);
+
+ // end programming
+ intel_batchbuffer_end_atomic(batch);
+
+}
+
+
+static VAStatus
+gen8_mfc_jpeg_encode_picture(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ gen8_mfc_init(ctx, encode_state, encoder_context);
+ intel_mfc_jpeg_prepare(ctx, encode_state, encoder_context);
+ /*Programming the BCS pipeline*/
+ gen8_mfc_jpeg_pipeline_programing(ctx, encode_state, encoder_context);
+ gen8_mfc_run(ctx, encode_state, encoder_context);
+
+ return VA_STATUS_SUCCESS;
+}
+
+static int gen8_mfc_vp8_qindex_estimate(struct encode_state *encode_state,
+ struct gen6_mfc_context *mfc_context,
+ int target_frame_size,
+ int is_key_frame)
+{
+ VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+ unsigned int max_qindex = pic_param->clamp_qindex_high;
+ unsigned int min_qindex = pic_param->clamp_qindex_low;
+ int width_in_mbs = ALIGN(seq_param->frame_width, 16) / 16;
+ int height_in_mbs = ALIGN(seq_param->frame_height, 16) / 16;
+ int target_mb_size;
+ int last_size_gap = -1;
+ int per_mb_size_at_qindex;
+ int target_qindex = min_qindex, i;
+
+ /* make sure the scaling below would not overflow */
+ if (target_frame_size >= (0x7fffffff >> 9))
+ target_mb_size = (target_frame_size / width_in_mbs / height_in_mbs) << 9;
+ else
+ target_mb_size = (target_frame_size << 9) / width_in_mbs / height_in_mbs;
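+ /* Illustrative example: target_frame_size = 100000 bits for a 1280x720 frame
+ * (80x45 MBs) gives target_mb_size = (100000 << 9) / 80 / 45 = 14222, in the
+ * same 512-scaled units as the vp8_bits_per_mb[] table. */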
+
+ for (i = min_qindex; i <= max_qindex; i++) {
+ per_mb_size_at_qindex = vp8_bits_per_mb[!is_key_frame][i];
+ target_qindex = i;
+ if (per_mb_size_at_qindex <= target_mb_size) {
+ if (target_mb_size - per_mb_size_at_qindex < last_size_gap)
+ target_qindex--;
+ break;
+ }
+ else
+ last_size_gap = per_mb_size_at_qindex - target_mb_size;
+ }
+
+ return target_qindex;
+}
+
+static void gen8_mfc_vp8_brc_init(struct encode_state *encode_state,
+ struct intel_encoder_context* encoder_context)
+{
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ VAEncMiscParameterBuffer* misc_param_hrd = (VAEncMiscParameterBuffer*)encode_state->misc_param[VAEncMiscParameterTypeHRD][0]->buffer;
+ VAEncMiscParameterHRD* param_hrd = (VAEncMiscParameterHRD*)misc_param_hrd->data;
+ VAEncMiscParameterBuffer* misc_param_frame_rate_buffer = (VAEncMiscParameterBuffer*)encode_state->misc_param[VAEncMiscParameterTypeFrameRate][0]->buffer;
+ VAEncMiscParameterFrameRate* param_frame_rate = (VAEncMiscParameterFrameRate*)misc_param_frame_rate_buffer->data;
+ double bitrate = seq_param->bits_per_second;
+ unsigned int frame_rate = param_frame_rate->framerate;
+ int inum = 1, pnum = 0;
+ int intra_period = seq_param->intra_period;
+ int width_in_mbs = ALIGN(seq_param->frame_width, 16) / 16;
+ int height_in_mbs = ALIGN(seq_param->frame_height, 16) / 16;
+ int max_frame_size = (vp8_bits_per_mb[0][0] >> 9) * width_in_mbs * height_in_mbs;/* the vp8_bits_per_mb table is multiplied by 512 */
+
+ pnum = intra_period - 1;
+
+ mfc_context->brc.mode = encoder_context->rate_control_mode;
+
+ mfc_context->brc.target_frame_size[0][SLICE_TYPE_I] = (int)((double)((bitrate * intra_period)/frame_rate) /
+ (double)(inum + BRC_PWEIGHT * pnum ));
+ mfc_context->brc.target_frame_size[0][SLICE_TYPE_P] = BRC_PWEIGHT * mfc_context->brc.target_frame_size[0][SLICE_TYPE_I];
+
+ mfc_context->brc.gop_nums[SLICE_TYPE_I] = inum;
+ mfc_context->brc.gop_nums[SLICE_TYPE_P] = pnum;
+
+ mfc_context->brc.bits_per_frame[0] = bitrate/frame_rate;
+
+ mfc_context->brc.qp_prime_y[0][SLICE_TYPE_I] = gen8_mfc_vp8_qindex_estimate(encode_state,
+ mfc_context,
+ mfc_context->brc.target_frame_size[0][SLICE_TYPE_I],
+ 1);
+ mfc_context->brc.qp_prime_y[0][SLICE_TYPE_P] = gen8_mfc_vp8_qindex_estimate(encode_state,
+ mfc_context,
+ mfc_context->brc.target_frame_size[0][SLICE_TYPE_P],
+ 0);
+
+ mfc_context->hrd.buffer_size = (double)param_hrd->buffer_size;
+ mfc_context->hrd.current_buffer_fullness =
+ (param_hrd->initial_buffer_fullness < mfc_context->hrd.buffer_size) ?
+ (double)param_hrd->initial_buffer_fullness : mfc_context->hrd.buffer_size/2.;
+ mfc_context->hrd.target_buffer_fullness = (double)mfc_context->hrd.buffer_size/2.;
+ mfc_context->hrd.buffer_capacity = (double)mfc_context->hrd.buffer_size/max_frame_size;
+ mfc_context->hrd.violation_noted = 0;
+}
+
+static int gen8_mfc_vp8_brc_postpack(struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ int frame_bits)
+{
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ gen6_brc_status sts = BRC_NO_HRD_VIOLATION;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+ int is_key_frame = !pic_param->pic_flags.bits.frame_type;
+ int slicetype = (is_key_frame ? SLICE_TYPE_I : SLICE_TYPE_P);
+ int qpi = mfc_context->brc.qp_prime_y[0][SLICE_TYPE_I];
+ int qpp = mfc_context->brc.qp_prime_y[0][SLICE_TYPE_P];
+ int qp; // quantizer of previously encoded slice of current type
+ int qpn; // predicted quantizer for next frame of current type in integer format
+ double qpf; // predicted quantizer for next frame of current type in float format
+ double delta_qp; // QP correction
+ int target_frame_size, frame_size_next;
+ /* Notes:
+ * x - how far we are from HRD buffer borders
+ * y - how far we are from target HRD buffer fullness
+ */
+ double x, y;
+ double frame_size_alpha;
+ unsigned int max_qindex = pic_param->clamp_qindex_high;
+ unsigned int min_qindex = pic_param->clamp_qindex_low;
+
+ qp = mfc_context->brc.qp_prime_y[0][slicetype];
+
+ target_frame_size = mfc_context->brc.target_frame_size[0][slicetype];
+ if (mfc_context->hrd.buffer_capacity < 5)
+ frame_size_alpha = 0;
+ else
+ frame_size_alpha = (double)mfc_context->brc.gop_nums[slicetype];
+ if (frame_size_alpha > 30) frame_size_alpha = 30;
+ frame_size_next = target_frame_size + (double)(target_frame_size - frame_bits) /
+ (double)(frame_size_alpha + 1.);
+
+ /* frame_size_next: avoid negative or too-small values */
+ if ((double)frame_size_next < (double)(target_frame_size * 0.25))
+ frame_size_next = (int)((double)target_frame_size * 0.25);
+
+ qpf = (double)qp * target_frame_size / frame_size_next;
+ qpn = (int)(qpf + 0.5);
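+ /* Illustrative example: with qp = 40, target_frame_size = 30000 and
+ * frame_size_next = 25000 (the last frame overshot its budget),
+ * qpf = 40 * 30000 / 25000 = 48.0 and qpn = 48, i.e. the quantizer is raised. */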
+
+ if (qpn == qp) {
+ /* rounding qpf to qpn introduces an error: accumulate it and compensate here */
+ mfc_context->brc.qpf_rounding_accumulator += qpf - qpn;
+ if (mfc_context->brc.qpf_rounding_accumulator > 1.0) {
+ qpn++;
+ mfc_context->brc.qpf_rounding_accumulator = 0.;
+ } else if (mfc_context->brc.qpf_rounding_accumulator < -1.0) {
+ qpn--;
+ mfc_context->brc.qpf_rounding_accumulator = 0.;
+ }
+ }
+
+ /* making sure that QP is not changing too fast */
+ if ((qpn - qp) > BRC_QP_MAX_CHANGE) qpn = qp + BRC_QP_MAX_CHANGE;
+ else if ((qpn - qp) < -BRC_QP_MAX_CHANGE) qpn = qp - BRC_QP_MAX_CHANGE;
+ /* make sure the predicted QP stays within the valid range */
+ BRC_CLIP(qpn, min_qindex, max_qindex);
+
+ /* check whether HRD compliance is still met */
+ sts = intel_mfc_update_hrd(encode_state, encoder_context, frame_bits);
+
+ /* calculate the QP delta as a function of the HRD buffer fullness */
+ x = mfc_context->hrd.target_buffer_fullness - mfc_context->hrd.current_buffer_fullness;
+ if (x > 0) {
+ x /= mfc_context->hrd.target_buffer_fullness;
+ y = mfc_context->hrd.current_buffer_fullness;
+ }
+ else {
+ x /= (mfc_context->hrd.buffer_size - mfc_context->hrd.target_buffer_fullness);
+ y = mfc_context->hrd.buffer_size - mfc_context->hrd.current_buffer_fullness;
+ }
+ if (y < 0.01) y = 0.01;
+ if (x > 1) x = 1;
+ else if (x < -1) x = -1;
+
+ delta_qp = BRC_QP_MAX_CHANGE*exp(-1/y)*sin(BRC_PI_0_5 * x);
+ qpn = (int)(qpn + delta_qp + 0.5);
+
+ /* make sure the predicted QP stays within the valid range */
+ BRC_CLIP(qpn, min_qindex, max_qindex);
+
+ if (sts == BRC_NO_HRD_VIOLATION) { // no HRD violation
+ /* correcting QPs of slices of other types */
+ if (!is_key_frame) {
+ if (abs(qpn - BRC_I_P_QP_DIFF - qpi) > 4)
+ mfc_context->brc.qp_prime_y[0][SLICE_TYPE_I] += (qpn - BRC_I_P_QP_DIFF - qpi) >> 2;
+ } else {
+ if (abs(qpn + BRC_I_P_QP_DIFF - qpp) > 4)
+ mfc_context->brc.qp_prime_y[0][SLICE_TYPE_P] += (qpn + BRC_I_P_QP_DIFF - qpp) >> 2;
+ }
+ BRC_CLIP(mfc_context->brc.qp_prime_y[0][SLICE_TYPE_I], min_qindex, max_qindex);
+ BRC_CLIP(mfc_context->brc.qp_prime_y[0][SLICE_TYPE_P], min_qindex, max_qindex);
+ } else if (sts == BRC_UNDERFLOW) { // underflow
+ if (qpn <= qp) qpn = qp + 2;
+ if (qpn > max_qindex) {
+ qpn = max_qindex;
+ sts = BRC_UNDERFLOW_WITH_MAX_QP; //underflow with maxQP
+ }
+ } else if (sts == BRC_OVERFLOW) {
+ if (qpn >= qp) qpn = qp - 2;
+ if (qpn < min_qindex) { // overflow with minQP
+ qpn = min_qindex;
+ sts = BRC_OVERFLOW_WITH_MIN_QP; // bit stuffing to be done
+ }
+ }
+
+ mfc_context->brc.qp_prime_y[0][slicetype] = qpn;
+
+ return sts;
+}
+
+static void gen8_mfc_vp8_hrd_context_init(struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ unsigned int rate_control_mode = encoder_context->rate_control_mode;
+ int target_bit_rate = seq_param->bits_per_second;
+
+ // currently only CBR mode is supported.
+ if (rate_control_mode == VA_RC_CBR) {
+ mfc_context->vui_hrd.i_bit_rate_value = target_bit_rate >> 10;
+ mfc_context->vui_hrd.i_initial_cpb_removal_delay = ((target_bit_rate * 8) >> 10) * 0.5 * 1024 / target_bit_rate * 90000;
+ mfc_context->vui_hrd.i_cpb_removal_delay = 2;
+ mfc_context->vui_hrd.i_frame_number = 0;
+
+ mfc_context->vui_hrd.i_initial_cpb_removal_delay_length = 24;
+ mfc_context->vui_hrd.i_cpb_removal_delay_length = 24;
+ mfc_context->vui_hrd.i_dpb_output_delay_length = 24;
+ }
+
+}
+
+static void gen8_mfc_vp8_hrd_context_update(struct encode_state *encode_state,
+ struct gen6_mfc_context *mfc_context)
+{
+ mfc_context->vui_hrd.i_frame_number++;
+}
+
+/*
+ * Check whether the parameters related to CBR have been updated and decide
+ * whether the CBR configuration needs to be reinitialized.
+ * Currently it checks the following parameters:
+ * bits_per_second
+ * frame_rate
+ * gop_configuration(intra_period, ip_period, intra_idr_period)
+ */
+static bool gen8_mfc_vp8_brc_updated_check(struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ unsigned int rate_control_mode = encoder_context->rate_control_mode;
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ double cur_fps, cur_bitrate;
+ VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ VAEncMiscParameterBuffer *misc_param_frame_rate_buf = (VAEncMiscParameterBuffer*)encode_state->misc_param[VAEncMiscParameterTypeFrameRate][0]->buffer;
+ VAEncMiscParameterFrameRate *param_frame_rate = (VAEncMiscParameterFrameRate*)misc_param_frame_rate_buf->data;
+ unsigned int frame_rate = param_frame_rate->framerate;
+
+ if (rate_control_mode != VA_RC_CBR) {
+ return false;
+ }
+
+ cur_bitrate = seq_param->bits_per_second;
+ cur_fps = frame_rate;
+
+ if ((cur_bitrate == mfc_context->brc.saved_bps) &&
+ (cur_fps == mfc_context->brc.saved_fps) &&
+ (seq_param->intra_period == mfc_context->brc.saved_intra_period)) {
+ /* the parameters related to CBR are not updated */
+ return false;
+ }
+
+ mfc_context->brc.saved_intra_period = seq_param->intra_period;
+ mfc_context->brc.saved_fps = cur_fps;
+ mfc_context->brc.saved_bps = cur_bitrate;
+ return true;
+}
+
+static void gen8_mfc_vp8_brc_prepare(struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ unsigned int rate_control_mode = encoder_context->rate_control_mode;
+
+ if (rate_control_mode == VA_RC_CBR) {
+ bool brc_updated;
+ assert(encoder_context->codec != CODEC_MPEG2);
+
+ brc_updated = gen8_mfc_vp8_brc_updated_check(encode_state, encoder_context);
+
+ /*Programming bit rate control */
+ if (brc_updated) {
+ gen8_mfc_vp8_brc_init(encode_state, encoder_context);
+ }
+
+ /*Programming HRD control */
+ if (brc_updated)
+ gen8_mfc_vp8_hrd_context_init(encode_state, encoder_context);
+ }
+}
+
+static void vp8_enc_state_init(struct gen6_mfc_context *mfc_context,
+ VAEncPictureParameterBufferVP8 *pic_param,
+ VAQMatrixBufferVP8 *q_matrix)
+{
+
+ int is_key_frame = !pic_param->pic_flags.bits.frame_type;
+ unsigned char *coeff_probs_stream_in_buffer;
+
+ mfc_context->vp8_state.frame_header_lf_update_pos = 0;
+ mfc_context->vp8_state.frame_header_qindex_update_pos = 0;
+ mfc_context->vp8_state.frame_header_token_update_pos = 0;
+ mfc_context->vp8_state.frame_header_bin_mv_upate_pos = 0;
+
+ mfc_context->vp8_state.prob_skip_false = 255;
+ memset(mfc_context->vp8_state.mb_segment_tree_probs, 0, sizeof(mfc_context->vp8_state.mb_segment_tree_probs));
+ memcpy(mfc_context->vp8_state.mv_probs, vp8_default_mv_context, sizeof(mfc_context->vp8_state.mv_probs));
+
+ if (is_key_frame) {
+ memcpy(mfc_context->vp8_state.y_mode_probs, vp8_kf_ymode_prob, sizeof(mfc_context->vp8_state.y_mode_probs));
+ memcpy(mfc_context->vp8_state.uv_mode_probs, vp8_kf_uv_mode_prob, sizeof(mfc_context->vp8_state.uv_mode_probs));
+
+ mfc_context->vp8_state.prob_intra = 255;
+ mfc_context->vp8_state.prob_last = 128;
+ mfc_context->vp8_state.prob_gf = 128;
+ } else {
+ memcpy(mfc_context->vp8_state.y_mode_probs, vp8_ymode_prob, sizeof(mfc_context->vp8_state.y_mode_probs));
+ memcpy(mfc_context->vp8_state.uv_mode_probs, vp8_uv_mode_prob, sizeof(mfc_context->vp8_state.uv_mode_probs));
+
+ mfc_context->vp8_state.prob_intra = 63;
+ mfc_context->vp8_state.prob_last = 128;
+ mfc_context->vp8_state.prob_gf = 128;
+ }
+
+ mfc_context->vp8_state.prob_skip_false = vp8_base_skip_false_prob[q_matrix->quantization_index[0]];
+
+ dri_bo_map(mfc_context->vp8_state.coeff_probs_stream_in_bo, 1);
+ coeff_probs_stream_in_buffer = (unsigned char *)mfc_context->vp8_state.coeff_probs_stream_in_bo->virtual;
+ assert(coeff_probs_stream_in_buffer);
+ memcpy(coeff_probs_stream_in_buffer, vp8_default_coef_probs, sizeof(vp8_default_coef_probs));
+ dri_bo_unmap(mfc_context->vp8_state.coeff_probs_stream_in_bo);
+}
+
+static void vp8_enc_state_update(struct gen6_mfc_context *mfc_context,
+ VAQMatrixBufferVP8 *q_matrix)
+{
+
+ /*some other probabilities need to be updated*/
+}
+
+extern void binarize_vp8_frame_header(VAEncSequenceParameterBufferVP8 *seq_param,
+ VAEncPictureParameterBufferVP8 *pic_param,
+ VAQMatrixBufferVP8 *q_matrix,
+ struct gen6_mfc_context *mfc_context,
+ struct intel_encoder_context *encoder_context);
+
+static void vp8_enc_frame_header_binarize(struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ struct gen6_mfc_context *mfc_context)
+{
+ VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+ VAQMatrixBufferVP8 *q_matrix = (VAQMatrixBufferVP8 *)encode_state->q_matrix->buffer;
+ unsigned char *frame_header_buffer;
+
+ binarize_vp8_frame_header(seq_param, pic_param, q_matrix, mfc_context, encoder_context);
+
+ dri_bo_map(mfc_context->vp8_state.frame_header_bo, 1);
+ frame_header_buffer = (unsigned char *)mfc_context->vp8_state.frame_header_bo->virtual;
+ assert(frame_header_buffer);
+ memcpy(frame_header_buffer, mfc_context->vp8_state.vp8_frame_header, (mfc_context->vp8_state.frame_header_bit_count + 7) / 8);
+ free(mfc_context->vp8_state.vp8_frame_header);
+ dri_bo_unmap(mfc_context->vp8_state.frame_header_bo);
+}
+
+#define MAX_VP8_FRAME_HEADER_SIZE 0x2000
+#define VP8_TOKEN_STATISTICS_BUFFER_SIZE 0x2000
+
+static void gen8_mfc_vp8_init(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ dri_bo *bo;
+ int i;
+ int width_in_mbs = 0;
+ int height_in_mbs = 0;
+ int slice_batchbuffer_size;
+ int is_key_frame, slice_type, rate_control_mode;
+
+ VAEncSequenceParameterBufferVP8 *pSequenceParameter = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+ VAQMatrixBufferVP8 *q_matrix = (VAQMatrixBufferVP8 *)encode_state->q_matrix->buffer;
+
+ width_in_mbs = ALIGN(pSequenceParameter->frame_width, 16) / 16;
+ height_in_mbs = ALIGN(pSequenceParameter->frame_height, 16) / 16;
+
+ is_key_frame = !pic_param->pic_flags.bits.frame_type;
+ slice_type = (is_key_frame ? SLICE_TYPE_I : SLICE_TYPE_P);
+ rate_control_mode = encoder_context->rate_control_mode;
+
+ if (rate_control_mode == VA_RC_CBR) {
+ q_matrix->quantization_index[0] = mfc_context->brc.qp_prime_y[0][slice_type];
+ for (i = 1; i < 4; i++)
+ q_matrix->quantization_index[i] = q_matrix->quantization_index[0];
+ for (i = 0; i < 5; i++)
+ q_matrix->quantization_index_delta[i] = 0;
+ }
+
+ slice_batchbuffer_size = 64 * width_in_mbs * height_in_mbs + 4096 +
+ (SLICE_HEADER + SLICE_TAIL);
+
+ /*Encode common setup for MFC*/
+ dri_bo_unreference(mfc_context->post_deblocking_output.bo);
+ mfc_context->post_deblocking_output.bo = NULL;
+
+ dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
+ mfc_context->pre_deblocking_output.bo = NULL;
+
+ dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
+ mfc_context->uncompressed_picture_source.bo = NULL;
+
+ dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo);
+ mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
+
+ for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
+ if ( mfc_context->direct_mv_buffers[i].bo != NULL)
+ dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
+ mfc_context->direct_mv_buffers[i].bo = NULL;
+ }
+
+ for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
+ if (mfc_context->reference_surfaces[i].bo != NULL)
+ dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
+ mfc_context->reference_surfaces[i].bo = NULL;
+ }
+
+ dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ width_in_mbs * 64 * 16,
+ 64);
+ assert(bo);
+ mfc_context->intra_row_store_scratch_buffer.bo = bo;
+
+ dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ width_in_mbs * height_in_mbs * 16,
+ 64);
+ assert(bo);
+ mfc_context->macroblock_status_buffer.bo = bo;
+
+ dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ 16 * width_in_mbs * 64, /* 16 * width_in_mbs * 64 */
+ 64);
+ assert(bo);
+ mfc_context->deblocking_filter_row_store_scratch_buffer.bo = bo;
+
+ dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ 16 * width_in_mbs * 64, /* 16 * width_in_mbs * 64 */
+ 0x1000);
+ assert(bo);
+ mfc_context->bsd_mpc_row_store_scratch_buffer.bo = bo;
+
+ dri_bo_unreference(mfc_context->mfc_batchbuffer_surface.bo);
+ mfc_context->mfc_batchbuffer_surface.bo = NULL;
+
+ dri_bo_unreference(mfc_context->aux_batchbuffer_surface.bo);
+ mfc_context->aux_batchbuffer_surface.bo = NULL;
+
+ if (mfc_context->aux_batchbuffer) {
+ intel_batchbuffer_free(mfc_context->aux_batchbuffer);
+ mfc_context->aux_batchbuffer = NULL;
+ }
+
+ mfc_context->aux_batchbuffer = intel_batchbuffer_new(&i965->intel, I915_EXEC_BSD, slice_batchbuffer_size);
+ mfc_context->aux_batchbuffer_surface.bo = mfc_context->aux_batchbuffer->buffer;
+ dri_bo_reference(mfc_context->aux_batchbuffer_surface.bo);
+ mfc_context->aux_batchbuffer_surface.pitch = 16;
+ mfc_context->aux_batchbuffer_surface.num_blocks = mfc_context->aux_batchbuffer->size / 16;
+ mfc_context->aux_batchbuffer_surface.size_block = 16;
+
+ gen8_gpe_context_init(ctx, &mfc_context->gpe_context);
+
+ /* alloc vp8 encoding buffers*/
+ dri_bo_unreference(mfc_context->vp8_state.frame_header_bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ MAX_VP8_FRAME_HEADER_SIZE,
+ 0x1000);
+ assert(bo);
+ mfc_context->vp8_state.frame_header_bo = bo;
+
+ mfc_context->vp8_state.intermediate_buffer_max_size = width_in_mbs * height_in_mbs * 384 * 9;
+ for(i = 0; i < 8; i++) {
+ mfc_context->vp8_state.intermediate_partition_offset[i] = width_in_mbs * height_in_mbs * 384 * (i + 1);
+ }
+ dri_bo_unreference(mfc_context->vp8_state.intermediate_bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ mfc_context->vp8_state.intermediate_buffer_max_size,
+ 0x1000);
+ assert(bo);
+ mfc_context->vp8_state.intermediate_bo = bo;
+
+ dri_bo_unreference(mfc_context->vp8_state.stream_out_bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ width_in_mbs * height_in_mbs * 16,
+ 0x1000);
+ assert(bo);
+ mfc_context->vp8_state.stream_out_bo = bo;
+
+ dri_bo_unreference(mfc_context->vp8_state.coeff_probs_stream_in_bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ sizeof(vp8_default_coef_probs),
+ 0x1000);
+ assert(bo);
+ mfc_context->vp8_state.coeff_probs_stream_in_bo = bo;
+
+ dri_bo_unreference(mfc_context->vp8_state.token_statistics_bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ VP8_TOKEN_STATISTICS_BUFFER_SIZE,
+ 0x1000);
+ assert(bo);
+ mfc_context->vp8_state.token_statistics_bo = bo;
+
+ dri_bo_unreference(mfc_context->vp8_state.mpc_row_store_bo);
+ bo = dri_bo_alloc(i965->intel.bufmgr,
+ "Buffer",
+ width_in_mbs * 16 * 64,
+ 0x1000);
+ assert(bo);
+ mfc_context->vp8_state.mpc_row_store_bo = bo;
+
+ vp8_enc_state_init(mfc_context, pic_param, q_matrix);
+ vp8_enc_frame_header_binarize(encode_state, encoder_context, mfc_context);
+}
+
+static VAStatus
+intel_mfc_vp8_prepare(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ struct object_surface *obj_surface;
+ struct object_buffer *obj_buffer;
+ struct i965_coded_buffer_segment *coded_buffer_segment;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+ VAStatus vaStatus = VA_STATUS_SUCCESS;
+ dri_bo *bo;
+ int i;
+
+ /* reconstructed surface */
+ obj_surface = encode_state->reconstructed_object;
+ i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC('N','V','1','2'), SUBSAMPLE_YUV420);
+ if (pic_param->loop_filter_level[0] == 0) {
+ mfc_context->pre_deblocking_output.bo = obj_surface->bo;
+ dri_bo_reference(mfc_context->pre_deblocking_output.bo);
+ } else {
+ mfc_context->post_deblocking_output.bo = obj_surface->bo;
+ dri_bo_reference(mfc_context->post_deblocking_output.bo);
+ }
+
+ mfc_context->surface_state.width = obj_surface->orig_width;
+ mfc_context->surface_state.height = obj_surface->orig_height;
+ mfc_context->surface_state.w_pitch = obj_surface->width;
+ mfc_context->surface_state.h_pitch = obj_surface->height;
+
+ /* set vp8 reference frames */
+ for (i = 0; i < ARRAY_ELEMS(mfc_context->reference_surfaces); i++) {
+ obj_surface = encode_state->reference_objects[i];
+
+ if (obj_surface && obj_surface->bo) {
+ mfc_context->reference_surfaces[i].bo = obj_surface->bo;
+ dri_bo_reference(mfc_context->reference_surfaces[i].bo);
+ } else {
+ mfc_context->reference_surfaces[i].bo = NULL;
+ }
+ }
+
+ /* input YUV surface */
+ obj_surface = encode_state->input_yuv_object;
+ mfc_context->uncompressed_picture_source.bo = obj_surface->bo;
+ dri_bo_reference(mfc_context->uncompressed_picture_source.bo);
+
+ /* coded buffer */
+ obj_buffer = encode_state->coded_buf_object;
+ bo = obj_buffer->buffer_store->bo;
+ mfc_context->mfc_indirect_pak_bse_object.bo = bo;
+ mfc_context->mfc_indirect_pak_bse_object.offset = I965_CODEDBUFFER_HEADER_SIZE;
+ mfc_context->mfc_indirect_pak_bse_object.end_offset = ALIGN(obj_buffer->size_element - 0x1000, 0x1000);
+ dri_bo_reference(mfc_context->mfc_indirect_pak_bse_object.bo);
+
+ dri_bo_unreference(mfc_context->vp8_state.final_frame_bo);
+ mfc_context->vp8_state.final_frame_bo = mfc_context->mfc_indirect_pak_bse_object.bo;
+ mfc_context->vp8_state.final_frame_byte_offset = I965_CODEDBUFFER_HEADER_SIZE;
+ dri_bo_reference(mfc_context->vp8_state.final_frame_bo);
+
+ /* set the internal flag to 0 to indicate the coded size is unknown */
+ dri_bo_map(bo, 1);
+ coded_buffer_segment = (struct i965_coded_buffer_segment *)bo->virtual;
+ coded_buffer_segment->mapped = 0;
+ coded_buffer_segment->codec = encoder_context->codec;
+ dri_bo_unmap(bo);
+
+ return vaStatus;
+}
+
+static void
+gen8_mfc_vp8_encoder_cfg(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+
+ BEGIN_BCS_BATCH(batch, 30);
+ OUT_BCS_BATCH(batch, MFX_VP8_ENCODER_CFG | (30 - 2)); /* SKL should be 31-2 ? */
+
+ OUT_BCS_BATCH(batch,
+ 0 << 9 | /* compressed bitstream output disable */
+ 1 << 7 | /* disable per-segment delta qindex and loop filter in RC */
+ 1 << 6 | /* RC initial pass */
+ 0 << 4 | /* update segment feature data flag */
+ 1 << 3 | /* bitstream statistics output enable */
+ 1 << 2 | /* token statistics output enable */
+ 0 << 1 | /* final bitstream output disable */
+ 0 << 0); /*DW1*/
+
+ OUT_BCS_BATCH(batch, 0); /*DW2*/
+
+ OUT_BCS_BATCH(batch,
+ 0xfff << 16 | /* max intra mb bit count limit */
+ 0xfff << 0 /* max inter mb bit count limit */
+ ); /*DW3*/
+
+ OUT_BCS_BATCH(batch, 0); /*DW4*/
+ OUT_BCS_BATCH(batch, 0); /*DW5*/
+ OUT_BCS_BATCH(batch, 0); /*DW6*/
+ OUT_BCS_BATCH(batch, 0); /*DW7*/
+ OUT_BCS_BATCH(batch, 0); /*DW8*/
+ OUT_BCS_BATCH(batch, 0); /*DW9*/
+ OUT_BCS_BATCH(batch, 0); /*DW10*/
+ OUT_BCS_BATCH(batch, 0); /*DW11*/
+ OUT_BCS_BATCH(batch, 0); /*DW12*/
+ OUT_BCS_BATCH(batch, 0); /*DW13*/
+ OUT_BCS_BATCH(batch, 0); /*DW14*/
+ OUT_BCS_BATCH(batch, 0); /*DW15*/
+ OUT_BCS_BATCH(batch, 0); /*DW16*/
+ OUT_BCS_BATCH(batch, 0); /*DW17*/
+ OUT_BCS_BATCH(batch, 0); /*DW18*/
+ OUT_BCS_BATCH(batch, 0); /*DW19*/
+ OUT_BCS_BATCH(batch, 0); /*DW20*/
+ OUT_BCS_BATCH(batch, 0); /*DW21*/
+
+ OUT_BCS_BATCH(batch,
+ pic_param->pic_flags.bits.show_frame << 23 |
+ pic_param->pic_flags.bits.version << 20
+ ); /*DW22*/
+
+ OUT_BCS_BATCH(batch,
+ (seq_param->frame_height_scale << 14 | seq_param->frame_height) << 16 |
+ (seq_param->frame_width_scale << 14 | seq_param->frame_width) << 0
+ );
+
+ /*DW24*/
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.frame_header_bit_count); /* frame header bit count */
+
+ /*DW25*/
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.frame_header_qindex_update_pos); /* frame header bin buffer qindex update pointer */
+
+ /*DW26*/
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.frame_header_lf_update_pos); /* frame header bin buffer loop filter update pointer*/
+
+ /*DW27*/
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.frame_header_token_update_pos); /* frame header bin buffer token update pointer */
+
+ /*DW28*/
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.frame_header_bin_mv_upate_pos); /*frame header bin buffer mv update pointer */
+
+ /*DW29*/
+ OUT_BCS_BATCH(batch, 0);
+
+ ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen8_mfc_vp8_pic_state(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+ VAQMatrixBufferVP8 *q_matrix = (VAQMatrixBufferVP8 *)encode_state->q_matrix->buffer;
+ int i, j, log2num;
+
+ log2num = pic_param->pic_flags.bits.num_token_partitions;
+
+ /*update mode and token probs*/
+ vp8_enc_state_update(mfc_context, q_matrix);
+
+ BEGIN_BCS_BATCH(batch, 38);
+ OUT_BCS_BATCH(batch, MFX_VP8_PIC_STATE | (38 - 2));
+ OUT_BCS_BATCH(batch,
+ (ALIGN(seq_param->frame_height, 16) / 16 - 1) << 16 |
+ (ALIGN(seq_param->frame_width, 16) / 16 - 1) << 0);
+
+ OUT_BCS_BATCH(batch,
+ log2num << 24 |
+ pic_param->sharpness_level << 16 |
+ pic_param->pic_flags.bits.sign_bias_alternate << 13 |
+ pic_param->pic_flags.bits.sign_bias_golden << 12 |
+ pic_param->pic_flags.bits.loop_filter_adj_enable << 11 |
+ pic_param->pic_flags.bits.mb_no_coeff_skip << 10 |
+ pic_param->pic_flags.bits.update_mb_segmentation_map << 9 |
+ pic_param->pic_flags.bits.segmentation_enabled << 8 |
+ !pic_param->pic_flags.bits.frame_type << 5 | /* 0 indicates an intra frame in the VP8 stream/spec (section 9.1) */
+ (pic_param->pic_flags.bits.version / 2) << 4 |
+ (pic_param->pic_flags.bits.version == 3) << 1 | /* full pixel mode for version 3 */
+ !!pic_param->pic_flags.bits.version << 0); /* version 0: 6 tap */
+
+ OUT_BCS_BATCH(batch,
+ pic_param->loop_filter_level[3] << 24 |
+ pic_param->loop_filter_level[2] << 16 |
+ pic_param->loop_filter_level[1] << 8 |
+ pic_param->loop_filter_level[0] << 0);
+
+ OUT_BCS_BATCH(batch,
+ q_matrix->quantization_index[3] << 24 |
+ q_matrix->quantization_index[2] << 16 |
+ q_matrix->quantization_index[1] << 8 |
+ q_matrix->quantization_index[0] << 0);
+
+ OUT_BCS_BATCH(batch,
+ ((unsigned short)(q_matrix->quantization_index_delta[4]) >> 15) << 28 |
+ abs(q_matrix->quantization_index_delta[4]) << 24 |
+ ((unsigned short)(q_matrix->quantization_index_delta[3]) >> 15) << 20 |
+ abs(q_matrix->quantization_index_delta[3]) << 16 |
+ ((unsigned short)(q_matrix->quantization_index_delta[2]) >> 15) << 12 |
+ abs(q_matrix->quantization_index_delta[2]) << 8 |
+ ((unsigned short)(q_matrix->quantization_index_delta[1]) >> 15) << 4 |
+ abs(q_matrix->quantization_index_delta[1]) << 0);
+
+ OUT_BCS_BATCH(batch,
+ ((unsigned short)(q_matrix->quantization_index_delta[0]) >> 15) << 4 |
+ abs(q_matrix->quantization_index_delta[0]) << 0);
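+ /* Illustrative example: quantization_index_delta[0] = -5 is emitted as sign bit
+ * ((unsigned short)(-5) >> 15) = 1 in bit 4 and magnitude abs(-5) = 5 in bits 3:0,
+ * i.e. 0x15 in this DWord. */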
+
+ OUT_BCS_BATCH(batch,
+ pic_param->clamp_qindex_high << 8 |
+ pic_param->clamp_qindex_low << 0);
+
+ for (i = 8; i < 19; i++) {
+ OUT_BCS_BATCH(batch, 0xffffffff);
+ }
+
+ OUT_BCS_BATCH(batch,
+ mfc_context->vp8_state.mb_segment_tree_probs[2] << 16 |
+ mfc_context->vp8_state.mb_segment_tree_probs[1] << 8 |
+ mfc_context->vp8_state.mb_segment_tree_probs[0] << 0);
+
+ OUT_BCS_BATCH(batch,
+ mfc_context->vp8_state.prob_skip_false << 24 |
+ mfc_context->vp8_state.prob_intra << 16 |
+ mfc_context->vp8_state.prob_last << 8 |
+ mfc_context->vp8_state.prob_gf << 0);
+
+ OUT_BCS_BATCH(batch,
+ mfc_context->vp8_state.y_mode_probs[3] << 24 |
+ mfc_context->vp8_state.y_mode_probs[2] << 16 |
+ mfc_context->vp8_state.y_mode_probs[1] << 8 |
+ mfc_context->vp8_state.y_mode_probs[0] << 0);
+
+ OUT_BCS_BATCH(batch,
+ mfc_context->vp8_state.uv_mode_probs[2] << 16 |
+ mfc_context->vp8_state.uv_mode_probs[1] << 8 |
+ mfc_context->vp8_state.uv_mode_probs[0] << 0);
+
+ /* MV update value, DW23-DW32 */
+ for (i = 0; i < 2; i++) {
+ for (j = 0; j < 20; j += 4) {
+ OUT_BCS_BATCH(batch,
+ (j + 3 == 19 ? 0 : mfc_context->vp8_state.mv_probs[i][j + 3]) << 24 |
+ mfc_context->vp8_state.mv_probs[i][j + 2] << 16 |
+ mfc_context->vp8_state.mv_probs[i][j + 1] << 8 |
+ mfc_context->vp8_state.mv_probs[i][j + 0] << 0);
+ }
+ }
+
+ OUT_BCS_BATCH(batch,
+ (pic_param->ref_lf_delta[3] & 0x7f) << 24 |
+ (pic_param->ref_lf_delta[2] & 0x7f) << 16 |
+ (pic_param->ref_lf_delta[1] & 0x7f) << 8 |
+ (pic_param->ref_lf_delta[0] & 0x7f) << 0);
+
+ OUT_BCS_BATCH(batch,
+ (pic_param->mode_lf_delta[3] & 0x7f) << 24 |
+ (pic_param->mode_lf_delta[2] & 0x7f) << 16 |
+ (pic_param->mode_lf_delta[1] & 0x7f) << 8 |
+ (pic_param->mode_lf_delta[0] & 0x7f) << 0);
+
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+
+ ADVANCE_BCS_BATCH(batch);
+}
+
+#define OUT_VP8_BUFFER(bo, offset) \
+ if (bo) \
+ OUT_BCS_RELOC(batch, \
+ bo, \
+ I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION, \
+ offset); \
+ else \
+ OUT_BCS_BATCH(batch, 0); \
+ OUT_BCS_BATCH(batch, 0); \
+ OUT_BCS_BATCH(batch, 0);
+
+static void
+gen8_mfc_vp8_bsp_buf_base_addr_state(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+ BEGIN_BCS_BATCH(batch, 32);
+ OUT_BCS_BATCH(batch, MFX_VP8_BSP_BUF_BASE_ADDR_STATE | (32 - 2));
+
+ OUT_VP8_BUFFER(mfc_context->vp8_state.frame_header_bo, 0);
+
+ OUT_VP8_BUFFER(mfc_context->vp8_state.intermediate_bo, 0);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_partition_offset[0]);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_partition_offset[1]);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_partition_offset[2]);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_partition_offset[3]);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_partition_offset[4]);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_partition_offset[5]);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_partition_offset[6]);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_partition_offset[7]);
+ OUT_BCS_BATCH(batch, mfc_context->vp8_state.intermediate_buffer_max_size);
+
+ OUT_VP8_BUFFER(mfc_context->vp8_state.final_frame_bo, I965_CODEDBUFFER_HEADER_SIZE);
+ OUT_BCS_BATCH(batch, 0);
+
+ OUT_VP8_BUFFER(mfc_context->vp8_state.stream_out_bo, 0);
+ OUT_VP8_BUFFER(mfc_context->vp8_state.coeff_probs_stream_in_bo, 0);
+ OUT_VP8_BUFFER(mfc_context->vp8_state.token_statistics_bo, 0);
+ OUT_VP8_BUFFER(mfc_context->vp8_state.mpc_row_store_bo, 0);
+
+ ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen8_mfc_vp8_pipeline_picture_programing(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+
+ mfc_context->pipe_mode_select(ctx, MFX_FORMAT_VP8, encoder_context);
+ mfc_context->set_surface_state(ctx, encoder_context);
+ mfc_context->ind_obj_base_addr_state(ctx, encoder_context);
+ gen8_mfc_pipe_buf_addr_state(ctx, encoder_context);
+ gen8_mfc_bsp_buf_base_addr_state(ctx, encoder_context);
+ gen8_mfc_vp8_bsp_buf_base_addr_state(ctx, encode_state, encoder_context);
+ gen8_mfc_vp8_pic_state(ctx, encode_state,encoder_context);
+ gen8_mfc_vp8_encoder_cfg(ctx, encode_state, encoder_context);
+}
+
+static const unsigned char
+vp8_intra_mb_mode_map[VME_MB_INTRA_MODE_COUNT] = {
+ PAK_V_PRED,
+ PAK_H_PRED,
+ PAK_DC_PRED,
+ PAK_TM_PRED
+};
+
+static const unsigned char
+vp8_intra_block_mode_map[VME_B_INTRA_MODE_COUNT] = {
+ PAK_B_VE_PRED,
+ PAK_B_HE_PRED,
+ PAK_B_DC_PRED,
+ PAK_B_LD_PRED,
+ PAK_B_RD_PRED,
+ PAK_B_VR_PRED,
+ PAK_B_HD_PRED,
+ PAK_B_VL_PRED,
+ PAK_B_HU_PRED
+};
+
+static int inline gen8_mfc_vp8_intra_mb_mode_map(unsigned int vme_pred_mode, int is_luma_4x4)
+{
+ unsigned int i, pak_pred_mode = 0;
+ unsigned int vme_sub_blocks_pred_mode[8], pak_sub_blocks_pred_mode[8]; /* intra modes of the 8 sub-blocks */
+
+ if (!is_luma_4x4) {
+ pak_pred_mode = vp8_intra_mb_mode_map[vme_pred_mode & 0x3];
+ } else {
+ for (i = 0; i < 8; i++) {
+ vme_sub_blocks_pred_mode[i] = ((vme_pred_mode >> (4 * i)) & 0xf);
+ assert(vme_sub_blocks_pred_mode[i] < VME_B_INTRA_MODE_COUNT);
+ pak_sub_blocks_pred_mode[i] = vp8_intra_block_mode_map[vme_sub_blocks_pred_mode[i]];
+ pak_pred_mode |= (pak_sub_blocks_pred_mode[i] << (4 * i));
+ }
+ }
+
+ return pak_pred_mode;
+}
+static void
+gen8_mfc_vp8_pak_object_intra(VADriverContextP ctx,
+ struct intel_encoder_context *encoder_context,
+ unsigned int *msg,
+ int x, int y,
+ struct intel_batchbuffer *batch)
+{
+ unsigned int vme_intra_mb_mode, vme_chroma_pred_mode;
+ unsigned int pak_intra_mb_mode, pak_chroma_pred_mode;
+ unsigned int vme_luma_pred_mode[2], pak_luma_pred_mode[2];
+
+ if (batch == NULL)
+ batch = encoder_context->base.batch;
+
+ vme_intra_mb_mode = ((msg[0] & 0x30) >> 4);
+ assert((vme_intra_mb_mode == 0) || (vme_intra_mb_mode == 2)); //VP8 only supports intra_16x16 and intra_4x4
+ pak_intra_mb_mode = (vme_intra_mb_mode >> 1);
+
+ vme_luma_pred_mode[0] = msg[1];
+ vme_luma_pred_mode[1] = msg[2];
+ vme_chroma_pred_mode = msg[3] & 0x3;
+
+ pak_luma_pred_mode[0] = gen8_mfc_vp8_intra_mb_mode_map(vme_luma_pred_mode[0], pak_intra_mb_mode);
+ pak_luma_pred_mode[1] = gen8_mfc_vp8_intra_mb_mode_map(vme_luma_pred_mode[1], pak_intra_mb_mode);
+ pak_chroma_pred_mode = gen8_mfc_vp8_intra_mb_mode_map(vme_chroma_pred_mode, 0);
+
+ BEGIN_BCS_BATCH(batch, 7);
+
+ OUT_BCS_BATCH(batch, MFX_VP8_PAK_OBJECT | (7 - 2));
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch,
+ (0 << 20) | /* mv format: intra mb */
+ (0 << 18) | /* Segment ID */
+ (0 << 17) | /* disable coeff clamp */
+ (1 << 13) | /* intra mb flag */
+ (0 << 11) | /* refer picture select: last frame */
+ (pak_intra_mb_mode << 8) | /* mb type */
+ (pak_chroma_pred_mode << 4) | /* mb uv mode */
+ (0 << 2) | /* skip mb flag: disable */
+ 0);
+
+ OUT_BCS_BATCH(batch, (y << 16) | x);
+ OUT_BCS_BATCH(batch, pak_luma_pred_mode[0]);
+ OUT_BCS_BATCH(batch, pak_luma_pred_mode[1]);
+
+ ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen8_mfc_vp8_pak_object_inter(VADriverContextP ctx,
+ struct intel_encoder_context *encoder_context,
+ unsigned int *msg,
+ int offset,
+ int x, int y,
+ struct intel_batchbuffer *batch)
+{
+ int i;
+
+ if (batch == NULL)
+ batch = encoder_context->base.batch;
+
+ /* only inter_16x16 is supported for now */
+ assert((msg[AVC_INTER_MSG_OFFSET] & INTER_MODE_MASK) == INTER_16X16);
+ /* For inter_16x16 all 16 MVs should be the same. Move the MV to the start of the
+ * VME MB record so the offset stays 64-byte aligned. Per the VP8 spec, all VP8
+ * luma motion vectors are stored doubled.
+ */
+ msg[0] = (((msg[AVC_INTER_MV_OFFSET/4] & 0xffff0000) << 1) | ((msg[AVC_INTER_MV_OFFSET/4] << 1) & 0xffff));
+
+ for (i = 1; i < 16; i++) {
+ msg[i] = msg[0];
+ }
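+
+ /* Illustrative example: a packed VME MV value of 0xFFFE0003 becomes 0xFFFC0006
+ * after the shifts above, i.e. both 16-bit components are doubled. */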
+
+ BEGIN_BCS_BATCH(batch, 7);
+
+ OUT_BCS_BATCH(batch, MFX_VP8_PAK_OBJECT | (7 - 2));
+ OUT_BCS_BATCH(batch,
+ (0 << 29) | /* enable inline mv data: disable */
+ 64);
+ OUT_BCS_BATCH(batch,
+ offset);
+ OUT_BCS_BATCH(batch,
+ (4 << 20) | /* mv format: inter */
+ (0 << 18) | /* Segment ID */
+ (0 << 17) | /* coeff clamp: disable */
+ (0 << 13) | /* intra mb flag: inter mb */
+ (0 << 11) | /* refer picture select: last frame */
+ (0 << 8) | /* mb type: 16x16 */
+ (0 << 4) | /* mb uv mode: dc_pred */
+ (0 << 2) | /* skip mb flag: disable */
+ 0);
+
+ OUT_BCS_BATCH(batch, (y << 16) | x);
+
+ /*new mv*/
+ OUT_BCS_BATCH(batch, 0x8);
+ OUT_BCS_BATCH(batch, 0x8);
+
+ ADVANCE_BCS_BATCH(batch);
+}
+
+static void
+gen8_mfc_vp8_pak_pipeline(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context,
+ struct intel_batchbuffer *slice_batch)
+{
+ struct gen6_vme_context *vme_context = encoder_context->vme_context;
+ VAEncSequenceParameterBufferVP8 *seq_param = (VAEncSequenceParameterBufferVP8 *)encode_state->seq_param_ext->buffer;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+ int width_in_mbs = ALIGN(seq_param->frame_width, 16) / 16;
+ int height_in_mbs = ALIGN(seq_param->frame_height, 16) / 16;
+ unsigned int *msg = NULL;
+ unsigned char *msg_ptr = NULL;
+ unsigned int i, offset, is_intra_frame;
+
+ is_intra_frame = !pic_param->pic_flags.bits.frame_type;
+
+ dri_bo_map(vme_context->vme_output.bo , 1);
+ msg_ptr = (unsigned char *)vme_context->vme_output.bo->virtual;
+
+ for( i = 0; i < width_in_mbs * height_in_mbs; i++) {
+ int h_pos = i % width_in_mbs;
+ int v_pos = i / width_in_mbs;
+ msg = (unsigned int *) (msg_ptr + i * vme_context->vme_output.size_block);
+
+ if (is_intra_frame) {
+ gen8_mfc_vp8_pak_object_intra(ctx,
+ encoder_context,
+ msg,
+ h_pos, v_pos,
+ slice_batch);
+ } else {
+ int inter_rdo, intra_rdo;
+ inter_rdo = msg[AVC_INTER_RDO_OFFSET] & AVC_RDO_MASK;
+ intra_rdo = msg[AVC_INTRA_RDO_OFFSET] & AVC_RDO_MASK;
+
+ if (intra_rdo < inter_rdo) {
+ gen8_mfc_vp8_pak_object_intra(ctx,
+ encoder_context,
+ msg,
+ h_pos, v_pos,
+ slice_batch);
+ } else {
+ offset = i * vme_context->vme_output.size_block;
+ gen8_mfc_vp8_pak_object_inter(ctx,
+ encoder_context,
+ msg,
+ offset,
+ h_pos, v_pos,
+ slice_batch);
+ }
+ }
+ }
+
+ dri_bo_unmap(vme_context->vme_output.bo);
+}
+
+/*
+ * A batch buffer for vp8 pak object commands
+ */
+static dri_bo *
+gen8_mfc_vp8_software_batchbuffer(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ struct intel_batchbuffer *batch;
+ dri_bo *batch_bo;
+
+ batch = mfc_context->aux_batchbuffer;
+ batch_bo = batch->buffer;
+
+ gen8_mfc_vp8_pak_pipeline(ctx, encode_state, encoder_context, batch);
+
+ intel_batchbuffer_align(batch, 8);
+
+ BEGIN_BCS_BATCH(batch, 2);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_END);
+ ADVANCE_BCS_BATCH(batch);
+
+ dri_bo_reference(batch_bo);
+ intel_batchbuffer_free(batch);
+ mfc_context->aux_batchbuffer = NULL;
+
+ return batch_bo;
+}
+
+static void
+gen8_mfc_vp8_pipeline_programing(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct intel_batchbuffer *batch = encoder_context->base.batch;
+ dri_bo *slice_batch_bo;
+
+ slice_batch_bo = gen8_mfc_vp8_software_batchbuffer(ctx, encode_state, encoder_context);
+
+ // begin programming
+ intel_batchbuffer_start_atomic_bcs(batch, 0x4000);
+ intel_batchbuffer_emit_mi_flush(batch);
+
+ // picture level programming
+ gen8_mfc_vp8_pipeline_picture_programing(ctx, encode_state, encoder_context);
+
+ BEGIN_BCS_BATCH(batch, 4);
+ OUT_BCS_BATCH(batch, MI_BATCH_BUFFER_START | (1 << 8) | (1 << 0));
+ OUT_BCS_RELOC(batch,
+ slice_batch_bo,
+ I915_GEM_DOMAIN_COMMAND, 0,
+ 0);
+ OUT_BCS_BATCH(batch, 0);
+ OUT_BCS_BATCH(batch, 0);
+ ADVANCE_BCS_BATCH(batch);
+
+ // end programming
+ intel_batchbuffer_end_atomic(batch);
+
+ dri_bo_unreference(slice_batch_bo);
+}
+
+static int gen8_mfc_calc_vp8_coded_buffer_size(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
+ unsigned char is_intra_frame = !pic_param->pic_flags.bits.frame_type;
+ unsigned int *vp8_encoding_status, i, first_partition_bytes, token_partition_bytes, vp8_coded_bytes;
+
+ int partition_num = 1 << pic_param->pic_flags.bits.num_token_partitions;
+
+ first_partition_bytes = token_partition_bytes = vp8_coded_bytes = 0;
+
+ dri_bo_map(mfc_context->vp8_state.token_statistics_bo, 0);
+
+ vp8_encoding_status = (unsigned int *)mfc_context->vp8_state.token_statistics_bo->virtual;
+ first_partition_bytes = (vp8_encoding_status[0] + 7) / 8;
+
+ for (i = 1; i <= partition_num; i++)
+ token_partition_bytes += (vp8_encoding_status[i] + 7) / 8;
+
+ /* coded_bytes includes P0~P8 partition bytes + uncompressed data chunk bytes + partition_size bytes in the bitstream + 3 extra bytes */
+ /* the last partition size in the vp8 status buffer seems smaller than the real value, so add 3 extra bytes */
+ vp8_coded_bytes = first_partition_bytes + token_partition_bytes + (3 + 7 * !!is_intra_frame) + (partition_num - 1) * 3 + 3;
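+ /* Illustrative example: a key frame with one token partition, a 2000-byte first
+ * partition and a 14000-byte token partition yields
+ * 2000 + 14000 + (3 + 7) + 0 + 3 = 16013 coded bytes. */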
+
+ dri_bo_unmap(mfc_context->vp8_state.token_statistics_bo);
+
+ dri_bo_map(mfc_context->vp8_state.final_frame_bo, 0);
+ struct i965_coded_buffer_segment *coded_buffer_segment = (struct i965_coded_buffer_segment *)(mfc_context->vp8_state.final_frame_bo->virtual);
+ coded_buffer_segment->base.size = vp8_coded_bytes;
+ dri_bo_unmap(mfc_context->vp8_state.final_frame_bo);
+
+ return vp8_coded_bytes;
+}
+
+static VAStatus
+gen8_mfc_vp8_encode_picture(VADriverContextP ctx,
+ struct encode_state *encode_state,
+ struct intel_encoder_context *encoder_context)
+{
+ struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
+ unsigned int rate_control_mode = encoder_context->rate_control_mode;
+ int current_frame_bits_size;
+ int sts;
+
+ gen8_mfc_vp8_init(ctx, encode_state, encoder_context);
+ intel_mfc_vp8_prepare(ctx, encode_state, encoder_context);
+ /*Programming the BCS pipeline*/
+ gen8_mfc_vp8_pipeline_programing(ctx, encode_state, encoder_context);
+ gen8_mfc_run(ctx, encode_state, encoder_context);
+ current_frame_bits_size = 8 * gen8_mfc_calc_vp8_coded_buffer_size(ctx, encode_state, encoder_context);
+
+ if (rate_control_mode == VA_RC_CBR /*|| rate_control_mode == VA_RC_VBR*/) {
+ sts = gen8_mfc_vp8_brc_postpack(encode_state, encoder_context, current_frame_bits_size);
+ if (sts == BRC_NO_HRD_VIOLATION) {
+ gen8_mfc_vp8_hrd_context_update(encode_state, mfc_context);
+ }
+ else if (sts == BRC_OVERFLOW_WITH_MIN_QP || sts == BRC_UNDERFLOW_WITH_MAX_QP) {
+ if (!mfc_context->hrd.violation_noted) {
+ fprintf(stderr, "Unrepairable %s!\n", (sts == BRC_OVERFLOW_WITH_MIN_QP)? "overflow": "underflow");
+ mfc_context->hrd.violation_noted = 1;
+ }
+ return VA_STATUS_SUCCESS;
+ }
+ }
+
+ return VA_STATUS_SUCCESS;
+}
+
+static void
+gen8_mfc_context_destroy(void *context)
+{
+ struct gen6_mfc_context *mfc_context = context;
+ int i;
+
+ dri_bo_unreference(mfc_context->post_deblocking_output.bo);
+ mfc_context->post_deblocking_output.bo = NULL;
+
+ dri_bo_unreference(mfc_context->pre_deblocking_output.bo);
+ mfc_context->pre_deblocking_output.bo = NULL;
+
+ dri_bo_unreference(mfc_context->uncompressed_picture_source.bo);
+ mfc_context->uncompressed_picture_source.bo = NULL;
+
+ dri_bo_unreference(mfc_context->mfc_indirect_pak_bse_object.bo);
+ mfc_context->mfc_indirect_pak_bse_object.bo = NULL;
+
+ for (i = 0; i < NUM_MFC_DMV_BUFFERS; i++){
+ dri_bo_unreference(mfc_context->direct_mv_buffers[i].bo);
+ mfc_context->direct_mv_buffers[i].bo = NULL;
+ }
+
+ dri_bo_unreference(mfc_context->intra_row_store_scratch_buffer.bo);
+ mfc_context->intra_row_store_scratch_buffer.bo = NULL;
+
+ dri_bo_unreference(mfc_context->macroblock_status_buffer.bo);
+ mfc_context->macroblock_status_buffer.bo = NULL;
+
+ dri_bo_unreference(mfc_context->deblocking_filter_row_store_scratch_buffer.bo);
+ mfc_context->deblocking_filter_row_store_scratch_buffer.bo = NULL;
+
+ dri_bo_unreference(mfc_context->bsd_mpc_row_store_scratch_buffer.bo);
+ mfc_context->bsd_mpc_row_store_scratch_buffer.bo = NULL;
+
+
+ for (i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++){
+ dri_bo_unreference(mfc_context->reference_surfaces[i].bo);
+ mfc_context->reference_surfaces[i].bo = NULL;
+ }
+
+ gen8_gpe_context_destroy(&mfc_context->gpe_context);
+
+ dri_bo_unreference(mfc_context->mfc_batchbuffer_surface.bo);
+ mfc_context->mfc_batchbuffer_surface.bo = NULL;
+
+ dri_bo_unreference(mfc_context->aux_batchbuffer_surface.bo);
+ mfc_context->aux_batchbuffer_surface.bo = NULL;
if (mfc_context->aux_batchbuffer)
intel_batchbuffer_free(mfc_context->aux_batchbuffer);
mfc_context->aux_batchbuffer = NULL;
+ dri_bo_unreference(mfc_context->vp8_state.coeff_probs_stream_in_bo);
+ mfc_context->vp8_state.coeff_probs_stream_in_bo = NULL;
+
+ dri_bo_unreference(mfc_context->vp8_state.final_frame_bo);
+ mfc_context->vp8_state.final_frame_bo = NULL;
+
+ dri_bo_unreference(mfc_context->vp8_state.frame_header_bo);
+ mfc_context->vp8_state.frame_header_bo = NULL;
+
+ dri_bo_unreference(mfc_context->vp8_state.intermediate_bo);
+ mfc_context->vp8_state.intermediate_bo = NULL;
+
+ dri_bo_unreference(mfc_context->vp8_state.mpc_row_store_bo);
+ mfc_context->vp8_state.mpc_row_store_bo = NULL;
+
+ dri_bo_unreference(mfc_context->vp8_state.stream_out_bo);
+ mfc_context->vp8_state.stream_out_bo = NULL;
+
+ dri_bo_unreference(mfc_context->vp8_state.token_statistics_bo);
+ mfc_context->vp8_state.token_statistics_bo = NULL;
+
free(mfc_context);
}
vaStatus = gen8_mfc_mpeg2_encode_picture(ctx, encode_state, encoder_context);
break;
+ case VAProfileJPEGBaseline:
+ jpeg_init_default_qmatrix(ctx, encoder_context);
+ vaStatus = gen8_mfc_jpeg_encode_picture(ctx, encode_state, encoder_context);
+ break;
+
+ case VAProfileVP8Version0_3:
+ vaStatus = gen8_mfc_vp8_encode_picture(ctx, encode_state, encoder_context);
+ break;
+
default:
vaStatus = VA_STATUS_ERROR_UNSUPPORTED_PROFILE;
break;
Bool gen8_mfc_context_init(VADriverContextP ctx, struct intel_encoder_context *encoder_context)
{
+ struct i965_driver_data *i965 = i965_driver_data(ctx);
struct gen6_mfc_context *mfc_context = calloc(1, sizeof(struct gen6_mfc_context));
+ assert(mfc_context);
mfc_context->gpe_context.surface_state_binding_table.length = (SURFACE_STATE_PADDED_SIZE + sizeof(unsigned int)) * MAX_MEDIA_SURFACES_GEN6;
- mfc_context->gpe_context.idrt.max_entries = MAX_GPE_KERNELS;
- mfc_context->gpe_context.idrt.entry_size = sizeof(struct gen6_interface_descriptor_data);
-
- mfc_context->gpe_context.curbe.length = 32 * 4;
+ mfc_context->gpe_context.idrt_size = sizeof(struct gen8_interface_descriptor_data) * MAX_INTERFACE_DESC_GEN6;
+ mfc_context->gpe_context.curbe_size = 32 * 4;
+ mfc_context->gpe_context.sampler_size = 0;
mfc_context->gpe_context.vfe_state.max_num_threads = 60 - 1;
mfc_context->gpe_context.vfe_state.num_urb_entries = 16;
mfc_context->gpe_context.vfe_state.urb_entry_size = 59 - 1;
mfc_context->gpe_context.vfe_state.curbe_allocation_size = 37 - 1;
- i965_gpe_load_kernels(ctx,
+ if (IS_GEN9(i965->intel.device_info)) {
+ gen8_gpe_load_kernels(ctx,
+ &mfc_context->gpe_context,
+ gen9_mfc_kernels,
+ 1);
+ } else {
+ gen8_gpe_load_kernels(ctx,
&mfc_context->gpe_context,
gen8_mfc_kernels,
- NUM_MFC_KERNEL);
+ 1);
+ }
mfc_context->pipe_mode_select = gen8_mfc_pipe_mode_select;
mfc_context->set_surface_state = gen8_mfc_surface_state;
encoder_context->mfc_context = mfc_context;
encoder_context->mfc_context_destroy = gen8_mfc_context_destroy;
encoder_context->mfc_pipeline = gen8_mfc_pipeline;
- encoder_context->mfc_brc_prepare = intel_mfc_brc_prepare;
+
+ if (encoder_context->codec == CODEC_VP8)
+ encoder_context->mfc_brc_prepare = gen8_mfc_vp8_brc_prepare;
+ else
+ encoder_context->mfc_brc_prepare = intel_mfc_brc_prepare;
return True;
}