};
static const struct huc_brc_update_constant_data
-gen9_brc_update_constant_data = {
+ gen9_brc_update_constant_data = {
.global_rate_qp_adj_tab_i = {
48, 40, 32, 24, 16, 8, 0, -8,
40, 32, 24, 16, 8, 0, -8, -16,
8, 0, 0, -24, -32, -32, -32, -48,
0, -16, -16, -24, -32, -48, -56, -64,
-8, -16, -32, -32, -48, -48, -56, -64,
- -16,-32, -48, -48, -48, -56, -64, -80,
+ -16, -32, -48, -48, -48, -56, -64, -80,
},
.global_rate_qp_adj_tab_b = {
}
ret = (unsigned char)((d << 4) + (int)((v + (d == 0 ? 0 : (1 << (d - 1)))) >> d));
- ret = (ret & 0xf) == 0 ? (ret | 8) : ret;
+ ret = (ret & 0xf) == 0 ? (ret | 8) : ret;
return ret;
}
if (slice_param->num_ref_idx_active_override_flag)
vdenc_context->num_refs[0] = slice_param->num_ref_idx_l0_active_minus1 + 1;
+ for (i = 0; i < ARRAY_ELEMS(vdenc_context->list_ref_idx[0]); i++) {
+ vdenc_context->list_ref_idx[0][i] = 0xFF;
+ }
+
if (vdenc_context->num_refs[0] > ARRAY_ELEMS(vdenc_context->list_ref_idx[0]))
return VA_STATUS_ERROR_INVALID_VALUE;
VAPictureH264 *va_pic;
assert(ARRAY_ELEMS(slice_param->RefPicList0) == ARRAY_ELEMS(vdenc_context->list_ref_idx[0]));
- vdenc_context->list_ref_idx[0][i] = 0;
if (i >= vdenc_context->num_refs[0])
continue;
input_bits_per_frame = ((double)vdenc_context->max_bit_rate * 1000.0 * vdenc_context->framerate.den) / vdenc_context->framerate.num;
bps_ratio = input_bits_per_frame /
- ((double)vdenc_context->vbv_buffer_size_in_bit * vdenc_context->framerate.den / vdenc_context->framerate.num);
+ ((double)vdenc_context->vbv_buffer_size_in_bit * vdenc_context->framerate.den / vdenc_context->framerate.num);
if (bps_ratio < 0.1)
bps_ratio = 0.1;
struct huc_brc_update_constant_data *brc_buffer;
brc_buffer = (struct huc_brc_update_constant_data *)
- i965_map_gpe_resource(&vdenc_context->brc_constant_data_res);
+ i965_map_gpe_resource(&vdenc_context->brc_constant_data_res);
if (!brc_buffer)
return;
(1 << 1) | /* must be tiled */
(I965_TILEWALK_YMAJOR << 0)); /* tile walk, TILEWALK_YMAJOR */
OUT_BCS_BATCH(batch,
- (0 << 16) | /* must be 0 for interleave U/V */
+ (0 << 16) | /* must be 0 for interleave U/V */
(gpe_resource->y_cb_offset)); /* y offset for U(cb) */
OUT_BCS_BATCH(batch,
- (0 << 16) | /* must be 0 for interleave U/V */
+ (0 << 16) | /* must be 0 for interleave U/V */
(gpe_resource->y_cb_offset)); /* y offset for U(cb) */
ADVANCE_BCS_BATCH(batch);
(1 << 1) | /* must be tiled */
(I965_TILEWALK_YMAJOR << 0)); /* tile walk, TILEWALK_YMAJOR */
OUT_BCS_BATCH(batch,
- (0 << 16) | /* must be 0 for interleave U/V */
+ (0 << 16) | /* must be 0 for interleave U/V */
(gpe_resource->y_cb_offset)); /* y offset for U(cb) */
OUT_BCS_BATCH(batch,
- (0 << 16) | /* must be 0 for interleave U/V */
+ (0 << 16) | /* must be 0 for interleave U/V */
(gpe_resource->y_cb_offset)); /* y offset for v(cr) */
ADVANCE_BCS_BATCH(batch);
OUT_BCS_BATCH(batch, VDENC_PIPE_BUF_ADDR_STATE | (37 - 2));
/* DW1-6 for DS FWD REF0/REF1 */
- OUT_BUFFER_3DW(batch, vdenc_context->list_scaled_4x_reference_res[vdenc_context->list_ref_idx[0][0]].bo, 0, 0, 0);
- OUT_BUFFER_3DW(batch, vdenc_context->list_scaled_4x_reference_res[vdenc_context->list_ref_idx[0][1]].bo, 0, 0, 0);
+
+ if (vdenc_context->list_ref_idx[0][0] != 0xFF)
+ OUT_BUFFER_3DW(batch, vdenc_context->list_scaled_4x_reference_res[vdenc_context->list_ref_idx[0][0]].bo, 0, 0, 0);
+ else
+ OUT_BUFFER_3DW(batch, NULL, 0, 0, 0);
+
+ if (vdenc_context->list_ref_idx[0][1] != 0xFF)
+ OUT_BUFFER_3DW(batch, vdenc_context->list_scaled_4x_reference_res[vdenc_context->list_ref_idx[0][1]].bo, 0, 0, 0);
+ else
+ OUT_BUFFER_3DW(batch, NULL, 0, 0, 0);
/* DW7-9 for DS BWD REF0, ignored on SKL */
OUT_BUFFER_3DW(batch, NULL, 0, 0, 0);
OUT_BUFFER_3DW(batch, NULL, 0, 0, 0);
/* DW22-DW27 for FWD REF0/REF1 */
- OUT_BUFFER_3DW(batch, vdenc_context->list_reference_res[vdenc_context->list_ref_idx[0][0]].bo, 0, 0, 0);
- OUT_BUFFER_3DW(batch, vdenc_context->list_reference_res[vdenc_context->list_ref_idx[0][1]].bo, 0, 0, 0);
+
+ if (vdenc_context->list_ref_idx[0][0] != 0xFF)
+ OUT_BUFFER_3DW(batch, vdenc_context->list_reference_res[vdenc_context->list_ref_idx[0][0]].bo, 0, 0, 0);
+ else
+ OUT_BUFFER_3DW(batch, NULL, 0, 0, 0);
+
+ if (vdenc_context->list_ref_idx[0][1] != 0xFF)
+ OUT_BUFFER_3DW(batch, vdenc_context->list_reference_res[vdenc_context->list_ref_idx[0][1]].bo, 0, 0, 0);
+ else
+ OUT_BUFFER_3DW(batch, NULL, 0, 0, 0);
/* DW28-DW30 for FWD REF2, ignored on SKL */
OUT_BUFFER_3DW(batch, NULL, 0, 0, 0);
struct intel_batchbuffer *batch = encoder_context->base.batch;
if (data_bits_in_last_dw == 0)
- data_bits_in_last_dw = 32;
+ data_bits_in_last_dw = 32;
BEGIN_BCS_BATCH(batch, lenght_in_dws + 2);
/* For the Normal H264 */
if (slice_index &&
- IS_KBL(i965->intel.device_info)) {
+ (IS_KBL(i965->intel.device_info) ||
+ IS_GLK(i965->intel.device_info))) {
saved_macroblock_address = slice_params->macroblock_address;
slice_params->macroblock_address = 0;
}
slice_header1 = slice_header;
if (slice_index &&
- IS_KBL(i965->intel.device_info)) {
+ (IS_KBL(i965->intel.device_info) ||
+ IS_GLK(i965->intel.device_info))) {
slice_params->macroblock_address = saved_macroblock_address;
}
unsigned char *slice_header1 = NULL;
if (slice_index &&
- IS_KBL(i965->intel.device_info)) {
+ (IS_KBL(i965->intel.device_info) ||
+ IS_GLK(i965->intel.device_info))) {
slice_header_index = (encode_state->slice_header_index[0] & SLICE_PACKED_DATA_INDEX_MASK);
}
BEGIN_BCS_BATCH(batch, 11);
- OUT_BCS_BATCH(batch, MFX_AVC_SLICE_STATE | (11 - 2) );
+ OUT_BCS_BATCH(batch, MFX_AVC_SLICE_STATE | (11 - 2));
OUT_BCS_BATCH(batch, slice_type);
OUT_BCS_BATCH(batch,
(num_ref_l0 << 16) |
OUT_BCS_BATCH(batch,
(0 << 31) | /* TODO: ignore it for VDENC ??? */
(!slice_param->macroblock_address << 30) | /* ResetRateControlCounter */
- (2 << 28) | /* Loose Rate Control */
+ (2 << 28) | /* Loose Rate Control */
(0 << 24) | /* RC Stable Tolerance */
(0 << 23) | /* RC Panic Enable */
(1 << 22) | /* CBP mode */
(0 << 21) | /* MB Type Direct Conversion, 0: Enable, 1: Disable */
(0 << 20) | /* MB Type Skip Conversion, 0: Enable, 1: Disable */
(!next_slice_param << 19) | /* Is Last Slice */
- (0 << 18) | /* BitstreamOutputFlag Compressed BitStream Output Disable Flag 0:enable 1:disable */
- (1 << 17) | /* HeaderPresentFlag */
- (1 << 16) | /* SliceData PresentFlag */
- (0 << 15) | /* TailPresentFlag, TODO: check it on VDEnc */
- (1 << 13) | /* RBSP NAL TYPE */
+ (0 << 18) | /* BitstreamOutputFlag Compressed BitStream Output Disable Flag 0:enable 1:disable */
+ (1 << 17) | /* HeaderPresentFlag */
+ (1 << 16) | /* SliceData PresentFlag */
+ (0 << 15) | /* TailPresentFlag, TODO: check it on VDEnc */
+ (1 << 13) | /* RBSP NAL TYPE */
(slice_index << 4) |
(1 << 12)); /* CabacZeroWordInsertionEnable */
fwd_ref_entry = 0x80808080;
slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
- for (i = 0; i < MAX(vdenc_context->num_refs[0], 3); i++) {
+ for (i = 0; i < MIN(vdenc_context->num_refs[0], 3); i++) {
ref_pic = &slice_param->RefPicList0[i];
ref_idx_shift = i * 8;
+ if (vdenc_context->list_ref_idx[0][i] == 0xFF)
+ continue;
+
fwd_ref_entry &= ~(0xFF << ref_idx_shift);
fwd_ref_entry += (gen9_vdenc_mfx_get_ref_idx_state(ref_pic, vdenc_context->list_ref_idx[0][i]) << ref_idx_shift);
}
{
struct i965_driver_data *i965 = i965_driver_data(ctx);
- if (IS_KBL(i965->intel.device_info)) {
+ if (IS_KBL(i965->intel.device_info) ||
+ IS_GLK(i965->intel.device_info)) {
gen95_vdenc_hw_interfaces_init(ctx, encoder_context, vdenc_context);
} else {
gen9_vdenc_hw_interfaces_init(ctx, encoder_context, vdenc_context);