2 * Copyright © 2012 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sub license, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
19 * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 * Xiang Haihao <haihao.xiang@intel.com>
26 * Zhao Yakui <yakui.zhao@intel.com>
36 #include "intel_batchbuffer.h"
37 #include "i965_defines.h"
38 #include "i965_structs.h"
39 #include "i965_drv_video.h"
40 #include "i965_encoder.h"
41 #include "i965_encoder_utils.h"
45 #include "intel_media.h"
/* Fallback log2f(): some toolchains lack it, so emulate via logf(x)/ln(2).
 * NOTE(review): the #ifdef guard that normally wraps this fallback is not
 * visible in this extract -- confirm it is only defined when libm lacks
 * log2f, otherwise it shadows the standard function. */
48 #define log2f(x) (logf(x)/(float)M_LN2)
51 int intel_avc_enc_slice_type_fixup(int slice_type)
53 if (slice_type == SLICE_TYPE_SP ||
54 slice_type == SLICE_TYPE_P)
55 slice_type = SLICE_TYPE_P;
56 else if (slice_type == SLICE_TYPE_SI ||
57 slice_type == SLICE_TYPE_I)
58 slice_type = SLICE_TYPE_I;
60 if (slice_type != SLICE_TYPE_B)
61 WARN_ONCE("Invalid slice type for H.264 encoding!\n");
63 slice_type = SLICE_TYPE_B;
/* Reset the per-slice-type BRC tuning tables to fixed defaults.
 * Index i covers the three canonical slice types (I/P/B, see
 * intel_avc_enc_slice_type_fixup).  encode_state is unused here.
 * NOTE(review): the extract dropped the return type, opening brace and the
 * declaration of 'i' (original lines 69/72/74-75). */
70 intel_mfc_bit_rate_control_context_init(struct encode_state *encode_state,
71 struct intel_encoder_context *encoder_context)
73 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
/* QP step limits and grow/shrink reaction rates for the HW rate control. */
76 for(i = 0 ; i < 3; i++) {
77 mfc_context->bit_rate_control_context[i].MaxQpNegModifier = 6;
78 mfc_context->bit_rate_control_context[i].MaxQpPosModifier = 6;
79 mfc_context->bit_rate_control_context[i].GrowInit = 6;
80 mfc_context->bit_rate_control_context[i].GrowResistance = 4;
81 mfc_context->bit_rate_control_context[i].ShrinkInit = 6;
82 mfc_context->bit_rate_control_context[i].ShrinkResistance = 4;
/* Correction window weights, symmetric around the target frame size. */
84 mfc_context->bit_rate_control_context[i].Correct[0] = 8;
85 mfc_context->bit_rate_control_context[i].Correct[1] = 4;
86 mfc_context->bit_rate_control_context[i].Correct[2] = 2;
87 mfc_context->bit_rate_control_context[i].Correct[3] = 2;
88 mfc_context->bit_rate_control_context[i].Correct[4] = 4;
89 mfc_context->bit_rate_control_context[i].Correct[5] = 8;
/* (Re)initialize bit-rate-control state for every temporal layer: HRD buffer
 * sizes/fullness, per-GOP frame counts, per-slice-type target frame sizes and
 * initial QPs.  Called on BRC reset (see intel_mfc_brc_prepare).
 * NOTE(review): several structural lines (braces, 'int i;', the i==0 / i>0
 * branch markers around lines 120-138) were dropped by the extraction; the
 * comments below describe the visible statements only. */
93 static void intel_mfc_brc_init(struct encode_state *encode_state,
94 struct intel_encoder_context* encoder_context)
96 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
97 double bitrate, framerate;
/* 8 * 3/2 bytes per pixel: bits in one uncompressed NV12 frame. */
98 double frame_per_bits = 8 * 3 * encoder_context->frame_width_in_pixel * encoder_context->frame_height_in_pixel / 2;
/* Heuristic frame sizes corresponding to the QP extremes (QP=1, QP=51). */
99 double qp1_size = 0.1 * frame_per_bits;
100 double qp51_size = 0.001 * frame_per_bits;
101 int min_qp = MAX(1, encoder_context->brc.min_qp);
102 double bpf, factor, hrd_factor;
103 int inum = encoder_context->brc.num_iframes_in_gop,
104 pnum = encoder_context->brc.num_pframes_in_gop,
105 bnum = encoder_context->brc.num_bframes_in_gop; /* Gop structure: number of I, P, B frames in the Gop. */
106 int intra_period = encoder_context->brc.gop_size;
/* Temporal-SVC streams get a larger QP=1 estimate. */
109 if (encoder_context->layer.num_layers > 1)
110 qp1_size = 0.15 * frame_per_bits;
112 mfc_context->brc.mode = encoder_context->rate_control_mode;
114 mfc_context->hrd.violation_noted = 0;
/* Per-layer init; layer 0 uses absolute rate/framerate, higher layers use
 * the delta over the layer below (bits_per_second[] is cumulative). */
116 for (i = 0; i < encoder_context->layer.num_layers; i++) {
117 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_I] = 26;
118 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_P] = 26;
119 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_B] = 26;
/* NOTE(review): the 'if (i == 0) { ... } else { ... }' guards around the
 * next four statements were dropped by the extraction. */
122 bitrate = encoder_context->brc.bits_per_second[0];
123 framerate = (double)encoder_context->brc.framerate[0].num / (double)encoder_context->brc.framerate[0].den;
125 bitrate = (encoder_context->brc.bits_per_second[i] - encoder_context->brc.bits_per_second[i - 1]);
126 framerate = ((double)encoder_context->brc.framerate[i].num / (double)encoder_context->brc.framerate[i].den) -
127 ((double)encoder_context->brc.framerate[i - 1].num / (double)encoder_context->brc.framerate[i - 1].den);
/* VBR: scale the HRD-derived rate down to the target percentage. */
130 if (mfc_context->brc.mode == VA_RC_VBR && encoder_context->brc.target_percentage[i])
131 bitrate = bitrate * encoder_context->brc.target_percentage[i] / 100;
/* 'factor' is the frame-rate ratio between this layer and the one below. */
133 if (i == encoder_context->layer.num_layers - 1)
136 factor = ((double)encoder_context->brc.framerate[i].num / (double)encoder_context->brc.framerate[i].den) /
137 ((double)encoder_context->brc.framerate[i - 1].num / (double)encoder_context->brc.framerate[i - 1].den);
/* Share of the total HRD buffer attributed to this layer. */
140 hrd_factor = (double)bitrate / encoder_context->brc.bits_per_second[encoder_context->layer.num_layers - 1];
142 mfc_context->hrd.buffer_size[i] = (unsigned int)(encoder_context->brc.hrd_buffer_size * hrd_factor);
/* Clamp initial fullness to the buffer size (fall back to half-full). */
143 mfc_context->hrd.current_buffer_fullness[i] =
144 (double)(encoder_context->brc.hrd_initial_buffer_fullness < encoder_context->brc.hrd_buffer_size) ?
145 encoder_context->brc.hrd_initial_buffer_fullness : encoder_context->brc.hrd_buffer_size / 2.;
146 mfc_context->hrd.current_buffer_fullness[i] *= hrd_factor;
147 mfc_context->hrd.target_buffer_fullness[i] = (double)encoder_context->brc.hrd_buffer_size * hrd_factor / 2.;
/* Capacity expressed in "QP=1 frames" -- used as a stability threshold. */
148 mfc_context->hrd.buffer_capacity[i] = (double)encoder_context->brc.hrd_buffer_size * hrd_factor / qp1_size;
/* Derive per-layer GOP composition by scaling the base GOP by 'factor'.
 * NOTE(review): the i==0/i>0 branches here were also dropped. */
150 if (encoder_context->layer.num_layers > 1) {
152 intra_period = (int)(encoder_context->brc.gop_size * factor);
154 pnum = (int)(encoder_context->brc.num_pframes_in_gop * factor);
155 bnum = intra_period - inum - pnum;
157 intra_period = (int)(encoder_context->brc.gop_size * factor) - intra_period;
159 pnum = (int)(encoder_context->brc.num_pframes_in_gop * factor) - pnum;
160 bnum = intra_period - inum - pnum;
164 mfc_context->brc.gop_nums[i][SLICE_TYPE_I] = inum;
165 mfc_context->brc.gop_nums[i][SLICE_TYPE_P] = pnum;
166 mfc_context->brc.gop_nums[i][SLICE_TYPE_B] = bnum;
/* Distribute the GOP bit budget using the fixed I:P:B weights. */
168 mfc_context->brc.target_frame_size[i][SLICE_TYPE_I] = (int)((double)((bitrate * intra_period)/framerate) /
169 (double)(inum + BRC_PWEIGHT * pnum + BRC_BWEIGHT * bnum));
170 mfc_context->brc.target_frame_size[i][SLICE_TYPE_P] = BRC_PWEIGHT * mfc_context->brc.target_frame_size[i][SLICE_TYPE_I];
171 mfc_context->brc.target_frame_size[i][SLICE_TYPE_B] = BRC_BWEIGHT * mfc_context->brc.target_frame_size[i][SLICE_TYPE_I];
173 bpf = mfc_context->brc.bits_per_frame[i] = bitrate/framerate;
/* Explicit initial QP wins; otherwise interpolate a starting QP from the
 * average bits-per-frame between the QP=51 and QP=1 size estimates. */
175 if (encoder_context->brc.initial_qp) {
176 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_I] = encoder_context->brc.initial_qp;
177 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_P] = encoder_context->brc.initial_qp;
178 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_B] = encoder_context->brc.initial_qp;
180 if ((bpf > qp51_size) && (bpf < qp1_size)) {
181 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_P] = 51 - 50*(bpf - qp51_size)/(qp1_size - qp51_size);
183 else if (bpf >= qp1_size)
184 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_P] = 1;
185 else if (bpf <= qp51_size)
186 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_P] = 51;
188 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_I] = mfc_context->brc.qp_prime_y[i][SLICE_TYPE_P];
189 mfc_context->brc.qp_prime_y[i][SLICE_TYPE_B] = mfc_context->brc.qp_prime_y[i][SLICE_TYPE_I];
/* Keep all starting QPs within [min_qp, 51]. */
192 BRC_CLIP(mfc_context->brc.qp_prime_y[i][SLICE_TYPE_I], min_qp, 51);
193 BRC_CLIP(mfc_context->brc.qp_prime_y[i][SLICE_TYPE_P], min_qp, 51);
194 BRC_CLIP(mfc_context->brc.qp_prime_y[i][SLICE_TYPE_B], min_qp, 51);
/* Apply one encoded frame to the HRD (hypothetical reference decoder) model
 * of the current layer: drain frame_bits, then refill by one frame's budget.
 * Returns BRC_UNDERFLOW / BRC_OVERFLOW (rolling fullness back to its previous
 * value so a re-encode can retry) or BRC_NO_HRD_VIOLATION.
 * NOTE(review): the 'int frame_bits' parameter line and the BRC_OVERFLOW
 * return were dropped by the extraction (original lines ~200, 219-220). */
198 int intel_mfc_update_hrd(struct encode_state *encode_state,
199 struct intel_encoder_context *encoder_context,
202 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
203 int layer_id = encoder_context->layer.curr_frame_layer_id;
204 double prev_bf = mfc_context->hrd.current_buffer_fullness[layer_id];
206 mfc_context->hrd.current_buffer_fullness[layer_id] -= frame_bits;
/* Frame too large for the available buffer: report underflow, keep state. */
208 if (mfc_context->hrd.buffer_size[layer_id] > 0 && mfc_context->hrd.current_buffer_fullness[layer_id] <= 0.) {
209 mfc_context->hrd.current_buffer_fullness[layer_id] = prev_bf;
210 return BRC_UNDERFLOW;
/* Refill at the nominal per-frame rate. */
213 mfc_context->hrd.current_buffer_fullness[layer_id] += mfc_context->brc.bits_per_frame[layer_id];
214 if (mfc_context->hrd.buffer_size[layer_id] > 0 && mfc_context->hrd.current_buffer_fullness[layer_id] > mfc_context->hrd.buffer_size[layer_id]) {
/* VBR tolerates overflow: just saturate the buffer. */
215 if (mfc_context->brc.mode == VA_RC_VBR)
216 mfc_context->hrd.current_buffer_fullness[layer_id] = mfc_context->hrd.buffer_size[layer_id];
218 mfc_context->hrd.current_buffer_fullness[layer_id] = prev_bf;
222 return BRC_NO_HRD_VIOLATION;
/* CBR post-encode rate-control step: after a frame has been packed, update
 * the HRD model and predict the QP for the next frame of each slice type.
 * Returns a gen6_brc_status telling the caller whether the frame must be
 * re-encoded (underflow/overflow) or can be kept.
 * NOTE(review): the extraction dropped many structural lines (braces, the
 * 'int qpi, qpp, qpb;' and 'double x, y;' declarations, 'int frame_bits'
 * parameter, the qpn++/qpn-- rounding statements, and several else lines);
 * comments below describe only what is visible. */
225 static int intel_mfc_brc_postpack_cbr(struct encode_state *encode_state,
226 struct intel_encoder_context *encoder_context,
229 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
230 gen6_brc_status sts = BRC_NO_HRD_VIOLATION;
231 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
232 int slicetype = intel_avc_enc_slice_type_fixup(pSliceParameter->slice_type);
233 int curr_frame_layer_id, next_frame_layer_id;
235 int qp; // quantizer of previously encoded slice of current type
236 int qpn; // predicted quantizer for next frame of current type in integer format
237 double qpf; // predicted quantizer for next frame of current type in float format
238 double delta_qp; // QP correction
239 int min_qp = MAX(1, encoder_context->brc.min_qp);
240 int target_frame_size, frame_size_next;
242 * x - how far we are from HRD buffer borders
243 * y - how far we are from target HRD buffer fullness
246 double frame_size_alpha;
/* Single-layer (or layer map unset): everything happens on layer 0. */
248 if (encoder_context->layer.num_layers < 2 || encoder_context->layer.size_frame_layer_ids == 0) {
249 curr_frame_layer_id = 0;
250 next_frame_layer_id = 0;
252 curr_frame_layer_id = encoder_context->layer.curr_frame_layer_id;
253 next_frame_layer_id = encoder_context->layer.frame_layer_ids[encoder_context->num_frames_in_sequence % encoder_context->layer.size_frame_layer_ids];
256 /* checking whether HRD compliance first */
257 sts = intel_mfc_update_hrd(encode_state, encoder_context, frame_bits);
259 if (sts == BRC_NO_HRD_VIOLATION) { // no HRD violation
/* On a violation, predict for the current layer again (re-encode path). */
262 next_frame_layer_id = curr_frame_layer_id;
/* Swap in the stats of the layer we are predicting for. */
265 mfc_context->brc.bits_prev_frame[curr_frame_layer_id] = frame_bits;
266 frame_bits = mfc_context->brc.bits_prev_frame[next_frame_layer_id];
268 mfc_context->brc.prev_slice_type[curr_frame_layer_id] = slicetype;
269 slicetype = mfc_context->brc.prev_slice_type[next_frame_layer_id];
271 /* 0 means the next frame is the first frame of next layer */
275 qpi = mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_I];
276 qpp = mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_P];
277 qpb = mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_B];
279 qp = mfc_context->brc.qp_prime_y[next_frame_layer_id][slicetype];
281 target_frame_size = mfc_context->brc.target_frame_size[next_frame_layer_id][slicetype];
/* Damping factor: small HRD buffers react immediately (alpha = 0),
 * larger ones average over up to 30 frames of the same type. */
282 if (mfc_context->hrd.buffer_capacity[next_frame_layer_id] < 5)
283 frame_size_alpha = 0;
285 frame_size_alpha = (double)mfc_context->brc.gop_nums[next_frame_layer_id][slicetype];
286 if (frame_size_alpha > 30) frame_size_alpha = 30;
287 frame_size_next = target_frame_size + (double)(target_frame_size - frame_bits) /
288 (double)(frame_size_alpha + 1.);
290 /* frame_size_next: avoiding negative number and too small value */
291 if ((double)frame_size_next < (double)(target_frame_size * 0.25))
292 frame_size_next = (int)((double)target_frame_size * 0.25);
/* First-order model: frame size is inversely proportional to QP. */
294 qpf = (double)qp * target_frame_size / frame_size_next;
295 qpn = (int)(qpf + 0.5);
298 /* setting qpn we round qpf making mistakes: now we are trying to compensate this */
299 mfc_context->brc.qpf_rounding_accumulator[next_frame_layer_id] += qpf - qpn;
300 if (mfc_context->brc.qpf_rounding_accumulator[next_frame_layer_id] > 1.0) {
302 mfc_context->brc.qpf_rounding_accumulator[next_frame_layer_id] = 0.;
303 } else if (mfc_context->brc.qpf_rounding_accumulator[next_frame_layer_id] < -1.0) {
305 mfc_context->brc.qpf_rounding_accumulator[next_frame_layer_id] = 0.;
308 /* making sure that QP is not changing too fast */
309 if ((qpn - qp) > BRC_QP_MAX_CHANGE) qpn = qp + BRC_QP_MAX_CHANGE;
310 else if ((qpn - qp) < -BRC_QP_MAX_CHANGE) qpn = qp - BRC_QP_MAX_CHANGE;
311 /* making sure that with QP predictions we do not leave QPs range */
312 BRC_CLIP(qpn, 1, 51);
314 /* calculating QP delta as some function*/
/* x in [-1, 1]: signed distance from target fullness, normalized to the
 * nearer buffer border; y: distance to the border we are drifting toward. */
315 x = mfc_context->hrd.target_buffer_fullness[next_frame_layer_id] - mfc_context->hrd.current_buffer_fullness[next_frame_layer_id];
317 x /= mfc_context->hrd.target_buffer_fullness[next_frame_layer_id];
318 y = mfc_context->hrd.current_buffer_fullness[next_frame_layer_id];
321 x /= (mfc_context->hrd.buffer_size[next_frame_layer_id] - mfc_context->hrd.target_buffer_fullness[next_frame_layer_id]);
322 y = mfc_context->hrd.buffer_size[next_frame_layer_id] - mfc_context->hrd.current_buffer_fullness[next_frame_layer_id];
324 if (y < 0.01) y = 0.01;
326 else if (x < -1) x = -1;
/* Smooth correction: stronger the closer we are to a buffer border. */
328 delta_qp = BRC_QP_MAX_CHANGE*exp(-1/y)*sin(BRC_PI_0_5 * x);
329 qpn = (int)(qpn + delta_qp + 0.5);
331 /* making sure that with QP predictions we do not leave QPs range */
332 BRC_CLIP(qpn, min_qp, 51);
334 if (sts == BRC_NO_HRD_VIOLATION) { // no HRD violation
335 /* correcting QPs of slices of other types */
/* Keep the I/P/B QPs within their conventional offsets of each other. */
336 if (slicetype == SLICE_TYPE_P) {
337 if (abs(qpn + BRC_P_B_QP_DIFF - qpb) > 2)
338 mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_B] += (qpn + BRC_P_B_QP_DIFF - qpb) >> 1;
339 if (abs(qpn - BRC_I_P_QP_DIFF - qpi) > 2)
340 mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_I] += (qpn - BRC_I_P_QP_DIFF - qpi) >> 1;
341 } else if (slicetype == SLICE_TYPE_I) {
342 if (abs(qpn + BRC_I_B_QP_DIFF - qpb) > 4)
343 mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_B] += (qpn + BRC_I_B_QP_DIFF - qpb) >> 2;
344 if (abs(qpn + BRC_I_P_QP_DIFF - qpp) > 2)
345 mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_P] += (qpn + BRC_I_P_QP_DIFF - qpp) >> 2;
346 } else { // SLICE_TYPE_B
347 if (abs(qpn - BRC_P_B_QP_DIFF - qpp) > 2)
348 mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_P] += (qpn - BRC_P_B_QP_DIFF - qpp) >> 1;
349 if (abs(qpn - BRC_I_B_QP_DIFF - qpi) > 4)
350 mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_I] += (qpn - BRC_I_B_QP_DIFF - qpi) >> 2;
352 BRC_CLIP(mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_I], min_qp, 51);
353 BRC_CLIP(mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_P], min_qp, 51);
354 BRC_CLIP(mfc_context->brc.qp_prime_y[next_frame_layer_id][SLICE_TYPE_B], min_qp, 51);
355 } else if (sts == BRC_UNDERFLOW) { // underflow
/* Force a strictly higher QP for the re-encode; flag when already maxed. */
356 if (qpn <= qp) qpn = qp + 1;
359 sts = BRC_UNDERFLOW_WITH_MAX_QP; //underflow with maxQP
361 } else if (sts == BRC_OVERFLOW) {
362 if (qpn >= qp) qpn = qp - 1;
363 if (qpn < min_qp) { // overflow with minQP
365 sts = BRC_OVERFLOW_WITH_MIN_QP; // bit stuffing to be done
369 mfc_context->brc.qp_prime_y[next_frame_layer_id][slicetype] = qpn;
/* VBR post-encode rate-control step (single layer only): a reactive scheme
 * that keeps HRD fullness above target so scene changes don't force a large
 * QP jump.  Returns a gen6_brc_status; UNDERFLOW triggers a re-encode.
 * NOTE(review): 'gen6_brc_status sts' declaration, 'int frame_bits'
 * parameter, the bare 'qp_delta = ...' assignments in several branches and
 * the case labels' surrounding lines were dropped by the extraction. */
374 static int intel_mfc_brc_postpack_vbr(struct encode_state *encode_state,
375 struct intel_encoder_context *encoder_context,
378 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
380 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
381 int slice_type = intel_avc_enc_slice_type_fixup(pSliceParameter->slice_type);
/* qp aliases the layer-0 QP table; writes through it are persistent. */
382 int *qp = mfc_context->brc.qp_prime_y[0];
383 int min_qp = MAX(1, encoder_context->brc.min_qp);
384 int qp_delta, large_frame_adjustment;
386 // This implements a simple reactive VBR rate control mode for single-layer H.264. The primary
387 // aim here is to avoid the problematic behaviour that the CBR rate controller displays on
388 // scene changes, where the QP can get pushed up by a large amount in a short period and
389 // compromise the quality of following frames to a very visible degree.
390 // The main idea, then, is to try to keep the HRD buffering above the target level most of the
391 // time, so that when a large frame is generated (on a scene change or when the stream
392 // complexity increases) we have plenty of slack to be able to encode the more difficult region
393 // without compromising quality immediately on the following frames. It is optimistic about
394 // the complexity of future frames, so even after generating one or more large frames on a
395 // significant change it will try to keep the QP at its current level until the HRD buffer
396 // bounds force a change to maintain the intended rate.
398 sts = intel_mfc_update_hrd(encode_state, encoder_context, frame_bits);
400 // This adjustment is applied to increase the QP by more than we normally would if a very
401 // large frame is encountered and we are in danger of running out of slack.
402 large_frame_adjustment = rint(2.0 * log(frame_bits / mfc_context->brc.target_frame_size[0][slice_type]));
404 if (sts == BRC_UNDERFLOW) {
405 // The frame is far too big and we don't have the bits available to send it, so it will
406 // have to be re-encoded at a higher QP.
408 if (frame_bits > mfc_context->brc.target_frame_size[0][slice_type])
409 qp_delta += large_frame_adjustment;
410 } else if (sts == BRC_OVERFLOW) {
411 // The frame is very small and we are now overflowing the HRD buffer. Currently this case
412 // does not occur because we ignore overflow in VBR mode.
413 assert(0 && "Overflow in VBR mode");
414 } else if (frame_bits <= mfc_context->brc.target_frame_size[0][slice_type]) {
415 // The frame is smaller than the average size expected for this frame type.
416 if (mfc_context->hrd.current_buffer_fullness[0] >
417 (mfc_context->hrd.target_buffer_fullness[0] + mfc_context->hrd.buffer_size[0]) / 2.0) {
418 // We currently have lots of bits available, so decrease the QP slightly for the next
422 // The HRD buffer fullness is increasing, so do nothing. (We may be under the target
423 // level here, but are moving in the right direction.)
427 // The frame is larger than the average size expected for this frame type.
428 if (mfc_context->hrd.current_buffer_fullness[0] > mfc_context->hrd.target_buffer_fullness[0]) {
429 // We are currently over the target level, so do nothing.
431 } else if (mfc_context->hrd.current_buffer_fullness[0] > mfc_context->hrd.target_buffer_fullness[0] / 2.0) {
432 // We are under the target level, but not critically. Increase the QP by one step if
433 // continuing like this would underflow soon (currently within one second).
434 if (mfc_context->hrd.current_buffer_fullness[0] /
435 (double)(frame_bits - mfc_context->brc.target_frame_size[0][slice_type] + 1) <
436 ((double)encoder_context->brc.framerate[0].num / (double)encoder_context->brc.framerate[0].den))
441 // We are a long way under the target level. Always increase the QP, possibly by a
442 // larger amount dependent on how big the frame we just made actually was.
443 qp_delta = +1 + large_frame_adjustment;
/* Apply the delta to the QP of the slice type just encoded, then rebase
 * the other two types at their conventional offsets. */
447 switch (slice_type) {
449 qp[SLICE_TYPE_I] += qp_delta;
450 qp[SLICE_TYPE_P] = qp[SLICE_TYPE_I] + BRC_I_P_QP_DIFF;
451 qp[SLICE_TYPE_B] = qp[SLICE_TYPE_I] + BRC_I_B_QP_DIFF;
454 qp[SLICE_TYPE_P] += qp_delta;
455 qp[SLICE_TYPE_I] = qp[SLICE_TYPE_P] - BRC_I_P_QP_DIFF;
456 qp[SLICE_TYPE_B] = qp[SLICE_TYPE_P] + BRC_P_B_QP_DIFF;
459 qp[SLICE_TYPE_B] += qp_delta;
460 qp[SLICE_TYPE_I] = qp[SLICE_TYPE_B] - BRC_I_B_QP_DIFF;
461 qp[SLICE_TYPE_P] = qp[SLICE_TYPE_B] - BRC_P_B_QP_DIFF;
464 BRC_CLIP(mfc_context->brc.qp_prime_y[0][SLICE_TYPE_I], min_qp, 51);
465 BRC_CLIP(mfc_context->brc.qp_prime_y[0][SLICE_TYPE_P], min_qp, 51);
466 BRC_CLIP(mfc_context->brc.qp_prime_y[0][SLICE_TYPE_B], min_qp, 51);
/* Escalate the status when the QP range is exhausted. */
468 if (sts == BRC_UNDERFLOW && qp[slice_type] == 51)
469 sts = BRC_UNDERFLOW_WITH_MAX_QP;
470 if (sts == BRC_OVERFLOW && qp[slice_type] == min_qp)
471 sts = BRC_OVERFLOW_WITH_MIN_QP;
/* Dispatch the post-encode BRC update to the CBR or VBR implementation based
 * on the configured rate-control mode; asserts on any other mode.
 * NOTE(review): the 'case VA_RC_CBR:' / 'case VA_RC_VBR:' / 'default:' label
 * lines were dropped by the extraction. */
476 int intel_mfc_brc_postpack(struct encode_state *encode_state,
477 struct intel_encoder_context *encoder_context,
480 switch (encoder_context->rate_control_mode) {
482 return intel_mfc_brc_postpack_cbr(encode_state, encoder_context, frame_bits);
484 return intel_mfc_brc_postpack_vbr(encode_state, encoder_context, frame_bits);
486 assert(0 && "Invalid RC mode");
/* Initialise the VUI HRD fields used to build the buffering-period/picture
 * timing SEI messages.  Only CBR populates the rate-dependent fields; the
 * three delay-length fields are always 24 bits.
 * NOTE(review): '(target_bit_rate * 8)' is int arithmetic and could overflow
 * for bitrates above ~256 Mbit/s -- confirm upstream intent. */
489 static void intel_mfc_hrd_context_init(struct encode_state *encode_state,
490 struct intel_encoder_context *encoder_context)
492 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
493 unsigned int rate_control_mode = encoder_context->rate_control_mode;
/* Top layer's cumulative rate is the stream's total target bitrate. */
494 int target_bit_rate = encoder_context->brc.bits_per_second[encoder_context->layer.num_layers - 1];
496 // currently we only support CBR mode.
497 if (rate_control_mode == VA_RC_CBR) {
498 mfc_context->vui_hrd.i_bit_rate_value = target_bit_rate >> 10;
/* Initial CPB removal delay in 90 kHz ticks (half-full buffer start). */
499 mfc_context->vui_hrd.i_initial_cpb_removal_delay = ((target_bit_rate * 8) >> 10) * 0.5 * 1024 / target_bit_rate * 90000;
500 mfc_context->vui_hrd.i_cpb_removal_delay = 2;
501 mfc_context->vui_hrd.i_frame_number = 0;
503 mfc_context->vui_hrd.i_initial_cpb_removal_delay_length = 24;
504 mfc_context->vui_hrd.i_cpb_removal_delay_length = 24;
505 mfc_context->vui_hrd.i_dpb_output_delay_length = 24;
511 intel_mfc_hrd_context_update(struct encode_state *encode_state,
512 struct gen6_mfc_context *mfc_context)
514 mfc_context->vui_hrd.i_frame_number++;
/* Check whether the submitted slices exactly tile the frame: sums
 * num_macroblocks over all slice parameter buffers and compares against the
 * frame's macroblock count (surface dimensions rounded up to 16).
 * NOTE(review): the declarations of 'i'/'mbCount' and the return statements
 * (visible comparison suggests 0 on full coverage, nonzero otherwise) were
 * dropped by the extraction -- confirm against upstream. */
517 int intel_mfc_interlace_check(VADriverContextP ctx,
518 struct encode_state *encode_state,
519 struct intel_encoder_context *encoder_context)
521 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
522 VAEncSliceParameterBufferH264 *pSliceParameter;
525 int width_in_mbs = (mfc_context->surface_state.width + 15) / 16;
526 int height_in_mbs = (mfc_context->surface_state.height + 15) / 16;
528 for (i = 0; i < encode_state->num_slice_params_ext; i++) {
529 pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[i]->buffer;
530 mbCount += pSliceParameter->num_macroblocks;
533 if ( mbCount == ( width_in_mbs * height_in_mbs ) )
/* Entry point called before encoding: when the codec is H.264/H.264-MVC and
 * rate control is not CQP, (re)initialize the BRC and HRD contexts if a BRC
 * reset was requested (rate/GOP parameters changed). */
539 void intel_mfc_brc_prepare(struct encode_state *encode_state,
540 struct intel_encoder_context *encoder_context)
542 unsigned int rate_control_mode = encoder_context->rate_control_mode;
/* BRC only applies to H.264 family codecs; early-out otherwise. */
544 if (encoder_context->codec != CODEC_H264 &&
545 encoder_context->codec != CODEC_H264_MVC)
548 if (rate_control_mode != VA_RC_CQP) {
549 /* Programming bit rate control */
550 if (encoder_context->brc.need_reset) {
551 intel_mfc_bit_rate_control_context_init(encode_state, encoder_context);
552 intel_mfc_brc_init(encode_state, encoder_context);
555 /* Programming HRD control */
556 if (encoder_context->brc.need_reset)
557 intel_mfc_hrd_context_init(encode_state, encoder_context);
/* Emit the packed SPS, PPS and SEI headers into the slice batch via the
 * hardware insert_object() hook.  Each header supplied by the app through
 * packed-header buffers is inserted with emulation-prevention handling
 * driven by its has_emulation_bytes flag; if no SEI was supplied and the
 * mode is CBR, a timing SEI is built locally from the VUI HRD state.
 * NOTE(review): several insert_object() argument lines (batch pointer,
 * header_data, flags) were dropped by the extraction between the visible
 * lines. */
561 void intel_mfc_avc_pipeline_header_programing(VADriverContextP ctx,
562 struct encode_state *encode_state,
563 struct intel_encoder_context *encoder_context,
564 struct intel_batchbuffer *slice_batch)
566 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
567 int idx = va_enc_packed_type_to_idx(VAEncPackedHeaderH264_SPS);
568 unsigned int rate_control_mode = encoder_context->rate_control_mode;
569 unsigned int skip_emul_byte_cnt;
/* --- SPS --- */
571 if (encode_state->packed_header_data[idx]) {
572 VAEncPackedHeaderParameterBuffer *param = NULL;
573 unsigned int *header_data = (unsigned int *)encode_state->packed_header_data[idx]->buffer;
574 unsigned int length_in_bits;
576 assert(encode_state->packed_header_param[idx]);
577 param = (VAEncPackedHeaderParameterBuffer *)encode_state->packed_header_param[idx]->buffer;
578 length_in_bits = param->bit_length;
/* Number of leading bytes (start code + NAL header) exempt from
 * emulation-prevention insertion. */
580 skip_emul_byte_cnt = intel_avc_find_skipemulcnt((unsigned char *)header_data, length_in_bits);
581 mfc_context->insert_object(ctx,
584 ALIGN(length_in_bits, 32) >> 5,
585 length_in_bits & 0x1f,
589 !param->has_emulation_bytes,
/* --- PPS --- */
593 idx = va_enc_packed_type_to_idx(VAEncPackedHeaderH264_PPS);
595 if (encode_state->packed_header_data[idx]) {
596 VAEncPackedHeaderParameterBuffer *param = NULL;
597 unsigned int *header_data = (unsigned int *)encode_state->packed_header_data[idx]->buffer;
598 unsigned int length_in_bits;
600 assert(encode_state->packed_header_param[idx]);
601 param = (VAEncPackedHeaderParameterBuffer *)encode_state->packed_header_param[idx]->buffer;
602 length_in_bits = param->bit_length;
604 skip_emul_byte_cnt = intel_avc_find_skipemulcnt((unsigned char *)header_data, length_in_bits);
606 mfc_context->insert_object(ctx,
609 ALIGN(length_in_bits, 32) >> 5,
610 length_in_bits & 0x1f,
614 !param->has_emulation_bytes,
/* --- SEI: application-supplied, or generated timing SEI for CBR --- */
618 idx = va_enc_packed_type_to_idx(VAEncPackedHeaderH264_SEI);
620 if (encode_state->packed_header_data[idx]) {
621 VAEncPackedHeaderParameterBuffer *param = NULL;
622 unsigned int *header_data = (unsigned int *)encode_state->packed_header_data[idx]->buffer;
623 unsigned int length_in_bits;
625 assert(encode_state->packed_header_param[idx]);
626 param = (VAEncPackedHeaderParameterBuffer *)encode_state->packed_header_param[idx]->buffer;
627 length_in_bits = param->bit_length;
629 skip_emul_byte_cnt = intel_avc_find_skipemulcnt((unsigned char *)header_data, length_in_bits);
630 mfc_context->insert_object(ctx,
633 ALIGN(length_in_bits, 32) >> 5,
634 length_in_bits & 0x1f,
638 !param->has_emulation_bytes,
640 } else if (rate_control_mode == VA_RC_CBR) {
642 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
644 unsigned char *sei_data = NULL;
/* Build a picture timing SEI from the VUI HRD counters; removal delay
 * advances by i_cpb_removal_delay per frame (see hrd_context_update). */
646 int length_in_bits = build_avc_sei_buffer_timing(
647 mfc_context->vui_hrd.i_initial_cpb_removal_delay_length,
648 mfc_context->vui_hrd.i_initial_cpb_removal_delay,
650 mfc_context->vui_hrd.i_cpb_removal_delay_length, mfc_context->vui_hrd.i_cpb_removal_delay * mfc_context->vui_hrd.i_frame_number,
651 mfc_context->vui_hrd.i_dpb_output_delay_length,
654 mfc_context->insert_object(ctx,
656 (unsigned int *)sei_data,
657 ALIGN(length_in_bits, 32) >> 5,
658 length_in_bits & 0x1f,
/* Set up all buffer objects needed by the MFC PAK stage for one H.264 frame:
 * reconstructed surface + its direct-MV (DMV) buffers, reference surfaces
 * and their DMV buffers, the source YUV, and the coded (output) buffer.
 * Returns VA_STATUS_SUCCESS (no visible failure path in this extract).
 * NOTE(review): many lines were dropped by the extraction (dri_bo declaration,
 * the SNB 128-byte DMV override, loop closers, dri_bo_map/unmap around the
 * coded-buffer header write); comments describe visible statements only. */
668 VAStatus intel_mfc_avc_prepare(VADriverContextP ctx,
669 struct encode_state *encode_state,
670 struct intel_encoder_context *encoder_context)
672 struct i965_driver_data *i965 = i965_driver_data(ctx);
673 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
674 struct object_surface *obj_surface;
675 struct object_buffer *obj_buffer;
676 GenAvcSurface *gen6_avc_surface;
678 VAStatus vaStatus = VA_STATUS_SUCCESS;
679 int i, j, enable_avc_ildb = 0;
680 VAEncSliceParameterBufferH264 *slice_param;
681 struct i965_coded_buffer_segment *coded_buffer_segment;
682 VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
683 int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
684 int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
686 if (IS_GEN6(i965->intel.device_info)) {
687 /* On the SNB it should be fixed to 128 for the DMV buffer */
/* Deblocking output path is needed if any slice enables the in-loop
 * deblocking filter (idc != 1 means "not disabled"). */
691 for (j = 0; j < encode_state->num_slice_params_ext && enable_avc_ildb == 0; j++) {
692 assert(encode_state->slice_params_ext && encode_state->slice_params_ext[j]->buffer);
693 slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[j]->buffer;
695 for (i = 0; i < encode_state->slice_params_ext[j]->num_elements; i++) {
696 assert((slice_param->slice_type == SLICE_TYPE_I) ||
697 (slice_param->slice_type == SLICE_TYPE_SI) ||
698 (slice_param->slice_type == SLICE_TYPE_P) ||
699 (slice_param->slice_type == SLICE_TYPE_SP) ||
700 (slice_param->slice_type == SLICE_TYPE_B));
702 if (slice_param->disable_deblocking_filter_idc != 1) {
711 /*Setup all the input&output object*/
713 /* Setup current frame and current direct mv buffer*/
714 obj_surface = encode_state->reconstructed_object;
715 i965_check_alloc_surface_bo(ctx, obj_surface, 1, VA_FOURCC_NV12, SUBSAMPLE_YUV420);
/* Lazily allocate the top/bottom-field DMV buffers on the surface;
 * ownership transfers to obj_surface (freed via gen_free_avc_surface). */
717 if ( obj_surface->private_data == NULL) {
718 gen6_avc_surface = calloc(sizeof(GenAvcSurface), 1);
719 assert(gen6_avc_surface);
720 gen6_avc_surface->dmv_top =
721 dri_bo_alloc(i965->intel.bufmgr,
723 68 * width_in_mbs * height_in_mbs,
725 gen6_avc_surface->dmv_bottom =
726 dri_bo_alloc(i965->intel.bufmgr,
728 68 * width_in_mbs * height_in_mbs,
730 assert(gen6_avc_surface->dmv_top);
731 assert(gen6_avc_surface->dmv_bottom);
732 obj_surface->private_data = (void *)gen6_avc_surface;
733 obj_surface->free_private_data = (void *)gen_free_avc_surface;
/* Current frame's DMV buffers occupy the last two slots. */
735 gen6_avc_surface = (GenAvcSurface *) obj_surface->private_data;
736 mfc_context->direct_mv_buffers[NUM_MFC_DMV_BUFFERS - 2].bo = gen6_avc_surface->dmv_top;
737 mfc_context->direct_mv_buffers[NUM_MFC_DMV_BUFFERS - 1].bo = gen6_avc_surface->dmv_bottom;
738 dri_bo_reference(gen6_avc_surface->dmv_top);
739 dri_bo_reference(gen6_avc_surface->dmv_bottom);
/* Reconstructed pixels land post- or pre-deblocking depending on ILDB. */
741 if (enable_avc_ildb) {
742 mfc_context->post_deblocking_output.bo = obj_surface->bo;
743 dri_bo_reference(mfc_context->post_deblocking_output.bo);
745 mfc_context->pre_deblocking_output.bo = obj_surface->bo;
746 dri_bo_reference(mfc_context->pre_deblocking_output.bo);
749 mfc_context->surface_state.width = obj_surface->orig_width;
750 mfc_context->surface_state.height = obj_surface->orig_height;
751 mfc_context->surface_state.w_pitch = obj_surface->width;
752 mfc_context->surface_state.h_pitch = obj_surface->height;
754 /* Setup reference frames and direct mv buffers*/
755 for(i = 0; i < MAX_MFC_REFERENCE_SURFACES; i++) {
756 obj_surface = encode_state->reference_objects[i];
758 if (obj_surface && obj_surface->bo) {
759 mfc_context->reference_surfaces[i].bo = obj_surface->bo;
760 dri_bo_reference(obj_surface->bo);
762 /* Check DMV buffer */
763 if ( obj_surface->private_data == NULL) {
765 gen6_avc_surface = calloc(sizeof(GenAvcSurface), 1);
766 assert(gen6_avc_surface);
767 gen6_avc_surface->dmv_top =
768 dri_bo_alloc(i965->intel.bufmgr,
770 68 * width_in_mbs * height_in_mbs,
772 gen6_avc_surface->dmv_bottom =
773 dri_bo_alloc(i965->intel.bufmgr,
775 68 * width_in_mbs * height_in_mbs,
777 assert(gen6_avc_surface->dmv_top);
778 assert(gen6_avc_surface->dmv_bottom);
779 obj_surface->private_data = gen6_avc_surface;
780 obj_surface->free_private_data = gen_free_avc_surface;
783 gen6_avc_surface = (GenAvcSurface *) obj_surface->private_data;
784 /* Setup DMV buffer */
785 mfc_context->direct_mv_buffers[i*2].bo = gen6_avc_surface->dmv_top;
786 mfc_context->direct_mv_buffers[i*2+1].bo = gen6_avc_surface->dmv_bottom;
787 dri_bo_reference(gen6_avc_surface->dmv_top);
788 dri_bo_reference(gen6_avc_surface->dmv_bottom);
794 mfc_context->uncompressed_picture_source.bo = encode_state->input_yuv_object->bo;
795 dri_bo_reference(mfc_context->uncompressed_picture_source.bo);
/* Coded output: bitstream starts after the driver's header segment. */
797 obj_buffer = encode_state->coded_buf_object;
798 bo = obj_buffer->buffer_store->bo;
799 mfc_context->mfc_indirect_pak_bse_object.bo = bo;
800 mfc_context->mfc_indirect_pak_bse_object.offset = I965_CODEDBUFFER_HEADER_SIZE;
801 mfc_context->mfc_indirect_pak_bse_object.end_offset = ALIGN(obj_buffer->size_element - 0x1000, 0x1000);
802 dri_bo_reference(mfc_context->mfc_indirect_pak_bse_object.bo);
/* NOTE(review): dri_bo_map/unmap around this header write were dropped by
 * the extraction; bo->virtual is only valid while mapped. */
805 coded_buffer_segment = (struct i965_coded_buffer_segment *)bo->virtual;
806 coded_buffer_segment->mapped = 0;
807 coded_buffer_segment->codec = encoder_context->codec;
813 * The LUT uses the pair of 4-bit units: (shift, base) structure.
815 * So it is necessary to convert one cost into the nearest LUT format.
817 * 2^K *x = 2^n * (1 + deltaX)
818 * k + log2(x) = n + log2(1 + deltaX)
819 * log2(x) = n - k + log2(1 + deltaX)
820 * As X is in the range of [1, 15]
821 * 4 > n - k + log2(1 + deltaX) >= 0
822 * => n + log2(1 + deltaX) >= k > n - 4 + log2(1 + deltaX)
823 * Then we can derive the corresponding K and get the nearest LUT format.
/* Convert a cost value into the hardware's packed (shift:4, base:4) LUT
 * format, choosing the representable value base<<shift nearest to 'value',
 * then clamp against 'max' (itself in LUT format).  See the derivation in
 * the comment block above.
 * NOTE(review): the extraction dropped the small-value fast path, the
 * 'ret'/'error' initialisation, range clamps on j/base and the final
 * clamp/return lines -- comments cover only the visible search loop. */
825 int intel_format_lutvalue(int value, int max)
828 int logvalue, temp1, temp2;
833 logvalue = (int)(log2f((float)value));
837 int error, temp_value, base, j, temp_err;
/* Candidate shifts: the 4 values of j satisfying n-4 < k <= n. */
839 j = logvalue - 4 + 1;
841 for(; j <= logvalue; j++) {
/* Round value up to the nearest multiple of 2^j to get the base. */
845 base = (value + (1 << (j - 1)) - 1) >> j;
850 temp_value = base << j;
/* Keep the (shift, base) pair with the smallest reconstruction error. */
851 temp_err = abs(value - temp_value);
852 if (temp_err < error) {
854 ret = (j << 4) | base;
/* Decode both LUT values to raw costs for the final comparison with max. */
860 temp1 = (ret & 0xf) << ((ret & 0xf0) >> 4);
861 temp2 = (max & 0xf) << ((max & 0xf0) >> 4);
870 #define VP8_QP_MAX 128
/*
 * Map a quantizer value to the Lagrange multiplier used for mode/MV
 * cost weighting: lambda = round(2^(qp/6 - 2)), the conventional H.264
 * lambda model.
 * NOTE(review): the initial "value = qp" style assignment is elided in
 * this excerpt — confirm against the full source.
 */
873 static float intel_lambda_qp(int qp)
875     float value, lambdaf;
877     value = value / 6 - 2;
880     lambdaf = roundf(powf(2, value));
/*
 * Fill the per-QP VME state message with macroblock-mode and MV costs
 * for H.264.  Costs are scaled by lambda(qp) and packed into the
 * hardware LUT format via intel_format_lutvalue().  I slices get only
 * intra costs; P/B slices get intra + inter costs, with a flat fallback
 * table (0x4a) on one path (elided condition — presumably high QP).
 */
885 void intel_h264_calc_mbmvcost_qp(int qp,
887 uint8_t *vme_state_message)
889 int m_cost, j, mv_count;
890 float lambda, m_costf;
892 assert(qp <= QP_MAX);
893 lambda = intel_lambda_qp(qp);
/* chroma intra is free; reference-ID cost is clamped to 0x8f */
896 vme_state_message[MODE_CHROMA_INTRA] = 0;
897 vme_state_message[MODE_REFID_COST] = intel_format_lutvalue(m_cost, 0x8f);
/* ---- I slice: intra mode costs only ---- */
899 if (slice_type == SLICE_TYPE_I) {
900 vme_state_message[MODE_INTRA_16X16] = 0;
902 vme_state_message[MODE_INTRA_8X8] = intel_format_lutvalue(m_cost, 0x8f);
903 m_cost = lambda * 16;
904 vme_state_message[MODE_INTRA_4X4] = intel_format_lutvalue(m_cost, 0x8f);
906 vme_state_message[MODE_INTRA_NONPRED] = intel_format_lutvalue(m_cost, 0x6f);
/* MV cost ramp: (log2(distance + 1) + 1.718) * lambda for increasing
 * motion-vector magnitudes (slots MV0..MV7) */
909 vme_state_message[MODE_INTER_MV0] = intel_format_lutvalue(m_cost, 0x6f);
910 for (j = 1; j < 3; j++) {
911 m_costf = (log2f((float)(j + 1)) + 1.718f) * lambda;
912 m_cost = (int)m_costf;
913 vme_state_message[MODE_INTER_MV0 + j] = intel_format_lutvalue(m_cost, 0x6f);
916 for (j = 4; j <= 64; j *= 2) {
917 m_costf = (log2f((float)(j + 1)) + 1.718f) * lambda;
918 m_cost = (int)m_costf;
919 vme_state_message[MODE_INTER_MV0 + mv_count] = intel_format_lutvalue(m_cost, 0x6f);
/* ---- P/B slice, fallback path: flat hand-tuned cost table ---- */
924 vme_state_message[MODE_INTRA_16X16] = 0x4a;
925 vme_state_message[MODE_INTRA_8X8] = 0x4a;
926 vme_state_message[MODE_INTRA_4X4] = 0x4a;
927 vme_state_message[MODE_INTRA_NONPRED] = 0x4a;
928 vme_state_message[MODE_INTER_16X16] = 0x4a;
929 vme_state_message[MODE_INTER_16X8] = 0x4a;
930 vme_state_message[MODE_INTER_8X8] = 0x4a;
931 vme_state_message[MODE_INTER_8X4] = 0x4a;
932 vme_state_message[MODE_INTER_4X4] = 0x4a;
933 vme_state_message[MODE_INTER_BWD] = 0x2a;
/* ---- P/B slice, lambda-scaled path: intra costs ---- */
936 m_costf = lambda * 10;
937 vme_state_message[MODE_INTRA_16X16] = intel_format_lutvalue(m_cost, 0x8f);
938 m_cost = lambda * 14;
939 vme_state_message[MODE_INTRA_8X8] = intel_format_lutvalue(m_cost, 0x8f);
940 m_cost = lambda * 24;
941 vme_state_message[MODE_INTRA_4X4] = intel_format_lutvalue(m_cost, 0x8f);
942 m_costf = lambda * 3.5;
944 vme_state_message[MODE_INTRA_NONPRED] = intel_format_lutvalue(m_cost, 0x6f);
/* P slice: forward-only inter costs; BWD unused */
945 if (slice_type == SLICE_TYPE_P) {
946 m_costf = lambda * 2.5;
948 vme_state_message[MODE_INTER_16X16] = intel_format_lutvalue(m_cost, 0x8f);
949 m_costf = lambda * 4;
951 vme_state_message[MODE_INTER_16X8] = intel_format_lutvalue(m_cost, 0x8f);
952 m_costf = lambda * 1.5;
954 vme_state_message[MODE_INTER_8X8] = intel_format_lutvalue(m_cost, 0x6f);
955 m_costf = lambda * 3;
957 vme_state_message[MODE_INTER_8X4] = intel_format_lutvalue(m_cost, 0x6f);
958 m_costf = lambda * 5;
960 vme_state_message[MODE_INTER_4X4] = intel_format_lutvalue(m_cost, 0x6f);
961 /* BWD is not used in P-frame */
962 vme_state_message[MODE_INTER_BWD] = 0;
/* B slice: inter costs including backward prediction */
964 m_costf = lambda * 2.5;
966 vme_state_message[MODE_INTER_16X16] = intel_format_lutvalue(m_cost, 0x8f);
967 m_costf = lambda * 5.5;
969 vme_state_message[MODE_INTER_16X8] = intel_format_lutvalue(m_cost, 0x8f);
970 m_costf = lambda * 3.5;
972 vme_state_message[MODE_INTER_8X8] = intel_format_lutvalue(m_cost, 0x6f);
973 m_costf = lambda * 5.0;
975 vme_state_message[MODE_INTER_8X4] = intel_format_lutvalue(m_cost, 0x6f);
976 m_costf = lambda * 6.5;
978 vme_state_message[MODE_INTER_4X4] = intel_format_lutvalue(m_cost, 0x6f);
979 m_costf = lambda * 1.5;
981 vme_state_message[MODE_INTER_BWD] = intel_format_lutvalue(m_cost, 0x6f);
/*
 * Refresh the H.264 VME mode/MV cost message for the current frame.
 * QP comes from the picture/slice params under CQP rate control, or
 * from the BRC state (per temporal layer, per slice type) otherwise,
 * then the table is rebuilt by intel_h264_calc_mbmvcost_qp().
 */
987 void intel_vme_update_mbmv_cost(VADriverContextP ctx,
988 struct encode_state *encode_state,
989 struct intel_encoder_context *encoder_context)
991 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
992 struct gen6_vme_context *vme_context = encoder_context->vme_context;
993 VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
994 VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
996 uint8_t *vme_state_message = (uint8_t *)(vme_context->vme_state_message);
/* normalize SP->P and SI->I before indexing per-slice-type state */
998 int slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
1000 if (encoder_context->rate_control_mode == VA_RC_CQP)
1001 qp = pic_param->pic_init_qp + slice_param->slice_qp_delta;
1003 qp = mfc_context->brc.qp_prime_y[encoder_context->layer.curr_frame_layer_id][slice_type];
/* nothing to update if the VME state message is not allocated */
1005 if (vme_state_message == NULL)
1008 intel_h264_calc_mbmvcost_qp(qp, slice_type, vme_state_message);
/*
 * VP8 variant of the VME cost update.  Key frames behave like I slices,
 * inter frames like P slices.  The VP8 quantization index (0..127) is
 * rescaled to the H.264 QP range before computing lambda so the shared
 * cost model can be reused.
 */
1011 void intel_vme_vp8_update_mbmv_cost(VADriverContextP ctx,
1012 struct encode_state *encode_state,
1013 struct intel_encoder_context *encoder_context)
1015 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1016 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1017 VAEncPictureParameterBufferVP8 *pic_param = (VAEncPictureParameterBufferVP8 *)encode_state->pic_param_ext->buffer;
1018 VAQMatrixBufferVP8 *q_matrix = (VAQMatrixBufferVP8 *)encode_state->q_matrix->buffer;
1019 int qp, m_cost, j, mv_count;
1020 uint8_t *vme_state_message = (uint8_t *)(vme_context->vme_state_message);
1021 float lambda, m_costf;
/* VP8 frame_type 0 == key frame */
1023 int is_key_frame = !pic_param->pic_flags.bits.frame_type;
1024 int slice_type = (is_key_frame ? SLICE_TYPE_I : SLICE_TYPE_P);
1026 if (vme_state_message == NULL)
1029 if (encoder_context->rate_control_mode == VA_RC_CQP)
1030 qp = q_matrix->quantization_index[0];
1032 qp = mfc_context->brc.qp_prime_y[encoder_context->layer.curr_frame_layer_id][slice_type];
/* rescale VP8 q-index to the H.264 QP domain for the lambda model */
1034 lambda = intel_lambda_qp(qp * QP_MAX / VP8_QP_MAX);
1037 vme_state_message[MODE_CHROMA_INTRA] = intel_format_lutvalue(m_cost, 0x8f);
/* key frame: intra costs only (VP8 has no 8x8 intra mode here) */
1040 vme_state_message[MODE_INTRA_16X16] = 0;
1041 m_cost = lambda * 16;
1042 vme_state_message[MODE_INTRA_4X4] = intel_format_lutvalue(m_cost, 0x8f);
1043 m_cost = lambda * 3;
1044 vme_state_message[MODE_INTRA_NONPRED] = intel_format_lutvalue(m_cost, 0x6f);
/* MV cost ramp, same model as the H.264 path */
1047 vme_state_message[MODE_INTER_MV0] = intel_format_lutvalue(m_cost, 0x6f);
1048 for (j = 1; j < 3; j++) {
1049 m_costf = (log2f((float)(j + 1)) + 1.718f) * lambda;
1050 m_cost = (int)m_costf;
1051 vme_state_message[MODE_INTER_MV0 + j] = intel_format_lutvalue(m_cost, 0x6f);
1054 for (j = 4; j <= 64; j *= 2) {
1055 m_costf = (log2f((float)(j + 1)) + 1.718f) * lambda;
1056 m_cost = (int)m_costf;
1057 vme_state_message[MODE_INTER_MV0 + mv_count] = intel_format_lutvalue(m_cost, 0x6f);
/* flat fallback table (elided condition — presumably high QP) */
1062 vme_state_message[MODE_INTRA_16X16] = 0x4a;
1063 vme_state_message[MODE_INTRA_4X4] = 0x4a;
1064 vme_state_message[MODE_INTRA_NONPRED] = 0x4a;
1065 vme_state_message[MODE_INTER_16X16] = 0x4a;
1066 vme_state_message[MODE_INTER_16X8] = 0x4a;
1067 vme_state_message[MODE_INTER_8X8] = 0x4a;
1068 vme_state_message[MODE_INTER_4X4] = 0x4a;
1069 vme_state_message[MODE_INTER_BWD] = 0;
/* lambda-scaled costs for inter (non-key) frames */
1072 m_costf = lambda * 10;
1073 vme_state_message[MODE_INTRA_16X16] = intel_format_lutvalue(m_cost, 0x8f);
1074 m_cost = lambda * 24;
1075 vme_state_message[MODE_INTRA_4X4] = intel_format_lutvalue(m_cost, 0x8f);
1077 m_costf = lambda * 3.5;
1079 vme_state_message[MODE_INTRA_NONPRED] = intel_format_lutvalue(m_cost, 0x6f);
1081 m_costf = lambda * 2.5;
1083 vme_state_message[MODE_INTER_16X16] = intel_format_lutvalue(m_cost, 0x8f);
1084 m_costf = lambda * 4;
1086 vme_state_message[MODE_INTER_16X8] = intel_format_lutvalue(m_cost, 0x8f);
1087 m_costf = lambda * 1.5;
1089 vme_state_message[MODE_INTER_8X8] = intel_format_lutvalue(m_cost, 0x6f);
1090 m_costf = lambda * 5;
1092 vme_state_message[MODE_INTER_4X4] = intel_format_lutvalue(m_cost, 0x6f);
1093 /* BWD is not used in P-frame */
1094 vme_state_message[MODE_INTER_BWD] = 0;
1098 #define MB_SCOREBOARD_A (1 << 0)
1099 #define MB_SCOREBOARD_B (1 << 1)
1100 #define MB_SCOREBOARD_C (1 << 2)
/*
 * Program the Gen7 VFE hardware scoreboard so each macroblock stalls
 * until its intra/inter prediction neighbours A (left), B (above) and
 * C (above-right) have completed — the classic wavefront dependency.
 */
1102 gen7_vme_scoreboard_init(VADriverContextP ctx, struct gen6_vme_context *vme_context)
1104 vme_context->gpe_context.vfe_desc5.scoreboard0.enable = 1;
/* stalling scoreboard: a thread blocks until its dependencies clear */
1105 vme_context->gpe_context.vfe_desc5.scoreboard0.type = SCOREBOARD_STALLING;
1106 vme_context->gpe_context.vfe_desc5.scoreboard0.mask = (MB_SCOREBOARD_A |
1110 /* In VME prediction the current mb depends on the neighbour
1111 * A/B/C macroblock. So the left/up/up-right dependency should
/* dependency 0: left neighbour (A) at (-1, 0) */
1114 vme_context->gpe_context.vfe_desc6.scoreboard1.delta_x0 = -1;
1115 vme_context->gpe_context.vfe_desc6.scoreboard1.delta_y0 = 0;
/* dependency 1: top neighbour (B) at (0, -1) */
1116 vme_context->gpe_context.vfe_desc6.scoreboard1.delta_x1 = 0;
1117 vme_context->gpe_context.vfe_desc6.scoreboard1.delta_y1 = -1;
/* dependency 2: top-right neighbour (C) at (1, -1) */
1118 vme_context->gpe_context.vfe_desc6.scoreboard1.delta_x2 = 1;
1119 vme_context->gpe_context.vfe_desc6.scoreboard1.delta_y2 = -1;
/* no further dependency slots used */
1121 vme_context->gpe_context.vfe_desc7.dword = 0;
1125 /* check whether the mb of (x_index, y_index) is out of bound */
/*
 * Return non-zero (out of bounds) when MB (x_index, y_index) lies
 * outside the picture or outside the slice [first_mb, first_mb+num_mb];
 * zero when the walker may process it.
 * NOTE(review): the upper slice bound uses ">" rather than ">=",
 * which admits one MB past the slice end — confirm this is intended.
 */
1126 static inline int loop_in_bounds(int x_index, int y_index, int first_mb, int num_mb, int mb_width, int mb_height)
1129 if (x_index < 0 || x_index >= mb_width)
1131 if (y_index < 0 || y_index >= mb_height)
/* raster-order MB index used for the slice-range check */
1134 mb_index = y_index * mb_width + x_index;
1135 if (mb_index < first_mb || mb_index > (first_mb + num_mb))
/*
 * Emit one MEDIA_OBJECT command per macroblock into the VME batch
 * buffer, walking each H.264 slice in a diagonal (wavefront) order that
 * matches the scoreboard dependencies set up in
 * gen7_vme_scoreboard_init().  Two passes are used: the main diagonal
 * walk over x < mb_width - 2, then a second walk that covers the
 * right-edge columns.  Per-MB QP is emitted when ROI is enabled.
 */
1141 gen7_vme_walker_fill_vme_batchbuffer(VADriverContextP ctx,
1142 struct encode_state *encode_state,
1143 int mb_width, int mb_height,
1145 int transform_8x8_mode_flag,
1146 struct intel_encoder_context *encoder_context)
1148 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1151 unsigned int *command_ptr;
1152 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1153 VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
1154 VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
1155 int qp,qp_mb,qp_index;
1156 int slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
/* frame-level QP: explicit under CQP, otherwise from BRC state */
1158 if (encoder_context->rate_control_mode == VA_RC_CQP)
1159 qp = pic_param->pic_init_qp + slice_param->slice_qp_delta;
1161 qp = mfc_context->brc.qp_prime_y[encoder_context->layer.curr_frame_layer_id][slice_type];
1163 #define USE_SCOREBOARD (1 << 21)
/* map the batch BO writable and stream commands directly into it */
1165 dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
1166 command_ptr = vme_context->vme_batchbuffer.bo->virtual;
1168 for (s = 0; s < encode_state->num_slice_params_ext; s++) {
1169 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[s]->buffer;
1170 int first_mb = pSliceParameter->macroblock_address;
1171 int num_mb = pSliceParameter->num_macroblocks;
1172 unsigned int mb_intra_ub, score_dep;
1173 int x_outer, y_outer, x_inner, y_inner;
1174 int xtemp_outer = 0;
/* slice start position in MB coordinates */
1176 x_outer = first_mb % mb_width;
1177 y_outer = first_mb / mb_width;
/* pass 1: diagonal wavefront over the bulk of the picture */
1180 for (; x_outer < (mb_width -2 ) && !loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
1183 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
/* accumulate intra-prediction neighbour availability (A/B/C/D)
 * and the matching scoreboard dependency bits */
1187 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
1188 score_dep |= MB_SCOREBOARD_A;
1190 if (y_inner != mb_row) {
1191 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
1192 score_dep |= MB_SCOREBOARD_B;
1194 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
1195 if (x_inner != (mb_width -1)) {
1196 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
1197 score_dep |= MB_SCOREBOARD_C;
/* MEDIA_OBJECT dword length is 9 - 2 per the command layout */
1201 *command_ptr++ = (CMD_MEDIA_OBJECT | (9 - 2));
1202 *command_ptr++ = kernel;
1203 *command_ptr++ = USE_SCOREBOARD;
1206 /* the (X, Y) term of scoreboard */
1207 *command_ptr++ = ((y_inner << 16) | x_inner);
1208 *command_ptr++ = score_dep;
/* inline payload: MB position, picture width, flags and intra mask */
1210 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
1211 *command_ptr++ = ((1 << 18) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
1212 /* QP occupies one byte */
1213 if (vme_context->roi_enabled) {
/* ROI: per-MB QP from the qp_per_mb map, else the frame QP is used */
1214 qp_index = y_inner * mb_width + x_inner;
1215 qp_mb = *(vme_context->qp_per_mb + qp_index);
1218 *command_ptr++ = qp_mb;
/* pass 2: restart at column mb_width - 2 to cover the right edge */
1225 xtemp_outer = mb_width - 2;
1226 if (xtemp_outer < 0)
1228 x_outer = xtemp_outer;
1229 y_outer = first_mb / mb_width;
1230 for (;!loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
1233 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
/* same neighbour-availability / scoreboard logic as pass 1 */
1237 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
1238 score_dep |= MB_SCOREBOARD_A;
1240 if (y_inner != mb_row) {
1241 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
1242 score_dep |= MB_SCOREBOARD_B;
1244 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
1246 if (x_inner != (mb_width -1)) {
1247 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
1248 score_dep |= MB_SCOREBOARD_C;
1252 *command_ptr++ = (CMD_MEDIA_OBJECT | (9 - 2));
1253 *command_ptr++ = kernel;
1254 *command_ptr++ = USE_SCOREBOARD;
1257 /* the (X, Y) term of scoreboard */
1258 *command_ptr++ = ((y_inner << 16) | x_inner);
1259 *command_ptr++ = score_dep;
1261 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
1262 *command_ptr++ = ((1 << 18) | (1 << 16) | transform_8x8_mode_flag | (mb_intra_ub << 8));
1263 /* qp occupies one byte */
1264 if (vme_context->roi_enabled) {
1265 qp_index = y_inner * mb_width + x_inner;
1266 qp_mb = *(vme_context->qp_per_mb + qp_index);
1269 *command_ptr++ = qp_mb;
1275 if (x_outer >= mb_width) {
1277 x_outer = xtemp_outer;
/* terminate the batch and release the mapping */
1283 *command_ptr++ = MI_BATCH_BUFFER_END;
1285 dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/*
 * Pack one reference-picture entry for MFX_AVC_REF_IDX_STATE:
 *   bit 6      - long-term reference flag
 *   bit 5      - "frame" flag (set when the picture is not a single
 *                field, i.e. neither or both field flags present)
 *   bits 4:1   - frame store index
 *   bit 0      - bottom-field flag (only when not also top field)
 */
1289 intel_get_ref_idx_state_1(VAPictureH264 *va_pic, unsigned int frame_store_id)
1291 unsigned int is_long_term =
1292 !!(va_pic->flags & VA_PICTURE_H264_LONG_TERM_REFERENCE);
1293 unsigned int is_top_field =
1294 !!(va_pic->flags & VA_PICTURE_H264_TOP_FIELD);
1295 unsigned int is_bottom_field =
1296 !!(va_pic->flags & VA_PICTURE_H264_BOTTOM_FIELD);
1298 return ((is_long_term << 6) |
1299 ((is_top_field ^ is_bottom_field ^ 1) << 5) |
1300 (frame_store_id << 1) |
1301 ((is_top_field ^ 1) & is_bottom_field));
/*
 * Emit the two MFX_AVC_REF_IDX_STATE commands (list L0 and L1) for the
 * PAK stage.  Entries default to 0x80 (invalid); the single reference
 * actually chosen by the VME stage is looked up in the DPB
 * (encode_state->reference_objects) and its packed descriptor is
 * written at the ref_idx slot the VME used.
 */
1305 intel_mfc_avc_ref_idx_state(VADriverContextP ctx,
1306 struct encode_state *encode_state,
1307 struct intel_encoder_context *encoder_context)
1309 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1310 struct intel_batchbuffer *batch = encoder_context->base.batch;
1312 struct object_surface *obj_surface;
1313 unsigned int fref_entry, bref_entry;
1315 VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
/* 0x80 per byte marks every entry invalid until proven otherwise */
1317 fref_entry = 0x80808080;
1318 bref_entry = 0x80808080;
1319 slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
/* ---- forward list (L0): used by both P and B slices ---- */
1321 if (slice_type == SLICE_TYPE_P || slice_type == SLICE_TYPE_B) {
1322 int ref_idx_l0 = (vme_context->ref_index_in_mb[0] & 0xff);
1324 if (ref_idx_l0 > 3) {
1325 WARN_ONCE("ref_idx_l0 is out of range\n");
/* find the VME's chosen L0 reference in the DPB to get its
 * frame store index */
1329 obj_surface = vme_context->used_reference_objects[0];
1331 for (i = 0; i < 16; i++) {
1333 obj_surface == encode_state->reference_objects[i]) {
1338 if (frame_index == -1) {
1339 WARN_ONCE("RefPicList0 is not found in DPB!\n");
/* overwrite the one byte for this ref_idx with the packed entry */
1341 int ref_idx_l0_shift = ref_idx_l0 * 8;
1342 fref_entry &= ~(0xFF << ref_idx_l0_shift);
1343 fref_entry += (intel_get_ref_idx_state_1(vme_context->used_references[0], frame_index) << ref_idx_l0_shift);
/* ---- backward list (L1): B slices only ---- */
1347 if (slice_type == SLICE_TYPE_B) {
1348 int ref_idx_l1 = (vme_context->ref_index_in_mb[1] & 0xff);
1350 if (ref_idx_l1 > 3) {
1351 WARN_ONCE("ref_idx_l1 is out of range\n");
1355 obj_surface = vme_context->used_reference_objects[1];
1357 for (i = 0; i < 16; i++) {
1359 obj_surface == encode_state->reference_objects[i]) {
1364 if (frame_index == -1) {
1365 WARN_ONCE("RefPicList1 is not found in DPB!\n");
1367 int ref_idx_l1_shift = ref_idx_l1 * 8;
1368 bref_entry &= ~(0xFF << ref_idx_l1_shift);
1369 bref_entry += (intel_get_ref_idx_state_1(vme_context->used_references[1], frame_index) << ref_idx_l1_shift);
/* L0 command: 10 dwords, first payload dword holds the entries */
1373 BEGIN_BCS_BATCH(batch, 10);
1374 OUT_BCS_BATCH(batch, MFX_AVC_REF_IDX_STATE | 8);
1375 OUT_BCS_BATCH(batch, 0);                        //Select L0
1376 OUT_BCS_BATCH(batch, fref_entry);               //Only 1 reference
1377 for(i = 0; i < 7; i++) {
1378 OUT_BCS_BATCH(batch, 0x80808080);
1380 ADVANCE_BCS_BATCH(batch);
/* L1 command: emitted unconditionally; stays all-invalid for P */
1382 BEGIN_BCS_BATCH(batch, 10);
1383 OUT_BCS_BATCH(batch, MFX_AVC_REF_IDX_STATE | 8);
1384 OUT_BCS_BATCH(batch, 1);                        //Select L1
1385 OUT_BCS_BATCH(batch, bref_entry);               //Only 1 reference
1386 for(i = 0; i < 7; i++) {
1387 OUT_BCS_BATCH(batch, 0x80808080);
1389 ADVANCE_BCS_BATCH(batch);
/*
 * Fill the VME state message for MPEG-2 encoding: MV search range by
 * level (LOW/MAIN/HIGH), picture dimensions in MBs, and — for non-intra
 * pictures — MV cost ramp plus 16x16-only mode costs (MPEG-2 VME here
 * performs only 16x16 search, so sub-partition costs are zeroed).
 */
1393 void intel_vme_mpeg2_state_setup(VADriverContextP ctx,
1394 struct encode_state *encode_state,
1395 struct intel_encoder_context *encoder_context)
1397 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1398 uint32_t *vme_state_message = (uint32_t *)(vme_context->vme_state_message);
1399 VAEncSequenceParameterBufferMPEG2 *seq_param = (VAEncSequenceParameterBufferMPEG2 *)encode_state->seq_param_ext->buffer;
1400 int width_in_mbs = ALIGN(seq_param->picture_width, 16) / 16;
1401 int height_in_mbs = ALIGN(seq_param->picture_height, 16) / 16;
1402 uint32_t mv_x, mv_y;
1403 VAEncSliceParameterBufferMPEG2 *slice_param = NULL;
1404 VAEncPictureParameterBufferMPEG2 *pic_param = NULL;
1405 slice_param = (VAEncSliceParameterBufferMPEG2 *)encode_state->slice_params_ext[0]->buffer;
/* MV search range depends on the MPEG-2 level (values elided here) */
1407 if (vme_context->mpeg2_level == MPEG2_LEVEL_LOW) {
1410 } else if (vme_context->mpeg2_level == MPEG2_LEVEL_MAIN) {
1413 } else if (vme_context->mpeg2_level == MPEG2_LEVEL_HIGH) {
1417 WARN_ONCE("Incorrect Mpeg2 level setting!\n");
1422 pic_param = (VAEncPictureParameterBufferMPEG2 *)encode_state->pic_param_ext->buffer;
/* inter pictures (P/B): lambda-weighted MV and mode costs */
1423 if (pic_param->picture_type != VAEncPictureTypeIntra) {
1424 int qp, m_cost, j, mv_count;
1425 float lambda, m_costf;
1426 slice_param = (VAEncSliceParameterBufferMPEG2 *)
1427 encode_state->slice_params_ext[0]->buffer;
/* MPEG-2 uses quantiser_scale_code directly as the lambda input */
1428 qp = slice_param->quantiser_scale_code;
1429 lambda = intel_lambda_qp(qp);
1430 /* No Intra prediction. So it is zero */
1431 vme_state_message[MODE_INTRA_8X8] = 0;
1432 vme_state_message[MODE_INTRA_4X4] = 0;
1433 vme_state_message[MODE_INTER_MV0] = 0;
/* MV cost ramp, same model as the H.264 path */
1434 for (j = 1; j < 3; j++) {
1435 m_costf = (log2f((float)(j + 1)) + 1.718f) * lambda;
1436 m_cost = (int)m_costf;
1437 vme_state_message[MODE_INTER_MV0 + j] = intel_format_lutvalue(m_cost, 0x6f);
1440 for (j = 4; j <= 64; j *= 2) {
1441 m_costf = (log2f((float)(j + 1)) + 1.718f) * lambda;
1442 m_cost = (int)m_costf;
1443 vme_state_message[MODE_INTER_MV0 + mv_count] =
1444 intel_format_lutvalue(m_cost, 0x6f);
1448 /* It can only perform the 16x16 search. So mode cost can be ignored for
1449 * the other mode. for example: 16x8/8x8
1451 vme_state_message[MODE_INTRA_16X16] = intel_format_lutvalue(m_cost, 0x8f);
1452 vme_state_message[MODE_INTER_16X16] = intel_format_lutvalue(m_cost, 0x8f);
1454 vme_state_message[MODE_INTER_16X8] = 0;
1455 vme_state_message[MODE_INTER_8X8] = 0;
1456 vme_state_message[MODE_INTER_8X4] = 0;
1457 vme_state_message[MODE_INTER_4X4] = 0;
1458 vme_state_message[MODE_INTER_BWD] = intel_format_lutvalue(m_cost, 0x6f);
/* pack the MV range and picture size into their message slots */
1461 vme_state_message[MPEG2_MV_RANGE] = (mv_y << 16) | (mv_x);
1463 vme_state_message[MPEG2_PIC_WIDTH_HEIGHT] = (height_in_mbs << 16) |
/*
 * MPEG-2 counterpart of gen7_vme_walker_fill_vme_batchbuffer(): emit
 * one MEDIA_OBJECT per macroblock over the whole picture (MPEG-2 path
 * treats the frame as a single slice of mb_width * mb_height MBs),
 * using the same two-pass diagonal wavefront walk and scoreboard
 * dependencies.  MEDIA_OBJECT is 8 dwords here (no per-MB QP payload).
 */
1468 gen7_vme_mpeg2_walker_fill_vme_batchbuffer(VADriverContextP ctx,
1469 struct encode_state *encode_state,
1470 int mb_width, int mb_height,
1472 struct intel_encoder_context *encoder_context)
1474 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1475 unsigned int *command_ptr;
1477 #define MPEG2_SCOREBOARD (1 << 21)
/* map the batch BO writable and stream commands directly into it */
1479 dri_bo_map(vme_context->vme_batchbuffer.bo, 1);
1480 command_ptr = vme_context->vme_batchbuffer.bo->virtual;
1483 unsigned int mb_intra_ub, score_dep;
1484 int x_outer, y_outer, x_inner, y_inner;
1485 int xtemp_outer = 0;
/* whole picture processed as one region */
1487 int num_mb = mb_width * mb_height;
/* pass 1: diagonal wavefront over the bulk of the picture */
1493 for (; x_outer < (mb_width -2 ) && !loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
1496 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
/* neighbour availability (A/B/C/D) and scoreboard bits */
1500 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
1501 score_dep |= MB_SCOREBOARD_A;
1504 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
1505 score_dep |= MB_SCOREBOARD_B;
1508 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
1510 if (x_inner != (mb_width -1)) {
1511 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
1512 score_dep |= MB_SCOREBOARD_C;
/* MEDIA_OBJECT dword length is 8 - 2 per the command layout */
1516 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
1517 *command_ptr++ = kernel;
1518 *command_ptr++ = MPEG2_SCOREBOARD;
1521 /* the (X, Y) term of scoreboard */
1522 *command_ptr++ = ((y_inner << 16) | x_inner);
1523 *command_ptr++ = score_dep;
/* inline payload: MB position, picture width, flags and intra mask */
1525 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
1526 *command_ptr++ = ((1 << 18) | (1 << 16) | (mb_intra_ub << 8));
/* pass 2: restart at column mb_width - 2 to cover the right edge */
1533 xtemp_outer = mb_width - 2;
1534 if (xtemp_outer < 0)
1536 x_outer = xtemp_outer;
1538 for (;!loop_in_bounds(x_outer, y_outer, first_mb, num_mb, mb_width, mb_height); ) {
1541 for (; !loop_in_bounds(x_inner, y_inner, first_mb, num_mb, mb_width, mb_height);) {
1545 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_AE;
1546 score_dep |= MB_SCOREBOARD_A;
1549 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_B;
1550 score_dep |= MB_SCOREBOARD_B;
1553 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_D;
1555 if (x_inner != (mb_width -1)) {
1556 mb_intra_ub |= INTRA_PRED_AVAIL_FLAG_C;
1557 score_dep |= MB_SCOREBOARD_C;
1561 *command_ptr++ = (CMD_MEDIA_OBJECT | (8 - 2));
1562 *command_ptr++ = kernel;
1563 *command_ptr++ = MPEG2_SCOREBOARD;
1566 /* the (X, Y) term of scoreboard */
1567 *command_ptr++ = ((y_inner << 16) | x_inner);
1568 *command_ptr++ = score_dep;
1570 *command_ptr++ = (mb_width << 16 | y_inner << 8 | x_inner);
1571 *command_ptr++ = ((1 << 18) | (1 << 16) | (mb_intra_ub << 8));
1577 if (x_outer >= mb_width) {
1579 x_outer = xtemp_outer;
/* terminate the batch and release the mapping */
1585 *command_ptr++ = MI_BATCH_BUFFER_END;
1587 dri_bo_unmap(vme_context->vme_batchbuffer.bo);
/*
 * Scan ref_list for the valid reference picture temporally closest to
 * curr_pic, measured by TopFieldOrderCnt (POC) distance.  The visible
 * comparison (tmp > 0 && tmp < min) selects the nearest PAST picture;
 * NOTE(review): a symmetric future-picture branch (for list 1) appears
 * to be elided from this excerpt — confirm against the full source.
 * Returns the index of the chosen entry.
 */
1592 avc_temporal_find_surface(VAPictureH264 *curr_pic,
1593 VAPictureH264 *ref_list,
1597 int i, found = -1, min = 0x7FFFFFFF;
1599 for (i = 0; i < num_pictures; i++) {
/* skip invalid or unset DPB entries */
1602 if ((ref_list[i].flags & VA_PICTURE_H264_INVALID) ||
1603 (ref_list[i].picture_id == VA_INVALID_SURFACE))
/* signed POC distance: positive means the reference is in the past */
1606 tmp = curr_pic->TopFieldOrderCnt - ref_list[i].TopFieldOrderCnt;
1611 if (tmp > 0 && tmp < min) {
/*
 * Pick the single reference picture the VME stage will use for the
 * given list (0 = forward, 1 = backward), record it in the VME context
 * (used_references / used_reference_objects / ref_index_in_mb), and
 * program its surface state through the vme_source_surface_state
 * callback.  When the list has exactly one active entry it is taken
 * directly; otherwise the temporally nearest picture is chosen via
 * avc_temporal_find_surface().  On failure the bookkeeping is cleared.
 */
1621 intel_avc_vme_reference_state(VADriverContextP ctx,
1622 struct encode_state *encode_state,
1623 struct intel_encoder_context *encoder_context,
1626 void (* vme_source_surface_state)(
1627 VADriverContextP ctx,
1629 struct object_surface *obj_surface,
1630 struct intel_encoder_context *encoder_context))
1632 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1633 struct object_surface *obj_surface = NULL;
1634 struct i965_driver_data *i965 = i965_driver_data(ctx);
1635 VASurfaceID ref_surface_id;
1636 VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
1637 VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
1638 int max_num_references;
1639 VAPictureH264 *curr_pic;
1640 VAPictureH264 *ref_list;
/* choose the active reference list and its length */
1643 if (list_index == 0) {
1644 max_num_references = pic_param->num_ref_idx_l0_active_minus1 + 1;
1645 ref_list = slice_param->RefPicList0;
1647 max_num_references = pic_param->num_ref_idx_l1_active_minus1 + 1;
1648 ref_list = slice_param->RefPicList1;
/* single active reference: take RefPicList[0] as-is */
1651 if (max_num_references == 1) {
1652 if (list_index == 0) {
1653 ref_surface_id = slice_param->RefPicList0[0].picture_id;
1654 vme_context->used_references[0] = &slice_param->RefPicList0[0];
1656 ref_surface_id = slice_param->RefPicList1[0].picture_id;
1657 vme_context->used_references[1] = &slice_param->RefPicList1[0];
1660 if (ref_surface_id != VA_INVALID_SURFACE)
1661 obj_surface = SURFACE(ref_surface_id);
/* fall back to the DPB entry when the list entry is unusable */
1665 obj_surface = encode_state->reference_objects[list_index];
1666 vme_context->used_references[list_index] = &pic_param->ReferenceFrames[list_index];
/* multiple references: pick the temporally nearest one */
1671 curr_pic = &pic_param->CurrPic;
1673 /* select the reference frame in temporal space */
1674 ref_idx = avc_temporal_find_surface(curr_pic, ref_list, max_num_references, list_index == 1);
1675 ref_surface_id = ref_list[ref_idx].picture_id;
1677 if (ref_surface_id != VA_INVALID_SURFACE) /* otherwise warning later */
1678 obj_surface = SURFACE(ref_surface_id);
1680 vme_context->used_reference_objects[list_index] = obj_surface;
1681 vme_context->used_references[list_index] = &ref_list[ref_idx];
/* success: bind surface state and record the ref index for the MBs */
1686 assert(ref_idx >= 0);
1687 vme_context->used_reference_objects[list_index] = obj_surface;
1688 vme_source_surface_state(ctx, surface_index, obj_surface, encoder_context);
1689 vme_context->ref_index_in_mb[list_index] = (ref_idx << 24 |
/* failure: clear all per-list reference bookkeeping */
1694 vme_context->used_reference_objects[list_index] = NULL;
1695 vme_context->used_references[list_index] = NULL;
1696 vme_context->ref_index_in_mb[list_index] = 0;
/*
 * Insert the packed header data for one slice into the PAK stream via
 * mfc_context->insert_object():
 *   1. all raw packed data attached to the slice, except the slice
 *      header itself (inserted last);
 *   2. the slice header — either the app-supplied packed header
 *      (slice_header_index >= 0) or one generated by
 *      build_avc_slice_header() when none was provided.
 * Emulation-prevention byte handling is derived per buffer with
 * intel_avc_find_skipemulcnt().
 */
1700 void intel_avc_slice_insert_packed_data(VADriverContextP ctx,
1701 struct encode_state *encode_state,
1702 struct intel_encoder_context *encoder_context,
1704 struct intel_batchbuffer *slice_batch)
1706 int count, i, start_index;
1707 unsigned int length_in_bits;
1708 VAEncPackedHeaderParameterBuffer *param = NULL;
1709 unsigned int *header_data = NULL;
1710 struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
1711 int slice_header_index;
/* index 0 means "no packed slice header supplied by the app" */
1713 if (encode_state->slice_header_index[slice_index] == 0)
1714 slice_header_index = -1;
1716 slice_header_index = (encode_state->slice_header_index[slice_index] & SLICE_PACKED_DATA_INDEX_MASK);
1718 count = encode_state->slice_rawdata_count[slice_index];
1719 start_index = (encode_state->slice_rawdata_index[slice_index] & SLICE_PACKED_DATA_INDEX_MASK);
/* step 1: insert all non-slice-header packed buffers */
1721 for (i = 0; i < count; i++) {
1722 unsigned int skip_emul_byte_cnt;
1724 header_data = (unsigned int *)encode_state->packed_header_data_ext[start_index + i]->buffer;
1726 param = (VAEncPackedHeaderParameterBuffer *)
1727 (encode_state->packed_header_params_ext[start_index + i]->buffer);
1729 /* skip the slice header packed data type as it is lastly inserted */
1730 if (param->type == VAEncPackedHeaderSlice)
1733 length_in_bits = param->bit_length;
1735 skip_emul_byte_cnt = intel_avc_find_skipemulcnt((unsigned char *)header_data, length_in_bits);
1737 /* as the slice header is still required, the last header flag is set to
1740 mfc_context->insert_object(ctx,
1743 ALIGN(length_in_bits, 32) >> 5,
1744 length_in_bits & 0x1f,
1748 !param->has_emulation_bytes,
/* step 2a: no packed slice header — generate one ourselves */
1752 if (slice_header_index == -1) {
1753 unsigned char *slice_header = NULL;
1754 int slice_header_length_in_bits = 0;
1755 VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
1756 VAEncPictureParameterBufferH264 *pPicParameter = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
1757 VAEncSliceParameterBufferH264 *pSliceParameter = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[slice_index]->buffer;
1759 /* No slice header data is passed. And the driver needs to generate it */
1760 /* For the Normal H264 */
1761 slice_header_length_in_bits = build_avc_slice_header(pSequenceParameter,
1765 mfc_context->insert_object(ctx, encoder_context,
1766 (unsigned int *)slice_header,
1767 ALIGN(slice_header_length_in_bits, 32) >> 5,
1768 slice_header_length_in_bits & 0x1f,
1769 5, /* first 5 bytes are start code + nal unit type */
1770 1, 0, 1, slice_batch);
/* step 2b: app-supplied packed slice header — insert it last */
1774 unsigned int skip_emul_byte_cnt;
1776 header_data = (unsigned int *)encode_state->packed_header_data_ext[slice_header_index]->buffer;
1778 param = (VAEncPackedHeaderParameterBuffer *)
1779 (encode_state->packed_header_params_ext[slice_header_index]->buffer);
1780 length_in_bits = param->bit_length;
1782 /* as the slice header is the last header data for one slice,
1783 * the last header flag is set to one.
1785 skip_emul_byte_cnt = intel_avc_find_skipemulcnt((unsigned char *)header_data, length_in_bits);
1787 mfc_context->insert_object(ctx,
1790 ALIGN(length_in_bits, 32) >> 5,
1791 length_in_bits & 0x1f,
1795 !param->has_emulation_bytes,
/*
 * Lazily build the per-slice-type MB/MV cost table BO: one 32-byte cost
 * row per QP (QP_MAX rows), filled via intel_h264_calc_mbmvcost_qp().
 * The BO is cached on the VME context (i/p/b_qp_cost_table) so repeated
 * frames of the same slice type reuse it.
 */
1803 intel_h264_initialize_mbmv_cost(VADriverContextP ctx,
1804 struct encode_state *encode_state,
1805 struct intel_encoder_context *encoder_context)
1807 struct i965_driver_data *i965 = i965_driver_data(ctx);
1808 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1809 VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
1812 uint8_t *cost_table;
1814 int slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
/* early-out when the table for this slice type already exists */
1817 if (slice_type == SLICE_TYPE_I) {
1818 if (vme_context->i_qp_cost_table)
1820 } else if (slice_type == SLICE_TYPE_P) {
1821 if (vme_context->p_qp_cost_table)
1824 if (vme_context->b_qp_cost_table)
1828 /* It is enough to allocate 32 bytes for each qp. */
1829 bo = dri_bo_alloc(i965->intel.bufmgr,
1835 assert(bo->virtual);
1836 cost_table = (uint8_t *)(bo->virtual);
/* one 32-byte cost message per QP value */
1837 for (qp = 0; qp < QP_MAX; qp++) {
1838 intel_h264_calc_mbmvcost_qp(qp, slice_type, cost_table);
/* cache the finished BO under the matching slice type */
1844 if (slice_type == SLICE_TYPE_I) {
1845 vme_context->i_qp_cost_table = bo;
1846 } else if (slice_type == SLICE_TYPE_P) {
1847 vme_context->p_qp_cost_table = bo;
1849 vme_context->b_qp_cost_table = bo;
1852 vme_context->cost_table_size = QP_MAX * 32;
/*
 * Bind the cached per-QP cost table (selected by slice type) as a VME
 * buffer surface at the given binding-table/surface-state offsets, so
 * the VME kernel can index it by QP.  Layout: QP_MAX blocks of 32
 * bytes, pitch 16.
 */
1857 intel_h264_setup_cost_surface(VADriverContextP ctx,
1858 struct encode_state *encode_state,
1859 struct intel_encoder_context *encoder_context,
1860 unsigned long binding_table_offset,
1861 unsigned long surface_state_offset)
1863 struct gen6_vme_context *vme_context = encoder_context->vme_context;
1864 VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
1868 struct i965_buffer_surface cost_table;
1870 int slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
/* pick the BO built by intel_h264_initialize_mbmv_cost() */
1873 if (slice_type == SLICE_TYPE_I) {
1874 bo = vme_context->i_qp_cost_table;
1875 } else if (slice_type == SLICE_TYPE_P) {
1876 bo = vme_context->p_qp_cost_table;
1878 bo = vme_context->b_qp_cost_table;
/* describe the table geometry for the buffer surface state */
1882 cost_table.num_blocks = QP_MAX;
1883 cost_table.pitch = 16;
1884 cost_table.size_block = 32;
1886 vme_context->vme_buffer_suface_setup(ctx,
1887 &vme_context->gpe_context,
1889 binding_table_offset,
1890 surface_state_offset);
1894 * the idea of conversion between qp and qstep comes from scaling process
1895 * of transform coeff for Luma component in H264 spec.
1897 * In order to avoid too small qstep, it is multiplied by 16.
/*
 * Convert an H.264 QP to a linear quantizer step: qstep = 2^(qp/6 - 2)
 * (per the comment above, scaled to avoid tiny steps).
 * NOTE(review): the initial assignment of "value" from qp is elided in
 * this excerpt — confirm against the full source.
 */
1899 static float intel_h264_qp_qstep(int qp)
1903 value = value / 6 - 2;
1904 qstep = powf(2, value);
/*
 * Inverse of intel_h264_qp_qstep(): recover a QP from a linear
 * quantizer step via qp = 12 + 6 * log2(qstep); the +12 offsets the
 * x16 scaling used in the forward mapping.  Truncation/clamping of the
 * result is elided in this excerpt.
 */
1908 static int intel_h264_qstep_qp(float qstep)
1912 qp = 12.0f + 6.0f * log2f(qstep);
1918 * Currently it is based on the following assumption:
1919 * SUM(roi_area * 1 / roi_qstep) + non_area * 1 / nonroi_qstep =
1920 * total_area * 1 / baseqp_qstep
1922 * qstep is the linearized quantizer of H264 quantizer
1925 int row_start_in_mb;
1927 int col_start_in_mb;
/*
 * Build the per-macroblock QP map for CBR encoding with ROI regions.
 *
 * Each ROI keeps its user-requested QP delta relative to base_qp; the
 * non-ROI QP is then solved from the qstep-balance assumption
 * documented above:
 *     SUM(roi_area / roi_qstep) + nonroi_area / nonroi_qstep
 *         = total_area / baseqp_qstep
 * so that the overall rate stays close to what base_qp alone would
 * produce. The result is written into vme_context->qp_per_mb, one byte
 * per macroblock.
 *
 * NOTE(review): this view is elided — the return type, the `base_qp`
 * parameter, and several local declarations/initializations (i, j,
 * num_roi, roi_qp, nonroi_qp, sum_roi, temp, qstep_roi, mbs_in_roi,
 * qp_ptr) plus some early-exit paths are not visible here.
 */
1937 intel_h264_enc_roi_cbr(VADriverContextP ctx,
1939                        struct encode_state *encode_state,
1940                        struct intel_encoder_context *encoder_context)
1943     int min_qp = MAX(1, encoder_context->brc.min_qp);
1946     ROIRegionParam param_regions[I965_MAX_NUM_ROI_REGIONS];
1951     float qstep_nonroi, qstep_base;
1952     float roi_area, total_area, nonroi_area;
1955     VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
1956     int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
1957     int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
1958     int mbs_in_picture = width_in_mbs * height_in_mbs;
1960     struct gen6_vme_context *vme_context = encoder_context->vme_context;
1961     VAStatus vaStatus = VA_STATUS_SUCCESS;
1963     /* currently roi_value_is_qp_delta is the only supported mode of priority.
1965      * qp_delta set by user is added to base_qp, which is then clamped to
1966      * [base_qp-min_delta, base_qp+max_delta].
1968     ASSERT_RET(encoder_context->brc.roi_value_is_qp_delta, VA_STATUS_ERROR_INVALID_PARAMETER);
1970     num_roi = encoder_context->brc.num_roi;
1972     /* when the base_qp is lower than 12, the quality is quite good based
1973      * on the H264 test experience.
1974      * In such case it is unnecessary to adjust the quality for ROI region.
1976     if (base_qp <= 12) {
1977         nonroi_qp = base_qp;
    /* Convert every ROI from pixel coordinates to a macroblock rectangle
     * and accumulate its contribution to the area/qstep balance. */
1984     for (i = 0; i < num_roi; i++) {
1985         int row_start, row_end, col_start, col_end;
1986         int roi_width_mbs, roi_height_mbs;
1991         col_start = encoder_context->brc.roi[i].left;
1992         col_end = encoder_context->brc.roi[i].right;
1993         row_start = encoder_context->brc.roi[i].top;
1994         row_end = encoder_context->brc.roi[i].bottom;
        /* Pixels -> 16x16 MB units; right/bottom edges round up so a
         * partially covered macroblock still belongs to the ROI. */
1996         col_start = col_start / 16;
1997         col_end = (col_end + 15) / 16;
1998         row_start = row_start / 16;
1999         row_end = (row_end + 15) / 16;
2001         roi_width_mbs = col_end - col_start;
2002         roi_height_mbs = row_end - row_start;
2003         mbs_in_roi = roi_width_mbs * roi_height_mbs;
2005         param_regions[i].row_start_in_mb = row_start;
2006         param_regions[i].row_end_in_mb = row_end;
2007         param_regions[i].col_start_in_mb = col_start;
2008         param_regions[i].col_end_in_mb = col_end;
2009         param_regions[i].width_mbs = roi_width_mbs;
2010         param_regions[i].height_mbs = roi_height_mbs;
        /* Apply the user delta and keep the result a legal H.264 QP. */
2012         roi_qp = base_qp + encoder_context->brc.roi[i].value;
2013         BRC_CLIP(roi_qp, min_qp, 51);
2015         param_regions[i].roi_qp = roi_qp;
2016         qstep_roi = intel_h264_qp_qstep(roi_qp);
2018         roi_area += mbs_in_roi;
2019         sum_roi += mbs_in_roi / qstep_roi;
    /* Solve the balance equation for the non-ROI qstep, convert it back
     * to a QP and clamp to the valid range.
     * NOTE(review): a guard against temp <= 0 (division by zero or a
     * negative qstep) is presumably on the elided lines between the
     * subtraction and the division — confirm in the full source. */
2022     total_area = mbs_in_picture;
2023     nonroi_area = total_area - roi_area;
2025     qstep_base = intel_h264_qp_qstep(base_qp);
2026     temp = (total_area / qstep_base - sum_roi);
2031     qstep_nonroi = nonroi_area / temp;
2032     nonroi_qp = intel_h264_qstep_qp(qstep_nonroi);
2035     BRC_CLIP(nonroi_qp, min_qp, 51);
    /* Default the whole frame to the non-ROI QP (one byte per MB)... */
2038     memset(vme_context->qp_per_mb, nonroi_qp, mbs_in_picture);
    /* ...then stamp each ROI rectangle with its own QP, row by row. */
2042     for (i = 0; i < num_roi; i++) {
2043         for (j = param_regions[i].row_start_in_mb; j < param_regions[i].row_end_in_mb; j++) {
2044             qp_ptr = vme_context->qp_per_mb + (j * width_in_mbs) + param_regions[i].col_start_in_mb;
2045             memset(qp_ptr, param_regions[i].roi_qp, param_regions[i].width_mbs);
/*
 * Configure ROI (region-of-interest) encoding for the current H.264
 * frame: decide whether ROI can be enabled, (re)allocate the per-MB QP
 * map on resolution change, and fill it according to the rate-control
 * mode. CBR delegates to intel_h264_enc_roi_cbr(); CQP is handled
 * inline; any other mode disables ROI. On Gen7, enabling ROI also
 * forces the soft-batch path.
 *
 * NOTE(review): this view is elided — the return type/value, several
 * locals (i, j, qp, num_roi, qp_ptr) and some early-return lines are
 * not visible here.
 */
2053 intel_h264_enc_roi_config(VADriverContextP ctx,
2054                           struct encode_state *encode_state,
2055                           struct intel_encoder_context *encoder_context)
2059     struct i965_driver_data *i965 = i965_driver_data(ctx);
2060     struct gen6_vme_context *vme_context = encoder_context->vme_context;
2061     struct gen6_mfc_context *mfc_context = encoder_context->mfc_context;
2062     VAEncSequenceParameterBufferH264 *pSequenceParameter = (VAEncSequenceParameterBufferH264 *)encode_state->seq_param_ext->buffer;
2063     int width_in_mbs = pSequenceParameter->picture_width_in_mbs;
2064     int height_in_mbs = pSequenceParameter->picture_height_in_mbs;
2066     int row_start, row_end, col_start, col_end;
2069     vme_context->roi_enabled = 0;
2070     /* Restriction: Disable ROI when multi-slice is enabled */
2071     if (!encoder_context->context_roi || (encode_state->num_slice_params_ext > 1))
2074     vme_context->roi_enabled = !!encoder_context->brc.num_roi;
2076     if (!vme_context->roi_enabled)
    /* Reallocate the per-MB QP map only when the frame geometry changed
     * since the last call (one byte per macroblock). */
2079     if ((vme_context->saved_width_mbs != width_in_mbs) ||
2080         (vme_context->saved_height_mbs != height_in_mbs)) {
2081         free(vme_context->qp_per_mb);
2082         vme_context->qp_per_mb = calloc(1, width_in_mbs * height_in_mbs);
2084         vme_context->saved_width_mbs = width_in_mbs;
2085         vme_context->saved_height_mbs = height_in_mbs;
2086         assert(vme_context->qp_per_mb);
2088     if (encoder_context->rate_control_mode == VA_RC_CBR) {
2090          * TODO: More complex Qp adjust needs to be added.
2091          * Currently it is initialized to slice_qp.
2093         VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
2095         int slice_type = intel_avc_enc_slice_type_fixup(slice_param->slice_type);
        /* CBR: the base QP comes from the BRC state of the current
         * temporal layer and (fixed-up) slice type. */
2097         qp = mfc_context->brc.qp_prime_y[encoder_context->layer.curr_frame_layer_id][slice_type];
2098         intel_h264_enc_roi_cbr(ctx, qp, encode_state, encoder_context);
2100     } else if (encoder_context->rate_control_mode == VA_RC_CQP){
2101         VAEncPictureParameterBufferH264 *pic_param = (VAEncPictureParameterBufferH264 *)encode_state->pic_param_ext->buffer;
2102         VAEncSliceParameterBufferH264 *slice_param = (VAEncSliceParameterBufferH264 *)encode_state->slice_params_ext[0]->buffer;
2104         int min_qp = MAX(1, encoder_context->brc.min_qp);
        /* CQP: fill the whole frame with the slice QP first. */
2106         qp = pic_param->pic_init_qp + slice_param->slice_qp_delta;
2107         memset(vme_context->qp_per_mb, qp, width_in_mbs * height_in_mbs);
        /* Walk the ROIs from the last index down.
         * NOTE(review): the loop body indexes brc.roi[i]; the mapping
         * from j to i (presumably `i = j - 1;`, so lower-indexed regions
         * are written last and take precedence on overlap) is on an
         * elided line — confirm. Also note the inner row loop below
         * reuses `i` as the row counter. */
2110         for (j = num_roi; j ; j--) {
2111             int qp_delta, qp_clip;
2113             col_start = encoder_context->brc.roi[i].left;
2114             col_end = encoder_context->brc.roi[i].right;
2115             row_start = encoder_context->brc.roi[i].top;
2116             row_end = encoder_context->brc.roi[i].bottom;
            /* Pixels -> MB units, rounding the right/bottom edges up. */
2118             col_start = col_start / 16;
2119             col_end = (col_end + 15) / 16;
2120             row_start = row_start / 16;
2121             row_end = (row_end + 15) / 16;
            /* Apply the per-region delta and clamp to a legal QP. */
2123             qp_delta = encoder_context->brc.roi[i].value;
2124             qp_clip = qp + qp_delta;
2126             BRC_CLIP(qp_clip, min_qp, 51);
2128             for (i = row_start; i < row_end; i++) {
2129                 qp_ptr = vme_context->qp_per_mb + (i * width_in_mbs) + col_start;
2130                 memset(qp_ptr, qp_clip, (col_end - col_start));
2135          * TODO: Disable it for non CBR-CQP.
2137         vme_context->roi_enabled = 0;
    /* When ROI is active on Gen7, force the soft batch path. */
2140     if (vme_context->roi_enabled && IS_GEN7(i965->intel.device_info))
2141         encoder_context->soft_batch_force = 1;
/*
 * Scan a HEVC reference list and pick the valid entry whose picture
 * order count (POC) is closest to the current picture's POC — as
 * visible here, the nearest one with a POC smaller than the current
 * picture (smallest positive distance).
 * NOTE(review): elided lines include the remaining parameters (the
 * picture count and a backward/list-1 flag), the declaration of `tmp`,
 * any handling of the backward direction, and the final
 * `return found;` — confirm against the full source.
 */
2148 hevc_temporal_find_surface(VAPictureHEVC *curr_pic,
2149                            VAPictureHEVC *ref_list,
2153     int i, found = -1, min = 0x7FFFFFFF;
2155     for (i = 0; i < num_pictures; i++) {
        /* Skip invalid or unset reference entries. */
2158         if ((ref_list[i].flags & VA_PICTURE_HEVC_INVALID) ||
2159             (ref_list[i].picture_id == VA_INVALID_SURFACE))
        /* POC distance from the current picture to this candidate. */
2162         tmp = curr_pic->pic_order_cnt - ref_list[i].pic_order_cnt;
        /* Keep the candidate with the smallest positive distance. */
2167         if (tmp > 0 && tmp < min) {
/*
 * Select the reference surface for one HEVC reference list (L0 or L1)
 * and program its VME source-surface state via the supplied callback.
 * Records the chosen reference in vme_context->used_references /
 * used_reference_objects and packs its list index into
 * vme_context->ref_index_in_mb; on failure the slots are cleared.
 *
 * For 10-bit content the callback is given the companion NV12 surface
 * held in the GenHevcSurface private data rather than the P010
 * original.
 *
 * NOTE(review): this view is elided — the `list_index` and
 * `surface_index` parameters, the declaration/initialization of
 * `ref_idx`, and several branch/return lines are not visible here.
 */
2176 intel_hevc_vme_reference_state(VADriverContextP ctx,
2177                                struct encode_state *encode_state,
2178                                struct intel_encoder_context *encoder_context,
2181                                void (* vme_source_surface_state)(
2182                                    VADriverContextP ctx,
2184                                    struct object_surface *obj_surface,
2185                                    struct intel_encoder_context *encoder_context))
2187     struct gen6_vme_context *vme_context = encoder_context->vme_context;
2188     struct object_surface *obj_surface = NULL;
2189     struct i965_driver_data *i965 = i965_driver_data(ctx);
2190     VASurfaceID ref_surface_id;
2191     VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
2192     VAEncPictureParameterBufferHEVC *pic_param = (VAEncPictureParameterBufferHEVC *)encode_state->pic_param_ext->buffer;
2193     VAEncSliceParameterBufferHEVC *slice_param = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[0]->buffer;
2194     int max_num_references;
2195     VAPictureHEVC *curr_pic;
2196     VAPictureHEVC *ref_list;
2198     unsigned int is_hevc10 = 0;
2199     GenHevcSurface *hevc_encoder_surface = NULL;
    /* A non-zero luma or chroma bit depth offset means 10-bit HEVC. */
2201     if((pSequenceParameter->seq_fields.bits.bit_depth_luma_minus8 > 0)
2202        || (pSequenceParameter->seq_fields.bits.bit_depth_chroma_minus8 > 0))
    /* Pick the reference list and its active size for this list index. */
2205     if (list_index == 0) {
2206         max_num_references = pic_param->num_ref_idx_l0_default_active_minus1 + 1;
2207         ref_list = slice_param->ref_pic_list0;
2209         max_num_references = pic_param->num_ref_idx_l1_default_active_minus1 + 1;
2210         ref_list = slice_param->ref_pic_list1;
    /* Single-entry list: take entry 0 directly, no temporal search. */
2213     if (max_num_references == 1) {
2214         if (list_index == 0) {
2215             ref_surface_id = slice_param->ref_pic_list0[0].picture_id;
2216             vme_context->used_references[0] = &slice_param->ref_pic_list0[0];
2218             ref_surface_id = slice_param->ref_pic_list1[0].picture_id;
2219             vme_context->used_references[1] = &slice_param->ref_pic_list1[0];
2222         if (ref_surface_id != VA_INVALID_SURFACE)
2223             obj_surface = SURFACE(ref_surface_id);
        /* Fall back to the per-list reference object / frame entry.
         * NOTE(review): the condition guarding this fallback is on an
         * elided line — presumably the SURFACE() lookup failed. */
2227             obj_surface = encode_state->reference_objects[list_index];
2228             vme_context->used_references[list_index] = &pic_param->reference_frames[list_index];
2233         curr_pic = &pic_param->decoded_curr_pic;
2235         /* select the reference frame in temporal space */
2236         ref_idx = hevc_temporal_find_surface(curr_pic, ref_list, max_num_references, list_index == 1);
2237         ref_surface_id = ref_list[ref_idx].picture_id;
2239         if (ref_surface_id != VA_INVALID_SURFACE) /* otherwise warning later */
2240             obj_surface = SURFACE(ref_surface_id);
2242         vme_context->used_reference_objects[list_index] = obj_surface;
2243         vme_context->used_references[list_index] = &ref_list[ref_idx];
    /* Success path: remember the reference and program the VME surface.
     * NOTE(review): the guard (presumably obj_surface && obj_surface->bo)
     * is on an elided line. */
2248         assert(ref_idx >= 0);
2249         vme_context->used_reference_objects[list_index] = obj_surface;
        /* 10-bit: the VME works on the companion NV12 surface. */
2252             hevc_encoder_surface = (GenHevcSurface *) obj_surface->private_data;
2253             assert(hevc_encoder_surface);
2254             obj_surface = hevc_encoder_surface->nv12_surface_obj;
2256         vme_source_surface_state(ctx, surface_index, obj_surface, encoder_context);
2257         vme_context->ref_index_in_mb[list_index] = (ref_idx << 24 |
    /* Failure path: clear the per-list reference bookkeeping. */
2262         vme_context->used_reference_objects[list_index] = NULL;
2263         vme_context->used_references[list_index] = NULL;
2264         vme_context->ref_index_in_mb[list_index] = 0;
2268 void intel_vme_hevc_update_mbmv_cost(VADriverContextP ctx,
2269 struct encode_state *encode_state,
2270 struct intel_encoder_context *encoder_context)
2272 struct gen9_hcpe_context *mfc_context = encoder_context->mfc_context;
2273 struct gen6_vme_context *vme_context = encoder_context->vme_context;
2274 VAEncPictureParameterBufferHEVC *pic_param = (VAEncPictureParameterBufferHEVC *)encode_state->pic_param_ext->buffer;
2275 VAEncSliceParameterBufferHEVC *slice_param = (VAEncSliceParameterBufferHEVC *)encode_state->slice_params_ext[0]->buffer;
2276 VAEncSequenceParameterBufferHEVC *pSequenceParameter = (VAEncSequenceParameterBufferHEVC *)encode_state->seq_param_ext->buffer;
2277 int qp, m_cost, j, mv_count;
2278 uint8_t *vme_state_message = (uint8_t *)(vme_context->vme_state_message);
2279 float lambda, m_costf;
2281 /* here no SI SP slice for HEVC, do not need slice fixup */
2282 int slice_type = slice_param->slice_type;
2285 qp = pic_param->pic_init_qp + slice_param->slice_qp_delta;
2287 if(encoder_context->rate_control_mode == VA_RC_CBR)
2289 qp = mfc_context->bit_rate_control_context[slice_type].QpPrimeY;
2290 if(slice_type == HEVC_SLICE_B) {
2291 if(pSequenceParameter->ip_period == 1)
2293 slice_type = HEVC_SLICE_P;
2294 qp = mfc_context->bit_rate_control_context[HEVC_SLICE_P].QpPrimeY;
2296 }else if(mfc_context->vui_hrd.i_frame_number % pSequenceParameter->ip_period == 1){
2297 slice_type = HEVC_SLICE_P;
2298 qp = mfc_context->bit_rate_control_context[HEVC_SLICE_P].QpPrimeY;
2304 if (vme_state_message == NULL)
2307 assert(qp <= QP_MAX);
2308 lambda = intel_lambda_qp(qp);
2309 if (slice_type == HEVC_SLICE_I) {
2310 vme_state_message[MODE_INTRA_16X16] = 0;
2311 m_cost = lambda * 4;
2312 vme_state_message[MODE_INTRA_8X8] = intel_format_lutvalue(m_cost, 0x8f);
2313 m_cost = lambda * 16;
2314 vme_state_message[MODE_INTRA_4X4] = intel_format_lutvalue(m_cost, 0x8f);
2315 m_cost = lambda * 3;
2316 vme_state_message[MODE_INTRA_NONPRED] = intel_format_lutvalue(m_cost, 0x6f);
2319 vme_state_message[MODE_INTER_MV0] = intel_format_lutvalue(m_cost, 0x6f);
2320 for (j = 1; j < 3; j++) {
2321 m_costf = (log2f((float)(j + 1)) + 1.718f) * lambda;
2322 m_cost = (int)m_costf;
2323 vme_state_message[MODE_INTER_MV0 + j] = intel_format_lutvalue(m_cost, 0x6f);
2326 for (j = 4; j <= 64; j *= 2) {
2327 m_costf = (log2f((float)(j + 1)) + 1.718f) * lambda;
2328 m_cost = (int)m_costf;
2329 vme_state_message[MODE_INTER_MV0 + mv_count] = intel_format_lutvalue(m_cost, 0x6f);
2334 vme_state_message[MODE_INTRA_16X16] = 0x4a;
2335 vme_state_message[MODE_INTRA_8X8] = 0x4a;
2336 vme_state_message[MODE_INTRA_4X4] = 0x4a;
2337 vme_state_message[MODE_INTRA_NONPRED] = 0x4a;
2338 vme_state_message[MODE_INTER_16X16] = 0x4a;
2339 vme_state_message[MODE_INTER_16X8] = 0x4a;
2340 vme_state_message[MODE_INTER_8X8] = 0x4a;
2341 vme_state_message[MODE_INTER_8X4] = 0x4a;
2342 vme_state_message[MODE_INTER_4X4] = 0x4a;
2343 vme_state_message[MODE_INTER_BWD] = 0x2a;
2346 m_costf = lambda * 10;
2347 vme_state_message[MODE_INTRA_16X16] = intel_format_lutvalue(m_cost, 0x8f);
2348 m_cost = lambda * 14;
2349 vme_state_message[MODE_INTRA_8X8] = intel_format_lutvalue(m_cost, 0x8f);
2350 m_cost = lambda * 24;
2351 vme_state_message[MODE_INTRA_4X4] = intel_format_lutvalue(m_cost, 0x8f);
2352 m_costf = lambda * 3.5;
2354 vme_state_message[MODE_INTRA_NONPRED] = intel_format_lutvalue(m_cost, 0x6f);
2355 if (slice_type == HEVC_SLICE_P) {
2356 m_costf = lambda * 2.5;
2358 vme_state_message[MODE_INTER_16X16] = intel_format_lutvalue(m_cost, 0x8f);
2359 m_costf = lambda * 4;
2361 vme_state_message[MODE_INTER_16X8] = intel_format_lutvalue(m_cost, 0x8f);
2362 m_costf = lambda * 1.5;
2364 vme_state_message[MODE_INTER_8X8] = intel_format_lutvalue(m_cost, 0x6f);
2365 m_costf = lambda * 3;
2367 vme_state_message[MODE_INTER_8X4] = intel_format_lutvalue(m_cost, 0x6f);
2368 m_costf = lambda * 5;
2370 vme_state_message[MODE_INTER_4X4] = intel_format_lutvalue(m_cost, 0x6f);
2371 /* BWD is not used in P-frame */
2372 vme_state_message[MODE_INTER_BWD] = 0;
2374 m_costf = lambda * 2.5;
2376 vme_state_message[MODE_INTER_16X16] = intel_format_lutvalue(m_cost, 0x8f);
2377 m_costf = lambda * 5.5;
2379 vme_state_message[MODE_INTER_16X8] = intel_format_lutvalue(m_cost, 0x8f);
2380 m_costf = lambda * 3.5;
2382 vme_state_message[MODE_INTER_8X8] = intel_format_lutvalue(m_cost, 0x6f);
2383 m_costf = lambda * 5.0;
2385 vme_state_message[MODE_INTER_8X4] = intel_format_lutvalue(m_cost, 0x6f);
2386 m_costf = lambda * 6.5;
2388 vme_state_message[MODE_INTER_4X4] = intel_format_lutvalue(m_cost, 0x6f);
2389 m_costf = lambda * 1.5;
2391 vme_state_message[MODE_INTER_BWD] = intel_format_lutvalue(m_cost, 0x6f);