2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of Libav.
10 * Libav is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * Libav is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with Libav; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations of the portable C inverse-quantization routines
 * defined later in this file. ff_dct_common_init() installs these as
 * function pointers on the MpegEncContext, after which per-architecture
 * initializers may replace them with optimized versions. */
46 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
47 DCTELEM *block, int n, int qscale);
48 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
49 DCTELEM *block, int n, int qscale);
50 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
51 DCTELEM *block, int n, int qscale);
52 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
53 DCTELEM *block, int n, int qscale);
54 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
55 DCTELEM *block, int n, int qscale);
56 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
57 DCTELEM *block, int n, int qscale);
58 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
59 DCTELEM *block, int n, int qscale);
62 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-to-chroma qscale mapping: the identity (chroma qscale
 * equals luma qscale for all 32 indices). Codecs with a non-linear
 * chroma qscale install their own table instead. */
68 static const uint8_t ff_default_chroma_qscale_table[32]={
69 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
70 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC coefficient scale: a constant 8 for every qscale index
 * (128 entries), i.e. the DC scaler does not depend on qscale. */
73 const uint8_t ff_mpeg1_dc_scale_table[128]={
74 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
75 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* Constant DC scale of 4 for all qscale indices.
 * NOTE(review): selected via ff_mpeg2_dc_scale_table[1]; the index
 * presumably corresponds to MPEG-2 intra_dc_precision == 1 — confirm
 * against the callers. */
81 static const uint8_t mpeg2_dc_scale_table1[128]={
82 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
83 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* Constant DC scale of 2 for all qscale indices.
 * NOTE(review): selected via ff_mpeg2_dc_scale_table[2]; presumably for
 * MPEG-2 intra_dc_precision == 2 — confirm against the callers. */
89 static const uint8_t mpeg2_dc_scale_table2[128]={
90 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
91 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* Constant DC scale of 1 (no scaling) for all qscale indices.
 * NOTE(review): selected via ff_mpeg2_dc_scale_table[3]; presumably for
 * MPEG-2 intra_dc_precision == 3 — confirm against the callers. */
97 static const uint8_t mpeg2_dc_scale_table3[128]={
98 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
99 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* Four-way selector between the DC scale tables above (each a per-qscale
 * lookup). NOTE(review): the outer index is presumably the MPEG-2
 * intra_dc_precision field (0..3) — verify at the use sites. */
105 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
106 ff_mpeg1_dc_scale_table,
107 mpeg2_dc_scale_table1,
108 mpeg2_dc_scale_table2,
109 mpeg2_dc_scale_table3,
/* Pixel formats offered for plain software 4:2:0 decoding
 * (initializer entries not visible in this excerpt). */
112 const enum PixelFormat ff_pixfmt_list_420[] = {
/* Pixel format candidates for 4:2:0 decoding when hardware acceleration
 * may be available (initializer entries not visible in this excerpt). */
117 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code.
 * *state is a rolling window of the most recently read bytes, carried
 * across calls so a start code split between two input buffers is still
 * detected: after the shift, tmp == 0x100 means the three bytes before
 * the current one were 00 00 01.
 */
124 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
132 uint32_t tmp= *state << 8;
133 *state= tmp + *(p++);
134 if(tmp == 0x100 || p==end)
/* Fast-path skip: based on the bytes just examined, advance past
 * positions that cannot begin a 00 00 01 prefix. */
139 if (p[-1] > 1 ) p+= 3;
140 else if(p[-2] ) p+= 2;
141 else if(p[-3]|(p[-1]-1)) p++;
154 /* init common dct for both encoder and decoder */
155 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Install the portable C inverse quantizers as the defaults. */
157 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
158 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
159 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
160 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
161 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact output requested: use the reference-exact MPEG-2 intra path. */
162 if(s->flags & CODEC_FLAG_BITEXACT)
163 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
164 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Per-architecture initializers; each may override the C defaults with
 * optimized implementations (guards/#ifdefs elided in this excerpt). */
167 MPV_common_init_mmx(s);
169 MPV_common_init_axp(s);
171 MPV_common_init_mlib(s);
173 MPV_common_init_mmi(s);
175 MPV_common_init_arm(s);
177 MPV_common_init_altivec(s);
179 MPV_common_init_bfin(s);
182 /* load & permutate scantables
183 note: only wmv uses different ones
/* Choose vertical-alternate or zigzag inter/intra scan order, then the
 * fixed horizontal/vertical alternate tables, all permuted for the IDCT. */
185 if(s->alternate_scan){
186 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
187 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
189 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
190 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
192 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/* Copy a Picture and mark the destination as a non-owning copy so the
 * buffer-release logic will not free its frame data.
 * NOTE(review): the field-by-field/struct copy itself is not visible in
 * this excerpt — confirm dst is populated from src before the type is set. */
198 void ff_copy_picture(Picture *dst, Picture *src){
200 dst->type= FF_BUFFER_TYPE_COPY;
204 * Release a frame buffer
206 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
/* Hand the frame back to the application/avcodec buffer pool, then drop
 * any hwaccel-private data attached to this picture. */
208 s->avctx->release_buffer(s->avctx, (AVFrame*)pic);
209 av_freep(&pic->hwaccel_picture_private);
213 * Allocate a frame buffer
215 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* If a hwaccel is active, allocate its per-picture private data first. */
219 if (s->avctx->hwaccel) {
220 assert(!pic->hwaccel_picture_private);
221 if (s->avctx->hwaccel->priv_data_size) {
222 pic->hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
223 if (!pic->hwaccel_picture_private) {
224 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
/* Obtain the actual frame data via the (possibly user-supplied)
 * get_buffer callback, then sanity-check what it returned. */
230 r = s->avctx->get_buffer(s->avctx, (AVFrame*)pic);
232 if (r<0 || !pic->age || !pic->type || !pic->data[0]) {
233 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]);
234 av_freep(&pic->hwaccel_picture_private);
/* Strides must stay constant across pictures once established. */
238 if (s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])) {
239 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
240 free_frame_buffer(s, pic);
/* Both chroma planes must share one stride. */
244 if (pic->linesize[1] != pic->linesize[2]) {
245 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
246 free_frame_buffer(s, pic);
254 * allocates a Picture
255 The pixels are allocated/set by calling get_buffer() if shared=0
257 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
258 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
259 const int mb_array_size= s->mb_stride*s->mb_height;
260 const int b8_array_size= s->b8_stride*s->mb_height*2;
261 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared != 0: the caller already owns the pixel data; just mark it. */
266 assert(pic->data[0]);
267 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
268 pic->type= FF_BUFFER_TYPE_SHARED;
/* shared == 0: the picture must be empty; get real pixels now. */
270 assert(!pic->data[0]);
272 if (alloc_frame_buffer(s, pic) < 0)
/* Record the strides get_buffer() picked; later allocations must match. */
275 s->linesize = pic->linesize[0];
276 s->uvlinesize= pic->linesize[1];
/* First-time setup of the per-picture side tables (qscale, mb type,
 * motion vectors, ...). qscale_table==NULL is used as the "not yet
 * allocated" marker for the whole group. */
279 if(pic->qscale_table==NULL){
281 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
282 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
286 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mbskip_table , mb_array_size * sizeof(uint8_t)+2, fail) //the +2 is for the slice end check
287 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table , mb_array_size * sizeof(uint8_t) , fail)
288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
289 pic->mb_type= pic->mb_type_base + 2*s->mb_stride+1;
/* H.264 uses 4x4 motion granularity (subsample_log2 == 2), everything
 * else that needs motion data uses 8x8 granularity (log2 == 3). */
290 if(s->out_format == FMT_H264){
292 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
293 pic->motion_val[i]= pic->motion_val_base[i]+4;
294 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
296 pic->motion_subsample_log2= 2;
297 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
299 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
300 pic->motion_val[i]= pic->motion_val_base[i]+4;
301 FF_ALLOCZ_OR_GOTO(s->avctx, pic->ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
303 pic->motion_subsample_log2= 3;
305 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
306 FF_ALLOCZ_OR_GOTO(s->avctx, pic->dct_coeff, 64 * mb_array_size * sizeof(DCTELEM)*6, fail)
308 pic->qstride= s->mb_stride;
309 FF_ALLOCZ_OR_GOTO(s->avctx, pic->pan_scan , 1 * sizeof(AVPanScan), fail)
312 /* It might be nicer if the application would keep track of these
313 * but it would require an API change. */
314 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
315 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
316 if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == AV_PICTURE_TYPE_B)
317 pic->age= INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
320 fail: //for the FF_ALLOCZ_OR_GOTO macro
322 free_frame_buffer(s, pic);
327 * deallocates a picture
329 static void free_picture(MpegEncContext *s, Picture *pic){
/* Only release pixel data we own; SHARED pictures belong to the caller. */
332 if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){
333 free_frame_buffer(s, pic);
/* Free all per-picture side tables allocated by ff_alloc_picture().
 * av_freep() NULLs the pointers, so double-frees are harmless. */
336 av_freep(&pic->mb_var);
337 av_freep(&pic->mc_mb_var);
338 av_freep(&pic->mb_mean);
339 av_freep(&pic->mbskip_table);
340 av_freep(&pic->qscale_table);
341 av_freep(&pic->mb_type_base);
342 av_freep(&pic->dct_coeff);
343 av_freep(&pic->pan_scan);
346 av_freep(&pic->motion_val_base[i]);
347 av_freep(&pic->ref_index[i]);
350 if(pic->type == FF_BUFFER_TYPE_SHARED){
/* Allocate the per-thread scratch buffers of a (possibly duplicated)
 * context. On failure returns -1; partially allocated buffers are
 * released later through MPV_common_end(). */
359 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
360 int y_size = s->b8_stride * (2 * s->mb_height + 1);
361 int c_size = s->mb_stride * (s->mb_height + 1);
362 int yc_size = y_size + 2 * c_size;
365 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
366 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
367 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
369 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
/* One scratchpad allocation shared (aliased) by several users; they are
 * never live at the same time. */
370 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
371 s->me.temp= s->me.scratchpad;
372 s->rd_scratchpad= s->me.scratchpad;
373 s->b_scratchpad= s->me.scratchpad;
374 s->obmc_scratchpad= s->me.scratchpad + 16;
376 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
377 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
378 if(s->avctx->noise_reduction){
379 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 DCT coefficients, double-buffered. */
382 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
383 s->block= s->blocks[0];
386 s->pblocks[i] = &s->block[i];
/* H.263-family AC prediction values: plane 0 is luma (y_size entries),
 * planes 1/2 are chroma, each c_size entries. */
389 if (s->out_format == FMT_H263) {
391 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
392 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
393 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
394 s->ac_val[2] = s->ac_val[1] + c_size;
399 return -1; //free() through MPV_common_end()
/* Free everything init_duplicate_context() allocated. Note that the
 * aliases of me.scratchpad (rd/b/obmc scratchpads) must not be freed
 * themselves — only the owning pointer is. */
402 static void free_duplicate_context(MpegEncContext *s){
405 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
406 av_freep(&s->me.scratchpad);
410 s->obmc_scratchpad= NULL;
412 av_freep(&s->dct_error_sum);
413 av_freep(&s->me.map);
414 av_freep(&s->me.score_map);
415 av_freep(&s->blocks);
416 av_freep(&s->ac_val_base);
/* Copy the per-thread ("duplicate") fields from src into bak so they
 * survive a wholesale memcpy of the context (see
 * ff_update_duplicate_context). */
420 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
421 #define COPY(a) bak->a= src->a
422 COPY(allocated_edge_emu_buffer);
423 COPY(edge_emu_buffer);
428 COPY(obmc_scratchpad);
435 COPY(me.map_generation);
/* Refresh a worker-thread context from the master: save dst's private
 * per-thread fields, overwrite dst with src wholesale, then restore the
 * saved fields. pblocks must be re-pointed because they reference dst's
 * own block array, not src's. */
447 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
450 //FIXME copy only needed parts
452 backup_duplicate_context(&bak, dst);
453 memcpy(dst, src, sizeof(MpegEncContext));
454 backup_duplicate_context(dst, &bak);
456 dst->pblocks[i] = &dst->block[i];
458 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
462 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
463 * the changed fields will not depend upon the prior state of the MpegEncContext.
465 void MPV_common_defaults(MpegEncContext *s){
/* Default scale/qscale tables; codecs replace these where needed. */
467 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
468 s->chroma_qscale_table= ff_default_chroma_qscale_table;
/* Assume progressive full-frame coding until headers say otherwise. */
469 s->progressive_frame= 1;
470 s->progressive_sequence= 1;
471 s->picture_structure= PICT_FRAME;
473 s->coded_picture_number = 0;
474 s->picture_number = 0;
475 s->input_picture_number = 0;
477 s->picture_in_gop_number = 0;
484 * sets the given MpegEncContext to defaults for decoding.
485 * the changed fields will not depend upon the prior state of the MpegEncContext.
487 void MPV_decode_defaults(MpegEncContext *s){
488 MPV_common_defaults(s);
492 * init common structure for both encoder and decoder.
493 * this assumes that some variables like width/height are already set
495 av_cold int MPV_common_init(MpegEncContext *s)
497 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y, threads;
/* Interlaced MPEG-2 needs an even macroblock-row count (pairs of field
 * rows); H.264 manages mb_height itself. */
499 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
500 s->mb_height = (s->height + 31) / 32 * 2;
501 else if (s->codec_id != CODEC_ID_H264)
502 s->mb_height = (s->height + 15) / 16;
504 if(s->avctx->pix_fmt == PIX_FMT_NONE){
505 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* Slice threading cannot use more threads than macroblock rows. */
509 if(s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height)){
510 av_log(s->avctx, AV_LOG_ERROR, "too many threads\n");
514 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
517 dsputil_init(&s->dsp, s->avctx);
518 ff_dct_common_init(s);
520 s->flags= s->avctx->flags;
521 s->flags2= s->avctx->flags2;
/* Dimension-dependent setup; skipped when the size is not yet known. */
523 if (s->width && s->height) {
524 s->mb_width = (s->width + 15) / 16;
525 s->mb_stride = s->mb_width + 1;
526 s->b8_stride = s->mb_width*2 + 1;
527 s->b4_stride = s->mb_width*4 + 1;
528 mb_array_size= s->mb_height * s->mb_stride;
529 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
531 /* set chroma shifts */
532 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
533 &(s->chroma_y_shift) );
535 /* set default edge pos, will be overriden in decode_header if needed */
536 s->h_edge_pos= s->mb_width*16;
537 s->v_edge_pos= s->mb_height*16;
539 s->mb_num = s->mb_width * s->mb_height;
544 s->block_wrap[3]= s->b8_stride;
546 s->block_wrap[5]= s->mb_stride;
548 y_size = s->b8_stride * (2 * s->mb_height + 1);
549 c_size = s->mb_stride * (s->mb_height + 1);
550 yc_size = y_size + 2 * c_size;
552 /* convert fourcc to upper case */
553 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
555 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
557 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
/* Map raster macroblock index -> position in the padded mb_stride grid. */
559 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
560 for(y=0; y<s->mb_height; y++){
561 for(x=0; x<s->mb_width; x++){
562 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
565 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
568 /* Allocate MV tables */
569 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
570 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
571 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
572 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
573 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
574 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* The usable table pointers skip the one-row/one-column padding border. */
575 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
576 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
577 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
578 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
579 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
580 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
582 if(s->msmpeg4_version){
583 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
585 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
587 /* Allocate MB type table */
588 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
590 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
592 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
593 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
594 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
595 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
596 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
597 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
599 if(s->avctx->noise_reduction){
600 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
605 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, MAX_PICTURE_COUNT * sizeof(Picture), fail)
606 for(i = 0; i < MAX_PICTURE_COUNT; i++) {
607 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
610 if (s->width && s->height) {
611 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
613 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
614 /* interlaced direct mode decoding tables */
619 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
620 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
622 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
623 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
624 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
626 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
629 if (s->out_format == FMT_H263) {
631 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
632 s->coded_block= s->coded_block_base + s->b8_stride + 1;
634 /* cbp, ac_pred, pred_dir */
635 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
636 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
639 if (s->h263_pred || s->h263_plus || !s->encoding) {
641 //MN: we need these for error resilience of intra-frames
/* DC prediction values; 1024 is the reset value for 8-bit DC prediction. */
642 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
643 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
644 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
645 s->dc_val[2] = s->dc_val[1] + c_size;
646 for(i=0;i<yc_size;i++)
647 s->dc_val_base[i] = 1024;
650 /* which mb is a intra block */
651 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
652 memset(s->mbintra_table, 1, mb_array_size);
654 /* init macroblock skip table */
655 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
656 //Note the +1 is for a quicker mpeg4 slice_end detection
657 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
659 s->parse_context.state= -1;
660 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
661 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
662 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
663 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
667 s->context_initialized = 1;
/* Spin up the per-thread duplicate contexts; thread 0 reuses s itself.
 * start/end_mb_y split the macroblock rows evenly across threads. */
669 if (s->width && s->height) {
670 s->thread_context[0]= s;
671 threads = s->avctx->thread_count;
673 for(i=1; i<threads; i++){
674 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
675 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
678 for(i=0; i<threads; i++){
679 if(init_duplicate_context(s->thread_context[i], s) < 0)
681 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
682 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
692 /* init common structure for both encoder and decoder */
693 void MPV_common_end(MpegEncContext *s)
/* Tear down in the reverse spirit of MPV_common_init(): per-thread
 * contexts first (thread 0 is s itself, so only contexts 1..n-1 are
 * freed as separate allocations), then all global tables. */
697 for(i=0; i<s->avctx->thread_count; i++){
698 free_duplicate_context(s->thread_context[i]);
700 for(i=1; i<s->avctx->thread_count; i++){
701 av_freep(&s->thread_context[i]);
704 av_freep(&s->parse_context.buffer);
705 s->parse_context.buffer_size=0;
707 av_freep(&s->mb_type);
708 av_freep(&s->p_mv_table_base);
709 av_freep(&s->b_forw_mv_table_base);
710 av_freep(&s->b_back_mv_table_base);
711 av_freep(&s->b_bidir_forw_mv_table_base);
712 av_freep(&s->b_bidir_back_mv_table_base);
713 av_freep(&s->b_direct_mv_table_base);
/* The offset (non-base) pointers alias the freed bases; clear them so
 * nothing dereferences a dangling pointer. */
715 s->b_forw_mv_table= NULL;
716 s->b_back_mv_table= NULL;
717 s->b_bidir_forw_mv_table= NULL;
718 s->b_bidir_back_mv_table= NULL;
719 s->b_direct_mv_table= NULL;
723 av_freep(&s->b_field_mv_table_base[i][j][k]);
724 s->b_field_mv_table[i][j][k]=NULL;
726 av_freep(&s->b_field_select_table[i][j]);
727 av_freep(&s->p_field_mv_table_base[i][j]);
728 s->p_field_mv_table[i][j]=NULL;
730 av_freep(&s->p_field_select_table[i]);
733 av_freep(&s->dc_val_base);
734 av_freep(&s->coded_block_base);
735 av_freep(&s->mbintra_table);
736 av_freep(&s->cbp_table);
737 av_freep(&s->pred_dir_table);
739 av_freep(&s->mbskip_table);
740 av_freep(&s->prev_pict_types);
741 av_freep(&s->bitstream_buffer);
742 s->allocated_bitstream_buffer_size=0;
744 av_freep(&s->avctx->stats_out);
745 av_freep(&s->ac_stats);
746 av_freep(&s->error_status_table);
747 av_freep(&s->mb_index2xy);
748 av_freep(&s->lambda_table);
749 av_freep(&s->q_intra_matrix);
750 av_freep(&s->q_inter_matrix);
751 av_freep(&s->q_intra_matrix16);
752 av_freep(&s->q_inter_matrix16);
753 av_freep(&s->input_picture);
754 av_freep(&s->reordered_input_picture);
755 av_freep(&s->dct_offset);
/* Pictures must be released before the picture array itself. */
758 for(i=0; i<MAX_PICTURE_COUNT; i++){
759 free_picture(s, &s->picture[i]);
762 av_freep(&s->picture);
763 s->context_initialized = 0;
766 s->current_picture_ptr= NULL;
767 s->linesize= s->uvlinesize= 0;
770 av_freep(&s->visualization_buffer[i]);
772 avcodec_default_free_buffers(s->avctx);
/* Build the derived run/level tables of an RLTable. When static_store
 * is non-NULL the three tables per "last" value are carved out of the
 * caller-provided static buffer (and the function is idempotent: an
 * already-filled table returns early); otherwise they are heap-allocated. */
775 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
777 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
778 uint8_t index_run[MAX_RUN+1];
779 int last, run, level, start, end, i;
781 /* If table is static, we can quit if rl->max_level[0] is not NULL */
782 if(static_store && rl->max_level[0])
785 /* compute max_level[], max_run[] and index_run[] */
786 for(last=0;last<2;last++) {
/* rl->n doubles as the "unset" marker in index_run. */
795 memset(max_level, 0, MAX_RUN + 1);
796 memset(max_run, 0, MAX_LEVEL + 1);
797 memset(index_run, rl->n, MAX_RUN + 1);
798 for(i=start;i<end;i++) {
799 run = rl->table_run[i];
800 level = rl->table_level[i];
801 if (index_run[run] == rl->n)
803 if (level > max_level[run])
804 max_level[run] = level;
805 if (run > max_run[level])
806 max_run[level] = run;
/* Publish the computed tables, either into the static buffer at fixed
 * offsets (max_level, then max_run, then index_run) or via av_malloc. */
809 rl->max_level[last] = static_store[last];
811 rl->max_level[last] = av_malloc(MAX_RUN + 1);
812 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
814 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
816 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
817 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
819 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
821 rl->index_run[last] = av_malloc(MAX_RUN + 1);
822 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/* Precompute the RL_VLC tables: for each VLC table entry, resolve code
 * and length into a decoded (run, level, len) triple, applying the
 * per-qscale quantizer parameters (qmul/qadd, set outside this excerpt). */
826 void init_vlc_rl(RLTable *rl)
838 for(i=0; i<rl->vlc.table_size; i++){
839 int code= rl->vlc.table[i][0];
840 int len = rl->vlc.table[i][1];
843 if(len==0){ // illegal code
846 }else if(len<0){ //more bits needed
850 if(code==rl->n){ //esc
/* run is stored off-by-one; +192 flags a "last" coefficient. */
854 run= rl->table_run [code] + 1;
855 level= rl->table_level[code] * qmul + qadd;
856 if(code >= rl->last) run+=192;
859 rl->rl_vlc[q][i].len= len;
860 rl->rl_vlc[q][i].level= level;
861 rl->rl_vlc[q][i].run= run;
/* Find a free slot in s->picture[]. For non-shared use, prefer slots
 * that are both empty and untyped, then empty-but-typed, then any empty
 * slot. Overflow is treated as an internal error rather than returned
 * to the caller (see the rationale comment below). */
866 int ff_find_unused_picture(MpegEncContext *s, int shared){
870 for(i=0; i<MAX_PICTURE_COUNT; i++){
871 if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
874 for(i=0; i<MAX_PICTURE_COUNT; i++){
875 if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
877 for(i=0; i<MAX_PICTURE_COUNT; i++){
878 if(s->picture[i].data[0]==NULL) return i;
882 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
883 /* We could return -1, but the codec would crash trying to draw into a
884 * non-existing frame anyway. This is safer than waiting for a random crash.
885 * Also the return of this is never useful, an encoder must only allocate
886 * as much as allowed in the specification. This has no relationship to how
887 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
888 * enough for such valid streams).
889 * Plus, a decoder has to check stream validity and remove frames if too
890 * many reference frames are around. Waiting for "OOM" is not correct at
891 * all. Similarly, missing reference frames have to be replaced by
892 * interpolated/MC frames, anything else is a bug in the codec ...
/* Refresh the encoder's noise-reduction DCT offsets from the running
 * per-coefficient error statistics. Counts and sums are halved once the
 * count exceeds 2^16 so the statistics behave like a decaying average. */
898 static void update_noise_reduction(MpegEncContext *s){
901 for(intra=0; intra<2; intra++){
902 if(s->dct_count[intra] > (1<<16)){
904 s->dct_error_sum[intra][i] >>=1;
906 s->dct_count[intra] >>= 1;
/* offset = strength * count / mean_error, rounded; +1 avoids div by 0. */
910 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
916 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
918 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
924 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
926 /* mark&release old frames */
927 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) {
928 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
929 free_frame_buffer(s, s->last_picture_ptr);
931 /* release forgotten pictures */
932 /* if(mpeg124/h263) */
934 for(i=0; i<MAX_PICTURE_COUNT; i++){
935 if(s->picture[i].data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].reference){
936 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
937 free_frame_buffer(s, &s->picture[i]);
945 /* release non reference frames */
946 for(i=0; i<MAX_PICTURE_COUNT; i++){
947 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
948 free_frame_buffer(s, &s->picture[i]);
/* Pick (or reuse) the Picture that will hold the current frame. */
952 if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
953 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
955 i= ff_find_unused_picture(s, 0);
/* Reference status: H.264 tracks it per field/frame structure; for the
 * others, everything but B-frames is a reference. */
961 if (s->codec_id == CODEC_ID_H264)
962 pic->reference = s->picture_structure;
963 else if (s->pict_type != AV_PICTURE_TYPE_B)
967 pic->coded_picture_number= s->coded_picture_number++;
969 if(ff_alloc_picture(s, pic, 0) < 0)
972 s->current_picture_ptr= pic;
973 //FIXME use only the vars from current_pic
974 s->current_picture_ptr->top_field_first= s->top_field_first;
975 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
976 if(s->picture_structure != PICT_FRAME)
977 s->current_picture_ptr->top_field_first= (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
979 s->current_picture_ptr->interlaced_frame= !s->progressive_frame && !s->progressive_sequence;
982 s->current_picture_ptr->pict_type= s->pict_type;
983 // if(s->flags && CODEC_FLAG_QSCALE)
984 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
985 s->current_picture_ptr->key_frame= s->pict_type == AV_PICTURE_TYPE_I;
987 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* Rotate the reference chain: the previous "next" becomes "last". */
989 if (s->pict_type != AV_PICTURE_TYPE_B) {
990 s->last_picture_ptr= s->next_picture_ptr;
992 s->next_picture_ptr= s->current_picture_ptr;
994 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
995 s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL,
996 s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL,
997 s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL,
998 s->pict_type, s->dropable);*/
/* Missing references (stream starts on a non-keyframe, or a B-frame
 * without a future reference): allocate dummy frames so motion
 * compensation has something to read from instead of crashing. */
1000 if(s->codec_id != CODEC_ID_H264){
1001 if((s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL) &&
1002 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1003 if (s->pict_type != AV_PICTURE_TYPE_I)
1004 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1005 else if (s->picture_structure != PICT_FRAME)
1006 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1008 /* Allocate a dummy frame */
1009 i= ff_find_unused_picture(s, 0);
1010 s->last_picture_ptr= &s->picture[i];
1011 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
1014 if((s->next_picture_ptr==NULL || s->next_picture_ptr->data[0]==NULL) && s->pict_type==AV_PICTURE_TYPE_B){
1015 /* Allocate a dummy frame */
1016 i= ff_find_unused_picture(s, 0);
1017 s->next_picture_ptr= &s->picture[i];
1018 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1023 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1024 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1026 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->data[0]));
/* Field pictures: address only every second line by doubling the
 * strides (and starting one line down for the bottom field). */
1028 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1031 if(s->picture_structure == PICT_BOTTOM_FIELD){
1032 s->current_picture.data[i] += s->current_picture.linesize[i];
1034 s->current_picture.linesize[i] *= 2;
1035 s->last_picture.linesize[i] *=2;
1036 s->next_picture.linesize[i] *=2;
1040 s->error_recognition= avctx->error_recognition;
1042 /* set dequantizer, we can't do it during init as it might change for mpeg4
1043 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1044 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1045 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1046 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1047 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1048 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1049 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1051 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1052 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1055 if(s->dct_error_sum){
1056 assert(s->avctx->noise_reduction && s->encoding);
1058 update_noise_reduction(s);
1061 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1062 return ff_xvmc_field_start(s, avctx);
1067 /* generic function for encode/decode called after a frame has been coded/decoded */
/* NOTE(review): this excerpt has gaps in the original line numbering (missing
 * braces/declarations such as the loop index `i`); code kept byte-identical,
 * comments only. Finalizes the current frame: draws replicated edges for
 * unrestricted MV prediction, updates per-frame state, syncs current_picture
 * back into the picture array, and releases non-reference frame buffers. */
1068 void MPV_frame_end(MpegEncContext *s)
1071 /* draw edge for correct motion prediction if outside */
1072 //just to make sure that all data is rendered.
1073 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1074 ff_xvmc_field_end(s);
1075 }else if(!s->avctx->hwaccel
1076 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1077 && s->unrestricted_mv
1078 && s->current_picture.reference
1080 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
/* Edge extension: luma at full resolution, the two chroma planes at half
 * (>>1) — presumably 4:2:0 layout; the EDGE_WIDTH/2 pad matches that. */
1081 s->dsp.draw_edges(s->current_picture.data[0], s->linesize ,
1082 s->h_edge_pos , s->v_edge_pos ,
1083 EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1084 s->dsp.draw_edges(s->current_picture.data[1], s->uvlinesize,
1085 s->h_edge_pos>>1, s->v_edge_pos>>1,
1086 EDGE_WIDTH/2, EDGE_TOP | EDGE_BOTTOM);
1087 s->dsp.draw_edges(s->current_picture.data[2], s->uvlinesize,
1088 s->h_edge_pos>>1, s->v_edge_pos>>1,
1089 EDGE_WIDTH/2, EDGE_TOP | EDGE_BOTTOM);
/* Remember picture type / lambda for rate control of the next frame. */
1093 s->last_pict_type = s->pict_type;
1094 s->last_lambda_for[s->pict_type]= s->current_picture_ptr->quality;
1095 if(s->pict_type!=AV_PICTURE_TYPE_B){
1096 s->last_non_b_pict_type= s->pict_type;
1099 /* copy back current_picture variables */
1100 for(i=0; i<MAX_PICTURE_COUNT; i++){
1101 if(s->picture[i].data[0] == s->current_picture.data[0]){
1102 s->picture[i]= s->current_picture;
1106 assert(i<MAX_PICTURE_COUNT);
1110 /* release non-reference frames */
1111 for(i=0; i<MAX_PICTURE_COUNT; i++){
1112 if(s->picture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
1113 free_frame_buffer(s, &s->picture[i]);
1117 // clear copies, to avoid confusion
/* The struct copies are scratch views of the *_ptr pictures; zeroing them
 * here prevents stale pointers from being reused next frame. */
1119 memset(&s->last_picture, 0, sizeof(Picture));
1120 memset(&s->next_picture, 0, sizeof(Picture));
1121 memset(&s->current_picture, 0, sizeof(Picture));
1123 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
1127 * Draws a line from (sx, sy) to (ex, ey), additively blended into buf.
1128 * @param w width of the image
1129 * @param h height of the image
1130 * @param stride stride/linesize of the image
1131 * @param color color (brightness) of the line
/* NOTE(review): excerpt with gaps in original numbering (declarations of
 * x, y, f, fr and some braces are missing here); code kept byte-identical.
 * Anti-aliased line drawer used by the motion-vector visualization: walks
 * the major axis and splits `color` between the two adjacent pixels using
 * a 16.16 fixed-point slope `f`. */
1133 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Clamp both endpoints into the image so the per-pixel writes below
 * cannot go out of bounds. */
1136 sx= av_clip(sx, 0, w-1);
1137 sy= av_clip(sy, 0, h-1);
1138 ex= av_clip(ex, 0, w-1);
1139 ey= av_clip(ey, 0, h-1);
1141 buf[sy*stride + sx]+= color;
1143 if(FFABS(ex - sx) > FFABS(ey - sy)){
/* Mostly-horizontal case: ensure left-to-right order, then step in x. */
1145 FFSWAP(int, sx, ex);
1146 FFSWAP(int, sy, ey);
1148 buf+= sx + sy*stride;
1150 f= ((ey-sy)<<16)/ex;
1151 for(x= 0; x <= ex; x++){
/* fr is the fractional part of the ideal y; weight the two pixels by it. */
1154 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1155 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* Mostly-vertical case: same scheme with x and y roles exchanged. */
1159 FFSWAP(int, sx, ex);
1160 FFSWAP(int, sy, ey);
1162 buf+= sx + sy*stride;
1164 if(ey) f= ((ex-sx)<<16)/ey;
1166 for(y= 0; y <= ey; y++){
1169 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1170 buf[y*stride + x+1]+= (color* fr )>>16;
1176 * draws an arrow from (ex, ey) -> (sx, sy).
1177 * @param w width of the image
1178 * @param h height of the image
1179 * @param stride stride/linesize of the image
1180 * @param color color of the arrow
/* NOTE(review): excerpt with gaps (dx/dy/rx/ry declarations missing);
 * code kept byte-identical, comments only.
 * Draws an arrow shaft plus, for vectors longer than 3 pixels, two short
 * head segments at (sx, sy) — i.e. the head marks the (sx, sy) end. */
1182 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
/* Loose clamp (±100 px outside the frame); draw_line() does the hard
 * clipping, this only keeps the fixed-point math in range. */
1185 sx= av_clip(sx, -100, w+100);
1186 sy= av_clip(sy, -100, h+100);
1187 ex= av_clip(ex, -100, w+100);
1188 ey= av_clip(ey, -100, h+100);
1193 if(dx*dx + dy*dy > 3*3){
/* length is |(rx,ry)| in 12.4 fixed point (operand pre-shifted by 8). */
1196 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1198 //FIXME subpixel accuracy
/* Scale (rx,ry) to an ~3-pixel head segment. */
1199 rx= ROUNDED_DIV(rx*3<<4, length);
1200 ry= ROUNDED_DIV(ry*3<<4, length);
/* Two head strokes: one along the vector, one rotated 90 degrees. */
1202 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1203 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1205 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1209 * Prints debugging info for the given picture.
/* NOTE(review): sampled excerpt — many interior lines (loop variable
 * declarations, case labels, braces, `continue`s) are missing; the code
 * below is byte-identical to what is visible, comments only.
 * Emits macroblock-level debug output for `pict`: a textual per-MB map
 * (skip count / qscale / mb type) to the log, and, when visual-debug
 * flags are set, draws motion vectors, QP shading and MB-type coloring
 * directly into a copy of the picture planes. No-op for hwaccel frames. */
1211 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1213 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
/* --- Part 1: textual per-macroblock dump --------------------------- */
1215 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1218 av_log(s->avctx,AV_LOG_DEBUG,"New frame, type: ");
1219 switch (pict->pict_type) {
1220 case AV_PICTURE_TYPE_I: av_log(s->avctx,AV_LOG_DEBUG,"I\n"); break;
1221 case AV_PICTURE_TYPE_P: av_log(s->avctx,AV_LOG_DEBUG,"P\n"); break;
1222 case AV_PICTURE_TYPE_B: av_log(s->avctx,AV_LOG_DEBUG,"B\n"); break;
1223 case AV_PICTURE_TYPE_S: av_log(s->avctx,AV_LOG_DEBUG,"S\n"); break;
1224 case AV_PICTURE_TYPE_SI: av_log(s->avctx,AV_LOG_DEBUG,"SI\n"); break;
1225 case AV_PICTURE_TYPE_SP: av_log(s->avctx,AV_LOG_DEBUG,"SP\n"); break;
1227 for(y=0; y<s->mb_height; y++){
1228 for(x=0; x<s->mb_width; x++){
1229 if(s->avctx->debug&FF_DEBUG_SKIP){
/* Consecutive-skip counter, clamped to a single digit for the map. */
1230 int count= s->mbskip_table[x + y*s->mb_stride];
1231 if(count>9) count=9;
1232 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1234 if(s->avctx->debug&FF_DEBUG_QP){
1235 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1237 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1238 int mb_type= pict->mb_type[x + y*s->mb_stride];
1239 //Type & MV direction
/* One character per MB: intra (I/i/A), direct (D/d), GMC (G/g),
 * skip (S), list usage (> forward-only, < backward-only, X both). */
1241 av_log(s->avctx, AV_LOG_DEBUG, "P");
1242 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1243 av_log(s->avctx, AV_LOG_DEBUG, "A");
1244 else if(IS_INTRA4x4(mb_type))
1245 av_log(s->avctx, AV_LOG_DEBUG, "i");
1246 else if(IS_INTRA16x16(mb_type))
1247 av_log(s->avctx, AV_LOG_DEBUG, "I");
1248 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1249 av_log(s->avctx, AV_LOG_DEBUG, "d");
1250 else if(IS_DIRECT(mb_type))
1251 av_log(s->avctx, AV_LOG_DEBUG, "D");
1252 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1253 av_log(s->avctx, AV_LOG_DEBUG, "g");
1254 else if(IS_GMC(mb_type))
1255 av_log(s->avctx, AV_LOG_DEBUG, "G");
1256 else if(IS_SKIP(mb_type))
1257 av_log(s->avctx, AV_LOG_DEBUG, "S");
1258 else if(!USES_LIST(mb_type, 1))
1259 av_log(s->avctx, AV_LOG_DEBUG, ">");
1260 else if(!USES_LIST(mb_type, 0))
1261 av_log(s->avctx, AV_LOG_DEBUG, "<");
1263 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1264 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* Second character: partitioning (+ 8x8, - 16x8, | 8x16, space 16x16). */
1269 av_log(s->avctx, AV_LOG_DEBUG, "+");
1270 else if(IS_16X8(mb_type))
1271 av_log(s->avctx, AV_LOG_DEBUG, "-");
1272 else if(IS_8X16(mb_type))
1273 av_log(s->avctx, AV_LOG_DEBUG, "|");
1274 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1275 av_log(s->avctx, AV_LOG_DEBUG, " ");
1277 av_log(s->avctx, AV_LOG_DEBUG, "?");
/* Third character: '=' marks interlaced MBs. */
1280 if(IS_INTERLACED(mb_type))
1281 av_log(s->avctx, AV_LOG_DEBUG, "=");
1283 av_log(s->avctx, AV_LOG_DEBUG, " ");
1285 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1287 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* --- Part 2: visual overlays drawn into a copied frame ------------- */
1291 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1292 const int shift= 1 + s->quarter_sample;
1296 int h_chroma_shift, v_chroma_shift, block_height;
1297 const int width = s->avctx->width;
1298 const int height= s->avctx->height;
1299 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1300 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1301 s->low_delay=0; //needed to see the vectors without trashing the buffers
1303 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
/* Work on visualization_buffer copies so the decoder's reference
 * planes are not scribbled on. */
1305 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1306 pict->data[i]= s->visualization_buffer[i];
1308 pict->type= FF_BUFFER_TYPE_COPY;
1310 block_height = 16>>v_chroma_shift;
1312 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1314 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1315 const int mb_index= mb_x + mb_y*s->mb_stride;
/* Motion-vector arrows: type 0 = P forward, 1 = B forward, 2 = B
 * backward; each is gated by its own FF_DEBUG_VIS_MV_* flag. */
1316 if((s->avctx->debug_mv) && pict->motion_val){
1318 for(type=0; type<3; type++){
1321 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1325 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1329 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1334 if(!USES_LIST(pict->mb_type[mb_index], direction))
1337 if(IS_8X8(pict->mb_type[mb_index])){
/* Four arrows, one per 8x8 sub-block, anchored at its center. */
1340 int sx= mb_x*16 + 4 + 8*(i&1);
1341 int sy= mb_y*16 + 4 + 8*(i>>1);
1342 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1343 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1344 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1345 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1347 }else if(IS_16X8(pict->mb_type[mb_index])){
1351 int sy=mb_y*16 + 4 + 8*i;
1352 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1353 int mx=(pict->motion_val[direction][xy][0]>>shift);
1354 int my=(pict->motion_val[direction][xy][1]>>shift);
1356 if(IS_INTERLACED(pict->mb_type[mb_index]))
1359 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1361 }else if(IS_8X16(pict->mb_type[mb_index])){
1364 int sx=mb_x*16 + 4 + 8*i;
1366 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1367 int mx=(pict->motion_val[direction][xy][0]>>shift);
1368 int my=(pict->motion_val[direction][xy][1]>>shift);
1370 if(IS_INTERLACED(pict->mb_type[mb_index]))
1373 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
/* 16x16 (default) case: a single arrow from the MB center. */
1376 int sx= mb_x*16 + 8;
1377 int sy= mb_y*16 + 8;
1378 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1379 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1380 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1381 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* QP visualization: paint both chroma planes with a gray level
 * proportional to qscale (max qscale assumed 31 here). */
1385 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1386 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1388 for(y=0; y<block_height; y++){
1389 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1390 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* MB-type visualization: color each MB by type via a U/V hue picked
 * on a color circle (COLOR macro), then mark partition boundaries. */
1393 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1394 int mb_type= pict->mb_type[mb_index];
1397 #define COLOR(theta, r)\
1398 u= (int)(128 + r*cos(theta*3.141592/180));\
1399 v= (int)(128 + r*sin(theta*3.141592/180));
1403 if(IS_PCM(mb_type)){
1405 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1407 }else if(IS_INTRA4x4(mb_type)){
1409 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1411 }else if(IS_DIRECT(mb_type)){
1413 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1415 }else if(IS_GMC(mb_type)){
1417 }else if(IS_SKIP(mb_type)){
1419 }else if(!USES_LIST(mb_type, 1)){
1421 }else if(!USES_LIST(mb_type, 0)){
1424 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
/* Replicate the single-byte U/V values across a 64-bit word for
 * fast 8-pixel-wide stores below. */
1428 u*= 0x0101010101010101ULL;
1429 v*= 0x0101010101010101ULL;
1430 for(y=0; y<block_height; y++){
1431 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1432 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* XOR 0x80 toggles brightness to trace partition split lines in luma. */
1436 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1437 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1438 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1440 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1442 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1444 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1445 int dm= 1 << (mv_sample_log2-2);
1447 int sx= mb_x*16 + 8*(i&1);
1448 int sy= mb_y*16 + 8*(i>>1);
1449 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* Mark 4x4 sub-splits when the sub-block MVs actually differ. */
1451 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1452 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1454 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1455 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1456 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1460 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
/* Reset so the skip map starts fresh for the next frame. */
1464 s->mbskip_table[mb_index]=0;
/* NOTE(review): sampled excerpt with missing interior lines (sx/sy
 * declarations, braces, return); code kept byte-identical, comments only.
 * Half-pel luma motion compensation for low-resolution decoding: computes
 * the sub-pel phase (sx, sy) under the lowres scale, fixes up the source
 * pointer, falls back to emulated_edge_mc() when the reference read would
 * leave the padded picture, then calls the chroma-style MC kernel. */
1470 static inline int hpel_motion_lowres(MpegEncContext *s,
1471 uint8_t *dest, uint8_t *src,
1472 int field_based, int field_select,
1473 int src_x, int src_y,
1474 int width, int height, int stride,
1475 int h_edge_pos, int v_edge_pos,
1476 int w, int h, h264_chroma_mc_func *pix_op,
1477 int motion_x, int motion_y)
1479 const int lowres= s->avctx->lowres;
1480 const int op_index= FFMIN(lowres, 2);
/* s_mask isolates the sub-pel fraction of the MV at this lowres level. */
1481 const int s_mask= (2<<lowres)-1;
1485 if(s->quarter_sample){
1490 sx= motion_x & s_mask;
1491 sy= motion_y & s_mask;
1492 src_x += motion_x >> (lowres+1);
1493 src_y += motion_y >> (lowres+1);
1495 src += src_y * stride + src_x;
/* Unsigned compare doubles as a negative-coordinate check. */
1497 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1498 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1499 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1500 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1501 src= s->edge_emu_buffer;
/* Rescale the phase to the 1/8-pel domain the h264 chroma MC expects. */
1505 sx= (sx << 2) >> lowres;
1506 sy= (sy << 2) >> lowres;
1509 pix_op[op_index](dest, src, stride, h, sx, sy);
1513 /* apply one mpeg motion vector to the three components */
/* NOTE(review): sampled excerpt with gaps (several braces / else branches
 * missing); code kept byte-identical, comments only.
 * Low-resolution MC for one MPEG motion vector: derives luma and chroma
 * source positions and sub-pel phases per output format (H.263 / H.261 /
 * default MPEG chroma rounding), handles out-of-picture reads via
 * emulated_edge_mc(), and applies pix_op to Y, Cb and Cr. `field_based`
 * doubles the linesizes; `bottom_field` offsets dest by one line. */
1514 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1515 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1516 int field_based, int bottom_field, int field_select,
1517 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1518 int motion_x, int motion_y, int h, int mb_y)
1520 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1521 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1522 const int lowres= s->avctx->lowres;
1523 const int op_index= FFMIN(lowres, 2);
1524 const int block_s= 8>>lowres;
1525 const int s_mask= (2<<lowres)-1;
1526 const int h_edge_pos = s->h_edge_pos >> lowres;
1527 const int v_edge_pos = s->v_edge_pos >> lowres;
1528 linesize = s->current_picture.linesize[0] << field_based;
1529 uvlinesize = s->current_picture.linesize[1] << field_based;
1531 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* Field parity correction of the vertical MV at reduced resolution. */
1537 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1540 sx= motion_x & s_mask;
1541 sy= motion_y & s_mask;
1542 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1543 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
1545 if (s->out_format == FMT_H263) {
1546 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1547 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1550 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1553 uvsx = (2*mx) & s_mask;
1554 uvsy = (2*my) & s_mask;
1555 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1556 uvsrc_y = mb_y*block_s + (my >> lowres);
/* Default (MPEG) chroma path — mx/my presumably set in the elided lines. */
1562 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1563 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1566 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1567 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1568 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* Out-of-picture reference: synthesize padded source in edge_emu_buffer. */
1570 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1571 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1572 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1573 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1574 ptr_y = s->edge_emu_buffer;
1575 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1576 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1577 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1578 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1579 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1580 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1586 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
1587 dest_y += s->linesize;
1588 dest_cb+= s->uvlinesize;
1589 dest_cr+= s->uvlinesize;
1593 ptr_y += s->linesize;
1594 ptr_cb+= s->uvlinesize;
1595 ptr_cr+= s->uvlinesize;
/* Convert sub-pel phases to the 1/8-pel domain of the chroma MC kernels. */
1598 sx= (sx << 2) >> lowres;
1599 sy= (sy << 2) >> lowres;
1600 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1602 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1603 uvsx= (uvsx << 2) >> lowres;
1604 uvsy= (uvsy << 2) >> lowres;
1605 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1606 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1608 //FIXME h261 lowres loop filter
/* NOTE(review): sampled excerpt with gaps (mx/my parameters, `ptr` decl
 * and some braces elided); code kept byte-identical, comments only.
 * Chroma MC for 4MV (four 8x8 luma vectors) macroblocks in lowres mode:
 * derives a single averaged chroma vector via ff_h263_round_chroma(),
 * then applies pix_op to Cb and Cr, with edge emulation when enabled. */
1611 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1612 uint8_t *dest_cb, uint8_t *dest_cr,
1613 uint8_t **ref_picture,
1614 h264_chroma_mc_func *pix_op,
1616 const int lowres= s->avctx->lowres;
1617 const int op_index= FFMIN(lowres, 2);
1618 const int block_s= 8>>lowres;
1619 const int s_mask= (2<<lowres)-1;
1620 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1621 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1622 int emu=0, src_x, src_y, offset, sx, sy;
1625 if(s->quarter_sample){
1630 /* In case of 8X8, we construct a single chroma motion vector
1631 with a special rounding */
1632 mx= ff_h263_round_chroma(mx);
1633 my= ff_h263_round_chroma(my);
1637 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1638 src_y = s->mb_y*block_s + (my >> (lowres+1));
1640 offset = src_y * s->uvlinesize + src_x;
1641 ptr = ref_picture[1] + offset;
1642 if(s->flags&CODEC_FLAG_EMU_EDGE){
1643 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1644 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1645 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1646 ptr= s->edge_emu_buffer;
/* Phase rescale to the 1/8-pel domain used by the h264 chroma kernels. */
1650 sx= (sx << 2) >> lowres;
1651 sy= (sy << 2) >> lowres;
1652 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Same offset/emulation path reused for the Cr plane (emu flag set above). */
1654 ptr = ref_picture[2] + offset;
1656 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1657 ptr= s->edge_emu_buffer;
1659 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1663 * motion compensation of a single macroblock
1665 * @param dest_y luma destination pointer
1666 * @param dest_cb chroma cb/u destination pointer
1667 * @param dest_cr chroma cr/v destination pointer
1668 * @param dir direction (0->forward, 1->backward)
1669 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1670 * @param pix_op halfpel motion compensation function (average or put normally)
1671 * the motion vectors are taken from s->mv and the MV type from s->mv_type
/* NOTE(review): sampled excerpt — case labels (MV_TYPE_16X16/8X8/FIELD/
 * 16X8/DMV presumably), loop headers and braces are elided; code kept
 * byte-identical, comments only. Dispatches on s->mv_type to the
 * appropriate lowres MC helper(s) for one macroblock. */
1673 static inline void MPV_motion_lowres(MpegEncContext *s,
1674 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1675 int dir, uint8_t **ref_picture,
1676 h264_chroma_mc_func *pix_op)
1680 const int lowres= s->avctx->lowres;
1681 const int block_s= 8>>lowres;
1686 switch(s->mv_type) {
/* 16x16 case: one vector covers the whole MB. */
1688 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1690 ref_picture, pix_op,
1691 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 8x8 case: four luma vectors; chroma uses their averaged vector below. */
1697 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1698 ref_picture[0], 0, 0,
1699 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1700 s->width, s->height, s->linesize,
1701 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1702 block_s, block_s, pix_op,
1703 s->mv[dir][i][0], s->mv[dir][i][1]);
1705 mx += s->mv[dir][i][0];
1706 my += s->mv[dir][i][1];
1709 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1710 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
/* Field-MV case: frame pictures compensate top and bottom fields
 * separately; field pictures may need the current picture as reference
 * for the opposite-parity field. */
1713 if (s->picture_structure == PICT_FRAME) {
1715 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1716 1, 0, s->field_select[dir][0],
1717 ref_picture, pix_op,
1718 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1720 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1721 1, 1, s->field_select[dir][1],
1722 ref_picture, pix_op,
1723 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
1725 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1726 ref_picture= s->current_picture_ptr->data;
1729 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1730 0, 0, s->field_select[dir][0],
1731 ref_picture, pix_op,
1732 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
/* 16x8 case: two vectors per field picture, each covering 16x8 pixels. */
1737 uint8_t ** ref2picture;
1739 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1740 ref2picture= ref_picture;
1742 ref2picture= s->current_picture_ptr->data;
1745 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1746 0, 0, s->field_select[dir][i],
1747 ref2picture, pix_op,
1748 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1750 dest_y += 2*block_s*s->linesize;
1751 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1752 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
/* Dual-prime style case: put the first prediction, then average the
 * second over it by switching pix_op to the avg table. */
1756 if(s->picture_structure == PICT_FRAME){
1760 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1762 ref_picture, pix_op,
1763 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
1765 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1769 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1770 0, 0, s->picture_structure != i+1,
1771 ref_picture, pix_op,
1772 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1774 // after put we make avg of the same block
1775 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1777 //opposite parity is always in the same frame if this is second field
1778 if(!s->first_field){
1779 ref_picture = s->current_picture_ptr->data;
1788 /* put block[] to dest[] */
/* Dequantizes an intra block with the current intra dequantizer, then
 * writes (overwrites, no add) the IDCT result to dest.
 * NOTE(review): braces elided by the excerpt; code kept byte-identical. */
1789 static inline void put_dct(MpegEncContext *s,
1790 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1792 s->dct_unquantize_intra(s, block, i, qscale);
1793 s->dsp.idct_put (dest, line_size, block);
1796 /* add block[] to dest[] */
/* Adds the IDCT of an already-dequantized block onto dest, skipping
 * blocks with no coded coefficients (block_last_index < 0).
 * NOTE(review): closing braces elided by the excerpt; code byte-identical. */
1797 static inline void add_dct(MpegEncContext *s,
1798 DCTELEM *block, int i, uint8_t *dest, int line_size)
1800 if (s->block_last_index[i] >= 0) {
1801 s->dsp.idct_add (dest, line_size, block);
/* Dequantizes an inter block and adds its IDCT onto dest; no-op when the
 * block has no coded coefficients.
 * NOTE(review): closing braces elided by the excerpt; code byte-identical. */
1805 static inline void add_dequant_dct(MpegEncContext *s,
1806 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1808 if (s->block_last_index[i] >= 0) {
1809 s->dct_unquantize_inter(s, block, i, qscale);
1811 s->dsp.idct_add (dest, line_size, block);
1816 * cleans dc, ac, coded_block for the current non intra MB
/* NOTE(review): sampled excerpt (doc-comment opener and some lines elided);
 * code kept byte-identical, comments only.
 * Resets the intra prediction state (DC predictors to 1024, AC predictors
 * to 0, and coded_block for MSMPEG4 v3+) for the current macroblock so a
 * following intra MB starts from neutral predictors. */
1818 void ff_clean_intra_table_entries(MpegEncContext *s)
1820 int wrap = s->b8_stride;
1821 int xy = s->block_index[0];
/* Luma: the 2x2 group of 8x8 blocks of this MB. 1024 is the neutral DC. */
1824 s->dc_val[0][xy + 1 ] =
1825 s->dc_val[0][xy + wrap] =
1826 s->dc_val[0][xy + 1 + wrap] = 1024;
1828 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
1829 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
1830 if (s->msmpeg4_version>=3) {
1831 s->coded_block[xy ] =
1832 s->coded_block[xy + 1 ] =
1833 s->coded_block[xy + wrap] =
1834 s->coded_block[xy + 1 + wrap] = 0;
/* Chroma: one entry per MB, addressed with mb_stride. */
1837 wrap = s->mb_stride;
1838 xy = s->mb_x + s->mb_y * wrap;
1840 s->dc_val[2][xy] = 1024;
1842 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
1843 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
1845 s->mbintra_table[xy]= 0;
1848 /* generic function called after a macroblock has been parsed by the
1849 decoder or after it has been encoded by the encoder.
1851 Important variables used:
1852 s->mb_intra : true if intra macroblock
1853 s->mv_dir : motion vector direction
1854 s->mv_type : motion vector type
1855 s->mv : motion vector
1856 s->interlaced_dct : true if interlaced dct used (mpeg2)
/* NOTE(review): sampled excerpt — returns, else branches, loop headers and
 * many braces are elided; the code below is byte-identical to what is
 * visible, comments only. Reconstructs one macroblock into the current
 * picture: optional XvMC fast path, debug DCT dump, DC predictor upkeep,
 * skip-MB bookkeeping, motion compensation (lowres or full-res path),
 * then residual dequant + IDCT (inter add / intra put). */
1858 static av_always_inline
1859 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
1860 int lowres_flag, int is_mpeg12)
1862 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
1863 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1864 ff_xvmc_decode_mb(s);//xvmc uses pblocks
1868 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
1869 /* save DCT coefficients */
1871 DCTELEM *dct = &s->current_picture.dct_coeff[mb_xy*64*6];
1872 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
1874 for(j=0; j<64; j++){
1875 *dct++ = block[i][s->dsp.idct_permutation[j]];
1876 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
1878 av_log(s->avctx, AV_LOG_DEBUG, "\n");
1882 s->current_picture.qscale_table[mb_xy]= s->qscale;
1884 /* update DC predictors for P macroblocks */
1886 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
1887 if(s->mbintra_table[mb_xy])
1888 ff_clean_intra_table_entries(s);
/* Neutral DC predictor, scaled by the stream's DC precision. */
1892 s->last_dc[2] = 128 << s->intra_dc_precision;
1895 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
1896 s->mbintra_table[mb_xy]=1;
1898 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
1899 uint8_t *dest_y, *dest_cb, *dest_cr;
1900 int dct_linesize, dct_offset;
1901 op_pixels_func (*op_pix)[4];
1902 qpel_mc_func (*op_qpix)[16];
1903 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
1904 const int uvlinesize= s->current_picture.linesize[1];
1905 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
1906 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
1908 /* avoid copy if macroblock skipped in last frame too */
1909 /* skip only during decoding as we might trash the buffers during encoding a bit */
1911 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
1912 const int age= s->current_picture.age;
1916 if (s->mb_skipped) {
1918 assert(s->pict_type!=AV_PICTURE_TYPE_I);
1920 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
1921 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1923 /* if previous was skipped too, then nothing to do ! */
1924 if (*mbskip_ptr >= age && s->current_picture.reference){
1927 } else if(!s->current_picture.reference){
1928 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
1929 if(*mbskip_ptr >99) *mbskip_ptr= 99;
1931 *mbskip_ptr = 0; /* not skipped */
/* Interlaced DCT doubles the row stride and offsets the second field
 * by one line; progressive offsets by a full block of rows. */
1935 dct_linesize = linesize << s->interlaced_dct;
1936 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
1940 dest_cb= s->dest[1];
1941 dest_cr= s->dest[2];
/* Non-readable (e.g. B-frame decode) path renders into scratchpad and
 * copies out at the end. */
1943 dest_y = s->b_scratchpad;
1944 dest_cb= s->b_scratchpad+16*linesize;
1945 dest_cr= s->b_scratchpad+32*linesize;
1949 /* motion handling */
1950 /* decoding or more than one mb_type (MC was already done otherwise) */
/* Lowres MC path: chroma-style kernels; forward put then backward avg. */
1953 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
1955 if (s->mv_dir & MV_DIR_FORWARD) {
1956 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix);
1957 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
1959 if (s->mv_dir & MV_DIR_BACKWARD) {
1960 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix);
/* Full-resolution MC path: rounding mode selects the put kernel set. */
1963 op_qpix= s->me.qpel_put;
1964 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
1965 op_pix = s->dsp.put_pixels_tab;
1967 op_pix = s->dsp.put_no_rnd_pixels_tab;
1969 if (s->mv_dir & MV_DIR_FORWARD) {
1970 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
1971 op_pix = s->dsp.avg_pixels_tab;
1972 op_qpix= s->me.qpel_avg;
1974 if (s->mv_dir & MV_DIR_BACKWARD) {
1975 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.data, op_pix, op_qpix);
1980 /* skip dequant / idct if we are really late ;) */
1981 if(s->avctx->skip_idct){
1982 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
1983 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
1984 || s->avctx->skip_idct >= AVDISCARD_ALL)
1988 /* add dct residue */
/* Codecs that do not dequantize during bitstream parsing take the
 * dequant+add path; the rest already hold dequantized coefficients. */
1989 if(s->encoding || !( s->h263_msmpeg4 || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
1990 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
1991 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
1992 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
1993 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
1994 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
1996 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1997 if (s->chroma_y_shift){
1998 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
1999 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2003 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2004 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2005 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2006 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
2009 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2010 add_dct(s, block[0], 0, dest_y , dct_linesize);
2011 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2012 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2013 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2015 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2016 if(s->chroma_y_shift){//Chroma420
2017 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2018 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2021 dct_linesize = uvlinesize << s->interlaced_dct;
2022 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2024 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2025 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2026 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2027 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2028 if(!s->chroma_x_shift){//Chroma444
2029 add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
2030 add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
2031 add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
2032 add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
2037 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2038 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2041 /* dct only in intra block */
2042 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2043 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2044 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2045 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2046 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2048 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2049 if(s->chroma_y_shift){
2050 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2051 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2055 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2056 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2057 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2058 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
/* MPEG-1/2 intra: coefficients are already dequantized, plain idct_put. */
2062 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2063 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2064 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2065 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2067 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2068 if(s->chroma_y_shift){
2069 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2070 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2073 dct_linesize = uvlinesize << s->interlaced_dct;
2074 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*8;
2076 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2077 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2078 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2079 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2080 if(!s->chroma_x_shift){//Chroma444
2081 s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
2082 s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
2083 s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
2084 s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
/* Non-readable path: copy the scratchpad result into the real dest. */
2092 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2093 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2094 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/**
 * Reconstruct one macroblock of the current picture from its decoded
 * DCT coefficient blocks.
 *
 * Thin dispatcher: calls MPV_decode_mb_internal() with constant
 * arguments (lowres on/off, MPEG-1/2 bitstream or not) so the compiler
 * can specialize the internal function for each combination.
 */
2099 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2101 if(s->out_format == FMT_MPEG1) {
/* MPEG-1/2 path (is_mpeg12 = 1) */
2102 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2103 else MPV_decode_mb_internal(s, block, 0, 1);
/* all other codecs (is_mpeg12 = 0) */
2106 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2107 else MPV_decode_mb_internal(s, block, 0, 0);
2112 * @param h is the normal height, this will be reduced automatically if needed for the last row
/**
 * Notify the user application that a horizontal band of the picture has
 * been decoded, via the avctx->draw_horiz_band() callback.
 *
 * @param y first row of the band
 * @param h nominal band height; clipped below against the remaining
 *          picture (or field) height
 */
2114 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2115 if (s->avctx->draw_horiz_band) {
/* field pictures cover only half the vertical resolution */
2117 const int field_pic= s->picture_structure != PICT_FRAME;
2120 h= FFMIN(h, (s->avctx->height>>field_pic) - y);
/* skip field bands unless the application opted in to them */
2122 if(s->field_pic && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)){
2125 if(s->first_field) return;
/* pick the frame to hand out: the picture just decoded when output
 * order equals coded order (B frame, low delay, or explicitly
 * requested coded order), otherwise the previously decoded one */
2128 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2129 src= (AVFrame*)s->current_picture_ptr;
2130 else if(s->last_picture_ptr)
2131 src= (AVFrame*)s->last_picture_ptr;
2135 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
/* byte offsets of the band's first row within each plane */
2141 offset[0]= y * s->linesize;
2143 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2149 s->avctx->draw_horiz_band(s->avctx, src, offset,
2150 y, s->picture_structure, h);
/**
 * Initialize per-row block indices and destination plane pointers for
 * the current macroblock row; must be called before decoding each row
 * (pointers are positioned one MB left of mb_x, i.e. at mb_x - 1).
 */
2154 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2155 const int linesize= s->current_picture.linesize[0]; //not s->linesize as this would be wrong for field pics
2156 const int uvlinesize= s->current_picture.linesize[1];
/* log2 of the MB size in pixels: 16 normally, halved per lowres step */
2157 const int mb_size= 4 - s->avctx->lowres;
/* luma 8x8 block indices: two per row in the b8_stride grid */
2159 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2160 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2161 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2162 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
/* chroma block indices live after the luma area in the same array */
2163 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2164 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2165 //block_index is not used by mpeg2, so it is not affected by chroma_format
/* horizontal start: one MB to the left of mb_x in each plane */
2167 s->dest[0] = s->current_picture.data[0] + ((s->mb_x - 1) << mb_size);
2168 s->dest[1] = s->current_picture.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2169 s->dest[2] = s->current_picture.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2171 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
2173 if(s->picture_structure==PICT_FRAME){
/* frame picture: advance by full MB rows */
2174 s->dest[0] += s->mb_y * linesize << mb_size;
2175 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2176 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
/* field picture: two mb_y values map onto one stored row pair */
2178 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2179 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2180 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2181 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/**
 * Flush all decoder state: release every allocated picture buffer,
 * drop the current/last/next picture references, and reset the MB
 * position, parser context and bitstream buffer.  Called e.g. on seek.
 */
2186 void ff_mpeg_flush(AVCodecContext *avctx){
2188 MpegEncContext *s = avctx->priv_data;
/* nothing to do if the context was never fully initialized */
2190 if(s==NULL || s->picture==NULL)
2193 for(i=0; i<MAX_PICTURE_COUNT; i++){
/* only free buffers this codec owns (internal) or got from the user */
2194 if(s->picture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL
2195 || s->picture[i].type == FF_BUFFER_TYPE_USER))
2196 free_frame_buffer(s, &s->picture[i]);
2198 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2200 s->mb_x= s->mb_y= 0;
/* reset start-code parser state so stale partial frames are dropped */
2203 s->parse_context.state= -1;
2204 s->parse_context.frame_start_found= 0;
2205 s->parse_context.overread= 0;
2206 s->parse_context.overread_index= 0;
2207 s->parse_context.index= 0;
2208 s->parse_context.last_index= 0;
2209 s->bitstream_buffer_size=0;
/**
 * MPEG-1 intra-block inverse quantization (C reference version).
 * Rescales coefficients 1..block_last_index in place; the DC
 * coefficient uses the separate DC scaler instead of the matrix.
 */
2213 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2214 DCTELEM *block, int n, int qscale)
2216 int i, level, nCoeffs;
2217 const uint16_t *quant_matrix;
2219 nCoeffs= s->block_last_index[n];
/* DC: luma uses y_dc_scale, chroma uses c_dc_scale (branch on n elided
 * in this view — presumably n < 4 selects luma; TODO confirm) */
2222 block[0] = block[0] * s->y_dc_scale;
2224 block[0] = block[0] * s->c_dc_scale;
2225 /* XXX: only mpeg1 */
2226 quant_matrix = s->intra_matrix;
2227 for(i=1;i<=nCoeffs;i++) {
/* walk in scan order, writing back at the permuted (raster) position */
2228 int j= s->intra_scantable.permutated[i];
/* level * qscale * matrix >> 3, then (x-1)|1 forces the magnitude odd
 * (MPEG-1 oddification / mismatch control) */
2233 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2234 level = (level - 1) | 1;
/* same computation for the opposite sign branch */
2237 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2238 level = (level - 1) | 1;
/**
 * MPEG-1 inter (non-intra) block inverse quantization (C reference
 * version).  All coefficients, including index 0, use the inter matrix;
 * there is no special DC scaler for inter blocks.
 */
2245 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2246 DCTELEM *block, int n, int qscale)
2248 int i, level, nCoeffs;
2249 const uint16_t *quant_matrix;
2251 nCoeffs= s->block_last_index[n];
2253 quant_matrix = s->inter_matrix;
2254 for(i=0; i<=nCoeffs; i++) {
2255 int j= s->intra_scantable.permutated[i];
/* inter reconstruction: (2*level + 1) * qscale * matrix >> 4,
 * then (x-1)|1 forces the magnitude odd (MPEG-1 mismatch control) */
2260 level = (((level << 1) + 1) * qscale *
2261 ((int) (quant_matrix[j]))) >> 4;
2262 level = (level - 1) | 1;
/* same computation for the opposite sign branch */
2265 level = (((level << 1) + 1) * qscale *
2266 ((int) (quant_matrix[j]))) >> 4;
2267 level = (level - 1) | 1;
/**
 * MPEG-2 intra-block inverse quantization (C reference version).
 * Like the MPEG-1 intra variant but without oddification; MPEG-2 uses
 * a mismatch-control sum instead (handled outside the visible lines).
 */
2274 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2275 DCTELEM *block, int n, int qscale)
2277 int i, level, nCoeffs;
2278 const uint16_t *quant_matrix;
/* with alternate scan the last-index does not bound the scan order,
 * so all 64 coefficients must be processed */
2280 if(s->alternate_scan) nCoeffs= 63;
2281 else nCoeffs= s->block_last_index[n];
/* DC: luma uses y_dc_scale, chroma uses c_dc_scale (branch on n elided
 * in this view — presumably n < 4 selects luma; TODO confirm) */
2284 block[0] = block[0] * s->y_dc_scale;
2286 block[0] = block[0] * s->c_dc_scale;
2287 quant_matrix = s->intra_matrix;
2288 for(i=1;i<=nCoeffs;i++) {
2289 int j= s->intra_scantable.permutated[i];
/* level * qscale * matrix >> 3; no (x-1)|1 step here, unlike MPEG-1 */
2294 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/* same computation for the opposite sign branch */
2297 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * Bit-exact variant of MPEG-2 intra inverse quantization.  The visible
 * arithmetic matches dct_unquantize_mpeg2_intra_c(); the bit-exact
 * extras (e.g. the spec's mismatch-control accumulation) are on lines
 * elided from this view — TODO confirm against the full source.
 */
2304 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2305 DCTELEM *block, int n, int qscale)
2307 int i, level, nCoeffs;
2308 const uint16_t *quant_matrix;
/* alternate scan: process all 64 coefficients */
2311 if(s->alternate_scan) nCoeffs= 63;
2312 else nCoeffs= s->block_last_index[n];
/* DC scaling: luma and chroma scalers (selection branch elided) */
2315 block[0] = block[0] * s->y_dc_scale;
2317 block[0] = block[0] * s->c_dc_scale;
2318 quant_matrix = s->intra_matrix;
2319 for(i=1;i<=nCoeffs;i++) {
2320 int j= s->intra_scantable.permutated[i];
/* level * qscale * matrix >> 3 for each sign branch */
2325 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2328 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/**
 * MPEG-2 inter (non-intra) block inverse quantization (C reference
 * version).  Same (2*level+1) formula as MPEG-1 inter but without the
 * (x-1)|1 oddification step.
 */
2337 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2338 DCTELEM *block, int n, int qscale)
2340 int i, level, nCoeffs;
2341 const uint16_t *quant_matrix;
/* alternate scan: process all 64 coefficients */
2344 if(s->alternate_scan) nCoeffs= 63;
2345 else nCoeffs= s->block_last_index[n];
2347 quant_matrix = s->inter_matrix;
2348 for(i=0; i<=nCoeffs; i++) {
2349 int j= s->intra_scantable.permutated[i];
/* (2*level + 1) * qscale * matrix >> 4 for each sign branch */
2354 level = (((level << 1) + 1) * qscale *
2355 ((int) (quant_matrix[j]))) >> 4;
2358 level = (((level << 1) + 1) * qscale *
2359 ((int) (quant_matrix[j]))) >> 4;
/**
 * H.263-style intra-block inverse quantization (C reference version).
 * Uses the flat quantizer formula |level| * qmul + qadd (no matrix);
 * the qmul initialization is on a line elided from this view —
 * presumably qmul = qscale << 1, TODO confirm.
 */
2368 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2369 DCTELEM *block, int n, int qscale)
2371 int i, level, qmul, qadd;
2374 assert(s->block_last_index[n]>=0);
/* DC: luma uses y_dc_scale, chroma uses c_dc_scale (branch on n elided
 * in this view) */
2380 block[0] = block[0] * s->y_dc_scale;
2382 block[0] = block[0] * s->c_dc_scale;
/* qadd is forced odd, per the H.263 reconstruction rule */
2383 qadd = (qscale - 1) | 1;
/* last coefficient position in raster order for this block */
2390 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2392 for(i=1; i<=nCoeffs; i++) {
/* negative levels: scale and push further from zero */
2396 level = level * qmul - qadd;
/* positive levels: scale and push further from zero */
2398 level = level * qmul + qadd;
/**
 * H.263-style inter-block inverse quantization (C reference version).
 * Same flat-quantizer formula as the intra variant but with no DC
 * scaling and the loop starting at coefficient 0.
 */
2405 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2406 DCTELEM *block, int n, int qscale)
2408 int i, level, qmul, qadd;
2411 assert(s->block_last_index[n]>=0);
/* qadd is forced odd, per the H.263 reconstruction rule */
2413 qadd = (qscale - 1) | 1;
/* last coefficient position in raster order for this block */
2416 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2418 for(i=0; i<=nCoeffs; i++) {
/* negative levels: scale and push further from zero */
2422 level = level * qmul - qadd;
/* positive levels: scale and push further from zero */
2424 level = level * qmul + qadd;
2432 * set qscale and update qscale dependent variables.
2434 void ff_set_qscale(MpegEncContext * s, int qscale)
2438 else if (qscale > 31)
2442 s->chroma_qscale= s->chroma_qscale_table[qscale];
2444 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2445 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];