2 * The simplest mpeg encoder (well, it was the simplest!)
3 * Copyright (c) 2000,2001 Fabrice Bellard
4 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
6 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
8 * This file is part of FFmpeg.
10 * FFmpeg is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public
12 * License as published by the Free Software Foundation; either
13 * version 2.1 of the License, or (at your option) any later version.
15 * FFmpeg is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with FFmpeg; if not, write to the Free Software
22 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
27 * The simplest mpeg encoder (well, it was the simplest!).
30 #include "libavutil/intmath.h"
31 #include "libavutil/imgutils.h"
35 #include "mpegvideo.h"
36 #include "mpegvideo_common.h"
40 #include "xvmc_internal.h"
/* Forward declarations: portable C implementations of the per-codec
 * inverse-quantization routines. ff_dct_common_init() installs these into
 * the MpegEncContext function pointers; arch-specific init functions may
 * later override them with optimized versions. */
47 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
48 DCTELEM *block, int n, int qscale);
49 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
50 DCTELEM *block, int n, int qscale);
51 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
52 DCTELEM *block, int n, int qscale);
53 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
54 DCTELEM *block, int n, int qscale);
55 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
56 DCTELEM *block, int n, int qscale);
57 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
58 DCTELEM *block, int n, int qscale);
59 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
60 DCTELEM *block, int n, int qscale);
63 /* enable all paranoid tests for rounding, overflows, etc... */
/* Default luma-qscale -> chroma-qscale mapping: identity (chroma uses the
 * same qscale as luma). Codecs with a non-linear mapping install their own
 * table over s->chroma_qscale_table. */
69 static const uint8_t ff_default_chroma_qscale_table[32]={
70 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
71 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
/* MPEG-1 DC scale: constant 8 for every qscale (indexed by qscale, 0..127).
 * Also doubles as entry 0 of ff_mpeg2_dc_scale_table[] below. */
74 const uint8_t ff_mpeg1_dc_scale_table[128]={
75 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
76 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
77 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
78 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
79 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
/* MPEG-2 DC scale for intra_dc_precision == 1: constant 4 for all qscales. */
82 static const uint8_t mpeg2_dc_scale_table1[128]={
83 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
84 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
85 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
86 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
87 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
/* MPEG-2 DC scale for intra_dc_precision == 2: constant 2 for all qscales. */
90 static const uint8_t mpeg2_dc_scale_table2[128]={
91 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
92 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
93 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
94 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
95 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
/* MPEG-2 DC scale for intra_dc_precision == 3: constant 1 (full precision). */
98 static const uint8_t mpeg2_dc_scale_table3[128]={
99 // 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31
100 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
101 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
/* MPEG-2 DC scale tables, indexed by intra_dc_precision (0..3).
 * Precision 0 reuses the MPEG-1 table (scale 8). */
106 const uint8_t * const ff_mpeg2_dc_scale_table[4]={
107 ff_mpeg1_dc_scale_table,
108 mpeg2_dc_scale_table1,
109 mpeg2_dc_scale_table2,
110 mpeg2_dc_scale_table3,
113 const enum PixelFormat ff_pixfmt_list_420[] = {
118 const enum PixelFormat ff_hwaccel_pixfmt_list_420[] = {
/**
 * Scan [p, end) for an MPEG start code (0x000001xx).
 * @param state rolling 32-bit shift register holding the last bytes seen;
 *        persists across calls so start codes split over buffer boundaries
 *        are still detected. *state contains the start code once found.
 * @return pointer just past the start code, or end if none was found.
 */
125 const uint8_t *ff_find_start_code(const uint8_t * restrict p, const uint8_t *end, uint32_t * restrict state){
/* Shift the next byte into the rolling state; 0x100 in the upper bytes
 * means the 00 00 01 prefix has just been completed. */
133 uint32_t tmp= *state << 8;
134 *state= tmp + *(p++);
135 if(tmp == 0x100 || p==end)
/* Skip ahead by up to 3 bytes when the bytes just read rule out a start
 * code beginning inside them (classic start-code search fast path). */
140 if (p[-1] > 1 ) p+= 3;
141 else if(p[-2] ) p+= 2;
142 else if(p[-3]|(p[-1]-1)) p++;
155 /* init common dct for both encoder and decoder */
156 av_cold int ff_dct_common_init(MpegEncContext *s)
/* Install the portable C inverse-quantizers as defaults. */
158 s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
159 s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_c;
160 s->dct_unquantize_mpeg1_intra = dct_unquantize_mpeg1_intra_c;
161 s->dct_unquantize_mpeg1_inter = dct_unquantize_mpeg1_inter_c;
162 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_c;
/* Bit-exact mode needs the reference mpeg2 intra unquantizer. */
163 if(s->flags & CODEC_FLAG_BITEXACT)
164 s->dct_unquantize_mpeg2_intra = dct_unquantize_mpeg2_intra_bitexact;
165 s->dct_unquantize_mpeg2_inter = dct_unquantize_mpeg2_inter_c;
/* Let each architecture override the defaults with optimized versions
 * (each call is guarded by the corresponding HAVE_/ARCH_ macro). */
168 MPV_common_init_mmx(s);
170 MPV_common_init_axp(s);
172 MPV_common_init_mlib(s);
174 MPV_common_init_mmi(s);
176 MPV_common_init_arm(s);
178 MPV_common_init_altivec(s);
180 MPV_common_init_bfin(s);
183 /* load & permutate scantables
184 note: only wmv uses different ones
/* Scan tables must be permutated to match the IDCT's coefficient order. */
186 if(s->alternate_scan){
187 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_alternate_vertical_scan);
188 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_alternate_vertical_scan);
190 ff_init_scantable(s->dsp.idct_permutation, &s->inter_scantable , ff_zigzag_direct);
191 ff_init_scantable(s->dsp.idct_permutation, &s->intra_scantable , ff_zigzag_direct);
193 ff_init_scantable(s->dsp.idct_permutation, &s->intra_h_scantable, ff_alternate_horizontal_scan);
194 ff_init_scantable(s->dsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
/**
 * Copy a Picture, marking the destination as a non-owning copy so the
 * buffer-release paths never free planes owned by the source.
 */
199 void ff_copy_picture(Picture *dst, Picture *src){
201 dst->f.type= FF_BUFFER_TYPE_COPY;
205 * Release a frame buffer
207 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
/* Return the buffer via the thread-aware release path, then drop any
 * hwaccel private data still attached to this frame. */
209 ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
210 av_freep(&pic->f.hwaccel_picture_private);
214 * Allocate a frame buffer
216 static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
/* Allocate hwaccel private data before requesting the buffer so the
 * hwaccel's get_buffer callback can use it. */
220 if (s->avctx->hwaccel) {
221 assert(!pic->hwaccel_picture_private);
222 if (s->avctx->hwaccel->priv_data_size) {
223 pic->f.hwaccel_picture_private = av_mallocz(s->avctx->hwaccel->priv_data_size);
224 if (!pic->f.hwaccel_picture_private) {
225 av_log(s->avctx, AV_LOG_ERROR, "alloc_frame_buffer() failed (hwaccel private data allocation)\n");
231 r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
/* Sanity-check what get_buffer() returned; a buffer with no data[0],
 * no age or no type is unusable. */
233 if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
234 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
235 r, pic->f.age, pic->f.type, pic->f.data[0]);
236 av_freep(&pic->f.hwaccel_picture_private);
/* Strides must stay constant across frames; the MC code caches them. */
240 if (s->linesize && (s->linesize != pic->f.linesize[0] || s->uvlinesize != pic->f.linesize[1])) {
241 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n");
242 free_frame_buffer(s, pic);
/* U and V planes are assumed to share one stride throughout this file. */
246 if (pic->f.linesize[1] != pic->f.linesize[2]) {
247 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride mismatch)\n");
248 free_frame_buffer(s, pic);
256 * allocates a Picture
257 * The pixels are allocated/set by calling get_buffer() if shared=0
259 int ff_alloc_picture(MpegEncContext *s, Picture *pic, int shared){
260 const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; //the +1 is needed so memset(,,stride*height) does not sig11
261 const int mb_array_size= s->mb_stride*s->mb_height;
262 const int b8_array_size= s->b8_stride*s->mb_height*2;
263 const int b4_array_size= s->b4_stride*s->mb_height*4;
/* shared != 0: caller supplies the pixel planes; just tag the picture. */
268 assert(pic->f.data[0]);
269 assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED);
270 pic->f.type = FF_BUFFER_TYPE_SHARED;
/* shared == 0: get fresh planes through alloc_frame_buffer()/get_buffer(). */
272 assert(!pic->f.data[0]);
274 if (alloc_frame_buffer(s, pic) < 0)
/* Cache strides; alloc_frame_buffer() verifies they never change later. */
277 s->linesize = pic->f.linesize[0];
278 s->uvlinesize = pic->f.linesize[1];
/* First-time allocation of the per-picture side tables (skipped when the
 * picture is being reused and the tables already exist). */
281 if (pic->f.qscale_table == NULL) {
283 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_var , mb_array_size * sizeof(int16_t) , fail)
284 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mc_mb_var, mb_array_size * sizeof(int16_t) , fail)
285 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_mean , mb_array_size * sizeof(int8_t ) , fail)
288 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.mbskip_table, mb_array_size * sizeof(uint8_t) + 2, fail) //the +2 is for the slice end check
289 FF_ALLOCZ_OR_GOTO(s->avctx, pic->qscale_table_base , (big_mb_num + s->mb_stride) * sizeof(uint8_t) , fail)
290 FF_ALLOCZ_OR_GOTO(s->avctx, pic->mb_type_base , (big_mb_num + s->mb_stride) * sizeof(uint32_t), fail)
/* Offset the working pointers so the row above / column left of the frame
 * can be addressed without bounds checks (error resilience / prediction). */
291 pic->f.mb_type = pic->mb_type_base + 2*s->mb_stride + 1;
292 pic->f.qscale_table = pic->qscale_table_base + 2*s->mb_stride + 1;
/* H.264 stores motion vectors at 4x4 granularity, everything else at 8x8. */
293 if(s->out_format == FMT_H264){
295 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b4_array_size+4) * sizeof(int16_t), fail)
296 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
297 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
299 pic->f.motion_subsample_log2 = 2;
300 }else if(s->out_format == FMT_H263 || s->encoding || (s->avctx->debug&FF_DEBUG_MV) || (s->avctx->debug_mv)){
302 FF_ALLOCZ_OR_GOTO(s->avctx, pic->motion_val_base[i], 2 * (b8_array_size+4) * sizeof(int16_t), fail)
303 pic->f.motion_val[i] = pic->motion_val_base[i] + 4;
304 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.ref_index[i], 4*mb_array_size * sizeof(uint8_t), fail)
306 pic->f.motion_subsample_log2 = 3;
308 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
309 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.dct_coeff, 64 * mb_array_size * sizeof(DCTELEM) * 6, fail)
311 pic->f.qstride = s->mb_stride;
312 FF_ALLOCZ_OR_GOTO(s->avctx, pic->f.pan_scan , 1 * sizeof(AVPanScan), fail)
315 /* It might be nicer if the application would keep track of these
316 * but it would require an API change. */
317 memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1);
318 s->prev_pict_types[0]= s->dropable ? AV_PICTURE_TYPE_B : s->pict_type;
319 if (pic->f.age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->f.age] == AV_PICTURE_TYPE_B)
320 pic->f.age = INT_MAX; // Skipped MBs in B-frames are quite rare in MPEG-1/2 and it is a bit tricky to skip them anyway.
324 fail: //for the FF_ALLOCZ_OR_GOTO macro
326 free_frame_buffer(s, pic);
331 * deallocates a picture
333 static void free_picture(MpegEncContext *s, Picture *pic){
/* Release the pixel planes only if we own them (shared buffers belong to
 * the caller and must not be freed here). */
336 if (pic->f.data[0] && pic->f.type != FF_BUFFER_TYPE_SHARED) {
337 free_frame_buffer(s, pic);
/* Free all per-picture side tables (av_freep tolerates NULL and clears
 * the pointers, so double-free is not possible). */
340 av_freep(&pic->mb_var);
341 av_freep(&pic->mc_mb_var);
342 av_freep(&pic->mb_mean);
343 av_freep(&pic->f.mbskip_table);
344 av_freep(&pic->qscale_table_base);
345 av_freep(&pic->mb_type_base);
346 av_freep(&pic->f.dct_coeff);
347 av_freep(&pic->f.pan_scan);
348 pic->f.mb_type = NULL;
350 av_freep(&pic->motion_val_base[i]);
351 av_freep(&pic->f.ref_index[i]);
/* For shared pictures only clear the plane pointers; the memory is the
 * caller's. */
354 if (pic->f.type == FF_BUFFER_TYPE_SHARED) {
357 pic->f.data[i] = NULL;
/**
 * Allocate the per-thread scratch buffers of a (possibly duplicated)
 * MpegEncContext: edge emulation buffer, ME scratchpads, ME maps,
 * noise-reduction accumulator, DCT block storage and (for H.263-family)
 * the AC prediction values.
 * @return 0 on success, -1 on allocation failure (cleanup is deferred to
 *         MPV_common_end()).
 */
363 static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base){
364 int y_size = s->b8_stride * (2 * s->mb_height + 1);
365 int c_size = s->mb_stride * (s->mb_height + 1);
366 int yc_size = y_size + 2 * c_size;
369 // edge emu needs blocksize + filter length - 1 (=17x17 for halfpel / 21x21 for h264)
370 FF_ALLOCZ_OR_GOTO(s->avctx, s->allocated_edge_emu_buffer, (s->width+64)*2*21*2, fail); //(width + edge + align)*interlaced*MBsize*tolerance
371 s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*21;
373 //FIXME should be linesize instead of s->width*2 but that is not known before get_buffer()
374 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t), fail)
/* The ME/RD/B-frame scratchpads intentionally alias one allocation; they
 * are never live at the same time. */
375 s->me.temp= s->me.scratchpad;
376 s->rd_scratchpad= s->me.scratchpad;
377 s->b_scratchpad= s->me.scratchpad;
378 s->obmc_scratchpad= s->me.scratchpad + 16;
380 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.map , ME_MAP_SIZE*sizeof(uint32_t), fail)
381 FF_ALLOCZ_OR_GOTO(s->avctx, s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t), fail)
382 if(s->avctx->noise_reduction){
383 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_error_sum, 2 * 64 * sizeof(int), fail)
/* 12 blocks of 64 DCTELEMs x2: enough for 4:2:2/4:4:4 macroblocks. */
386 FF_ALLOCZ_OR_GOTO(s->avctx, s->blocks, 64*12*2 * sizeof(DCTELEM), fail)
387 s->block= s->blocks[0];
390 s->pblocks[i] = &s->block[i];
393 if (s->out_format == FMT_H263) {
395 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_val_base, yc_size * sizeof(int16_t) * 16, fail);
/* Offset by one row+column so prediction can read "outside" the frame. */
396 s->ac_val[0] = s->ac_val_base + s->b8_stride + 1;
397 s->ac_val[1] = s->ac_val_base + y_size + s->mb_stride + 1;
398 s->ac_val[2] = s->ac_val[1] + c_size;
403 return -1; //free() through MPV_common_end()
/* Free everything allocated by init_duplicate_context(); safe to call on a
 * partially-initialized context since av_freep() handles NULL. */
406 static void free_duplicate_context(MpegEncContext *s){
409 av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL;
410 av_freep(&s->me.scratchpad);
/* rd/b/obmc scratchpads alias me.scratchpad, so only clear the pointers. */
414 s->obmc_scratchpad= NULL;
416 av_freep(&s->dct_error_sum);
417 av_freep(&s->me.map);
418 av_freep(&s->me.score_map);
419 av_freep(&s->blocks);
420 av_freep(&s->ac_val_base);
/* Copy only the per-thread fields from src to bak; used as save/restore
 * around the wholesale memcpy in ff_update_duplicate_context(). */
424 static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src){
425 #define COPY(a) bak->a= src->a
426 COPY(allocated_edge_emu_buffer);
427 COPY(edge_emu_buffer);
432 COPY(obmc_scratchpad);
439 COPY(me.map_generation);
/**
 * Make dst a copy of src while preserving dst's own per-thread buffers:
 * the thread-local fields are saved, the whole context is memcpy'd over,
 * and the saved fields are restored. pblocks must then be re-pointed at
 * dst's own block storage.
 */
451 void ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src){
454 //FIXME copy only needed parts
456 backup_duplicate_context(&bak, dst);
457 memcpy(dst, src, sizeof(MpegEncContext));
458 backup_duplicate_context(dst, &bak);
460 dst->pblocks[i] = &dst->block[i];
462 //STOP_TIMER("update_duplicate_context") //about 10k cycles / 0.01 sec for 1000frames on 1ghz with 2 threads
/**
 * Frame-threading: synchronize the decoding state of dst's private context
 * with the source thread's context (picture lists, timing, bitstream
 * buffer, interlacing info, ...). Called by the frame-thread framework
 * before dst starts decoding the next frame.
 * @return 0 on success.
 */
465 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
467 MpegEncContext *s = dst->priv_data, *s1 = src->priv_data;
469 if(dst == src || !s1->context_initialized) return 0;
471 //FIXME can parameters change on I-frames? in that case dst may need a reinit
/* First call for this thread: clone the whole context, then give it its
 * own picture index range and a private bitstream buffer. */
472 if(!s->context_initialized){
473 memcpy(s, s1, sizeof(MpegEncContext));
476 s->picture_range_start += MAX_PICTURE_COUNT;
477 s->picture_range_end += MAX_PICTURE_COUNT;
478 s->bitstream_buffer = NULL;
479 s->bitstream_buffer_size = s->allocated_bitstream_buffer_size = 0;
484 s->avctx->coded_height = s1->avctx->coded_height;
485 s->avctx->coded_width = s1->avctx->coded_width;
486 s->avctx->width = s1->avctx->width;
487 s->avctx->height = s1->avctx->height;
489 s->coded_picture_number = s1->coded_picture_number;
490 s->picture_number = s1->picture_number;
491 s->input_picture_number = s1->input_picture_number;
/* Copy the picture array plus the last/current/next Picture structs, then
 * rebase the pointers into this thread's own s->picture[] array. */
493 memcpy(s->picture, s1->picture, s1->picture_count * sizeof(Picture));
494 memcpy(&s->last_picture, &s1->last_picture, (char*)&s1->last_picture_ptr - (char*)&s1->last_picture);
496 s->last_picture_ptr = REBASE_PICTURE(s1->last_picture_ptr, s, s1);
497 s->current_picture_ptr = REBASE_PICTURE(s1->current_picture_ptr, s, s1);
498 s->next_picture_ptr = REBASE_PICTURE(s1->next_picture_ptr, s, s1);
500 memcpy(s->prev_pict_types, s1->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE);
502 //Error/bug resilience
503 s->next_p_frame_damaged = s1->next_p_frame_damaged;
504 s->workaround_bugs = s1->workaround_bugs;
/* MPEG-4 timing: bulk-copy the field range [time_increment_bits, shape). */
507 memcpy(&s->time_increment_bits, &s1->time_increment_bits, (char*)&s1->shape - (char*)&s1->time_increment_bits);
510 s->max_b_frames = s1->max_b_frames;
511 s->low_delay = s1->low_delay;
512 s->dropable = s1->dropable;
514 //DivX handling (doesn't work)
515 s->divx_packed = s1->divx_packed;
/* Duplicate the source's pending bitstream buffer (grown if needed),
 * including the zeroed input padding the parsers rely on. */
517 if(s1->bitstream_buffer){
518 if (s1->bitstream_buffer_size + FF_INPUT_BUFFER_PADDING_SIZE > s->allocated_bitstream_buffer_size)
519 av_fast_malloc(&s->bitstream_buffer, &s->allocated_bitstream_buffer_size, s1->allocated_bitstream_buffer_size);
520 s->bitstream_buffer_size = s1->bitstream_buffer_size;
521 memcpy(s->bitstream_buffer, s1->bitstream_buffer, s1->bitstream_buffer_size);
522 memset(s->bitstream_buffer+s->bitstream_buffer_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
525 //MPEG2/interlacing info
526 memcpy(&s->progressive_sequence, &s1->progressive_sequence, (char*)&s1->rtp_mode - (char*)&s1->progressive_sequence);
528 if(!s1->first_field){
529 s->last_pict_type= s1->pict_type;
530 if (s1->current_picture_ptr) s->last_lambda_for[s1->pict_type] = s1->current_picture_ptr->f.quality;
/* NOTE(review): uses deprecated FF_B_TYPE while the rest of this file uses
 * AV_PICTURE_TYPE_B — same value, but should be unified. */
532 if(s1->pict_type!=FF_B_TYPE){
533 s->last_non_b_pict_type= s1->pict_type;
541 * sets the given MpegEncContext to common defaults (same for encoding and decoding).
542 * the changed fields will not depend upon the prior state of the MpegEncContext.
544 void MPV_common_defaults(MpegEncContext *s){
/* Default scale/qscale tables; codec-specific init may replace these. */
546 s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
547 s->chroma_qscale_table= ff_default_chroma_qscale_table;
/* Assume progressive full-frame coding until headers say otherwise. */
548 s->progressive_frame= 1;
549 s->progressive_sequence= 1;
550 s->picture_structure= PICT_FRAME;
552 s->coded_picture_number = 0;
553 s->picture_number = 0;
554 s->input_picture_number = 0;
556 s->picture_in_gop_number = 0;
/* Full picture index range; frame threads get shifted sub-ranges. */
561 s->picture_range_start = 0;
562 s->picture_range_end = MAX_PICTURE_COUNT;
566 * sets the given MpegEncContext to defaults for decoding.
567 * the changed fields will not depend upon the prior state of the MpegEncContext.
569 void MPV_decode_defaults(MpegEncContext *s){
570 MPV_common_defaults(s);
574 * init common structure for both encoder and decoder.
575 * this assumes that some variables like width/height are already set
577 av_cold int MPV_common_init(MpegEncContext *s)
579 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y,
580 threads = (s->encoding ||
582 s->avctx->active_thread_type & FF_THREAD_SLICE)) ?
583 s->avctx->thread_count : 1;
/* Interlaced MPEG-2 rounds mb_height to a multiple of two field MB rows;
 * H.264 sets mb_height itself from the SPS. */
585 if(s->codec_id == CODEC_ID_MPEG2VIDEO && !s->progressive_sequence)
586 s->mb_height = (s->height + 31) / 32 * 2;
587 else if (s->codec_id != CODEC_ID_H264)
588 s->mb_height = (s->height + 15) / 16;
590 if(s->avctx->pix_fmt == PIX_FMT_NONE){
591 av_log(s->avctx, AV_LOG_ERROR, "decoding to PIX_FMT_NONE is not supported.\n");
/* Slice threading cannot use more threads than MB rows (or MAX_THREADS). */
595 if((s->encoding || (s->avctx->active_thread_type & FF_THREAD_SLICE)) &&
596 (s->avctx->thread_count > MAX_THREADS || (s->avctx->thread_count > s->mb_height && s->mb_height))){
597 int max_threads = FFMIN(MAX_THREADS, s->mb_height);
598 av_log(s->avctx, AV_LOG_WARNING, "too many threads (%d), reducing to %d\n",
599 s->avctx->thread_count, max_threads);
600 threads = max_threads;
603 if((s->width || s->height) && av_image_check_size(s->width, s->height, 0, s->avctx))
606 dsputil_init(&s->dsp, s->avctx);
607 ff_dct_common_init(s);
609 s->flags= s->avctx->flags;
610 s->flags2= s->avctx->flags2;
/* Derive macroblock / 8x8-block / 4x4-block grid geometry. The +1 strides
 * leave room for a guard column used by prediction and error resilience. */
612 s->mb_width = (s->width + 15) / 16;
613 s->mb_stride = s->mb_width + 1;
614 s->b8_stride = s->mb_width*2 + 1;
615 s->b4_stride = s->mb_width*4 + 1;
616 mb_array_size= s->mb_height * s->mb_stride;
617 mv_table_size= (s->mb_height+2) * s->mb_stride + 1;
619 /* set chroma shifts */
620 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift),
621 &(s->chroma_y_shift) );
623 /* set default edge pos, will be overriden in decode_header if needed */
624 s->h_edge_pos= s->mb_width*16;
625 s->v_edge_pos= s->mb_height*16;
627 s->mb_num = s->mb_width * s->mb_height;
632 s->block_wrap[3]= s->b8_stride;
634 s->block_wrap[5]= s->mb_stride;
636 y_size = s->b8_stride * (2 * s->mb_height + 1);
637 c_size = s->mb_stride * (s->mb_height + 1);
638 yc_size = y_size + 2 * c_size;
640 /* convert fourcc to upper case */
641 s->codec_tag = ff_toupper4(s->avctx->codec_tag);
643 s->stream_codec_tag = ff_toupper4(s->avctx->stream_codec_tag);
645 s->avctx->coded_frame= (AVFrame*)&s->current_picture;
/* mb_index2xy maps a raster MB index to its (x + y*mb_stride) position. */
647 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_index2xy, (s->mb_num+1)*sizeof(int), fail) //error ressilience code looks cleaner with this
648 for(y=0; y<s->mb_height; y++){
649 for(x=0; x<s->mb_width; x++){
650 s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride;
653 s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; //FIXME really needed?
656 /* Allocate MV tables */
657 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
658 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
659 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
660 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
661 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
662 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t), fail)
/* Offset working pointers past the guard row/column (mb_stride + 1). */
663 s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1;
664 s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1;
665 s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1;
666 s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1;
667 s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1;
668 s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1;
670 if(s->msmpeg4_version){
671 FF_ALLOCZ_OR_GOTO(s->avctx, s->ac_stats, 2*2*(MAX_LEVEL+1)*(MAX_RUN+1)*2*sizeof(int), fail);
673 FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
675 /* Allocate MB type table */
676 FF_ALLOCZ_OR_GOTO(s->avctx, s->mb_type , mb_array_size * sizeof(uint16_t), fail) //needed for encoding
678 FF_ALLOCZ_OR_GOTO(s->avctx, s->lambda_table, mb_array_size * sizeof(int), fail)
/* Quantization matrices: one set per qscale (32) per coefficient (64). */
680 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix , 64*32 * sizeof(int), fail)
681 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix , 64*32 * sizeof(int), fail)
682 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t), fail)
683 FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t), fail)
684 FF_ALLOCZ_OR_GOTO(s->avctx, s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
685 FF_ALLOCZ_OR_GOTO(s->avctx, s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*), fail)
687 if(s->avctx->noise_reduction){
688 FF_ALLOCZ_OR_GOTO(s->avctx, s->dct_offset, 2 * 64 * sizeof(uint16_t), fail)
/* Frame threading needs a picture pool per thread. */
692 s->picture_count = MAX_PICTURE_COUNT * FFMAX(1, s->avctx->thread_count);
693 FF_ALLOCZ_OR_GOTO(s->avctx, s->picture, s->picture_count * sizeof(Picture), fail)
694 for(i = 0; i < s->picture_count; i++) {
695 avcodec_get_frame_defaults((AVFrame *)&s->picture[i]);
698 FF_ALLOCZ_OR_GOTO(s->avctx, s->error_status_table, mb_array_size*sizeof(uint8_t), fail)
700 if(s->codec_id==CODEC_ID_MPEG4 || (s->flags & CODEC_FLAG_INTERLACED_ME)){
701 /* interlaced direct mode decoding tables */
706 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_mv_table_base[i][j][k], mv_table_size * 2 * sizeof(int16_t), fail)
707 s->b_field_mv_table[i][j][k] = s->b_field_mv_table_base[i][j][k] + s->mb_stride + 1;
709 FF_ALLOCZ_OR_GOTO(s->avctx, s->b_field_select_table [i][j], mb_array_size * 2 * sizeof(uint8_t), fail)
710 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_mv_table_base[i][j], mv_table_size * 2 * sizeof(int16_t), fail)
711 s->p_field_mv_table[i][j] = s->p_field_mv_table_base[i][j]+ s->mb_stride + 1;
713 FF_ALLOCZ_OR_GOTO(s->avctx, s->p_field_select_table[i], mb_array_size * 2 * sizeof(uint8_t), fail)
716 if (s->out_format == FMT_H263) {
718 FF_ALLOCZ_OR_GOTO(s->avctx, s->coded_block_base, y_size, fail);
719 s->coded_block= s->coded_block_base + s->b8_stride + 1;
721 /* cbp, ac_pred, pred_dir */
722 FF_ALLOCZ_OR_GOTO(s->avctx, s->cbp_table , mb_array_size * sizeof(uint8_t), fail)
723 FF_ALLOCZ_OR_GOTO(s->avctx, s->pred_dir_table, mb_array_size * sizeof(uint8_t), fail)
726 if (s->h263_pred || s->h263_plus || !s->encoding) {
728 //MN: we need these for error resilience of intra-frames
729 FF_ALLOCZ_OR_GOTO(s->avctx, s->dc_val_base, yc_size * sizeof(int16_t), fail);
730 s->dc_val[0] = s->dc_val_base + s->b8_stride + 1;
731 s->dc_val[1] = s->dc_val_base + y_size + s->mb_stride + 1;
732 s->dc_val[2] = s->dc_val[1] + c_size;
/* 1024 is the neutral DC predictor value (128 << 3). */
733 for(i=0;i<yc_size;i++)
734 s->dc_val_base[i] = 1024;
737 /* which mb is a intra block */
738 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbintra_table, mb_array_size, fail);
739 memset(s->mbintra_table, 1, mb_array_size);
741 /* init macroblock skip table */
742 FF_ALLOCZ_OR_GOTO(s->avctx, s->mbskip_table, mb_array_size+2, fail);
743 //Note the +1 is for a quicker mpeg4 slice_end detection
744 FF_ALLOCZ_OR_GOTO(s->avctx, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE, fail);
746 s->parse_context.state= -1;
747 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
748 s->visualization_buffer[0] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
749 s->visualization_buffer[1] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
750 s->visualization_buffer[2] = av_malloc((s->mb_width*16 + 2*EDGE_WIDTH) * s->mb_height*16 + 2*EDGE_WIDTH);
753 s->context_initialized = 1;
754 s->thread_context[0]= s;
/* Slice threading / encoding: clone the context for every worker thread
 * and hand each one its own contiguous range of MB rows. */
756 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
757 for(i=1; i<threads; i++){
758 s->thread_context[i]= av_malloc(sizeof(MpegEncContext));
759 memcpy(s->thread_context[i], s, sizeof(MpegEncContext));
762 for(i=0; i<threads; i++){
763 if(init_duplicate_context(s->thread_context[i], s) < 0)
765 s->thread_context[i]->start_mb_y= (s->mb_height*(i ) + s->avctx->thread_count/2) / s->avctx->thread_count;
766 s->thread_context[i]->end_mb_y = (s->mb_height*(i+1) + s->avctx->thread_count/2) / s->avctx->thread_count;
769 if(init_duplicate_context(s, s) < 0) goto fail;
771 s->end_mb_y = s->mb_height;
781 /* init common structure for both encoder and decoder */
782 void MPV_common_end(MpegEncContext *s)
/* Tear down per-thread contexts first (index 0 is s itself, so only
 * indices >= 1 were separately allocated). */
786 if (s->encoding || (HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_SLICE)) {
787 for(i=0; i<s->avctx->thread_count; i++){
788 free_duplicate_context(s->thread_context[i]);
790 for(i=1; i<s->avctx->thread_count; i++){
791 av_freep(&s->thread_context[i]);
793 } else free_duplicate_context(s);
795 av_freep(&s->parse_context.buffer);
796 s->parse_context.buffer_size=0;
/* Free all tables allocated by MPV_common_init(); the *_base pointers own
 * the memory, the offset working pointers are just cleared. */
798 av_freep(&s->mb_type);
799 av_freep(&s->p_mv_table_base);
800 av_freep(&s->b_forw_mv_table_base);
801 av_freep(&s->b_back_mv_table_base);
802 av_freep(&s->b_bidir_forw_mv_table_base);
803 av_freep(&s->b_bidir_back_mv_table_base);
804 av_freep(&s->b_direct_mv_table_base);
806 s->b_forw_mv_table= NULL;
807 s->b_back_mv_table= NULL;
808 s->b_bidir_forw_mv_table= NULL;
809 s->b_bidir_back_mv_table= NULL;
810 s->b_direct_mv_table= NULL;
814 av_freep(&s->b_field_mv_table_base[i][j][k]);
815 s->b_field_mv_table[i][j][k]=NULL;
817 av_freep(&s->b_field_select_table[i][j]);
818 av_freep(&s->p_field_mv_table_base[i][j]);
819 s->p_field_mv_table[i][j]=NULL;
821 av_freep(&s->p_field_select_table[i]);
824 av_freep(&s->dc_val_base);
825 av_freep(&s->coded_block_base);
826 av_freep(&s->mbintra_table);
827 av_freep(&s->cbp_table);
828 av_freep(&s->pred_dir_table);
830 av_freep(&s->mbskip_table);
831 av_freep(&s->prev_pict_types);
832 av_freep(&s->bitstream_buffer);
833 s->allocated_bitstream_buffer_size=0;
835 av_freep(&s->avctx->stats_out);
836 av_freep(&s->ac_stats);
837 av_freep(&s->error_status_table);
838 av_freep(&s->mb_index2xy);
839 av_freep(&s->lambda_table);
840 av_freep(&s->q_intra_matrix);
841 av_freep(&s->q_inter_matrix);
842 av_freep(&s->q_intra_matrix16);
843 av_freep(&s->q_inter_matrix16);
844 av_freep(&s->input_picture);
845 av_freep(&s->reordered_input_picture);
846 av_freep(&s->dct_offset);
/* Copied contexts (frame threading) share the pictures; only the owner
 * frees them. */
848 if(s->picture && !s->avctx->is_copy){
849 for(i=0; i<s->picture_count; i++){
850 free_picture(s, &s->picture[i]);
853 av_freep(&s->picture);
854 s->context_initialized = 0;
857 s->current_picture_ptr= NULL;
858 s->linesize= s->uvlinesize= 0;
861 av_freep(&s->visualization_buffer[i]);
863 if(!(s->avctx->active_thread_type&FF_THREAD_FRAME))
864 avcodec_default_free_buffers(s->avctx);
/**
 * Initialize a run-length table: derive max_level[], max_run[] and
 * index_run[] from the raw (run, level) code tables, once per 'last' flag.
 * @param static_store optional static storage for the derived tables; when
 *        given, the tables are built there once and reused, otherwise they
 *        are heap-allocated per RLTable.
 */
867 void init_rl(RLTable *rl, uint8_t static_store[2][2*MAX_RUN + MAX_LEVEL + 3])
869 int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1];
870 uint8_t index_run[MAX_RUN+1];
871 int last, run, level, start, end, i;
873 /* If table is static, we can quit if rl->max_level[0] is not NULL */
874 if(static_store && rl->max_level[0])
877 /* compute max_level[], max_run[] and index_run[] */
878 for(last=0;last<2;last++) {
/* rl->n in index_run[] marks "no code with this run" (sentinel). */
887 memset(max_level, 0, MAX_RUN + 1);
888 memset(max_run, 0, MAX_LEVEL + 1);
889 memset(index_run, rl->n, MAX_RUN + 1);
890 for(i=start;i<end;i++) {
891 run = rl->table_run[i];
892 level = rl->table_level[i];
893 if (index_run[run] == rl->n)
895 if (level > max_level[run])
896 max_level[run] = level;
897 if (run > max_run[level])
898 max_run[level] = run;
/* Publish the derived tables: sliced out of static_store when provided,
 * otherwise freshly heap-allocated. */
901 rl->max_level[last] = static_store[last];
903 rl->max_level[last] = av_malloc(MAX_RUN + 1);
904 memcpy(rl->max_level[last], max_level, MAX_RUN + 1);
906 rl->max_run[last] = static_store[last] + MAX_RUN + 1;
908 rl->max_run[last] = av_malloc(MAX_LEVEL + 1);
909 memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1);
911 rl->index_run[last] = static_store[last] + MAX_RUN + MAX_LEVEL + 2;
913 rl->index_run[last] = av_malloc(MAX_RUN + 1);
914 memcpy(rl->index_run[last], index_run, MAX_RUN + 1);
/**
 * Build the combined RL-VLC tables (rl_vlc[q]) from the plain VLC table:
 * for each VLC entry precompute (len, run, level) so the decoder can fetch
 * all three with a single lookup, per quantizer q.
 */
918 void init_vlc_rl(RLTable *rl)
930 for(i=0; i<rl->vlc.table_size; i++){
931 int code= rl->vlc.table[i][0];
932 int len = rl->vlc.table[i][1];
935 if(len==0){ // illegal code
938 }else if(len<0){ //more bits needed
942 if(code==rl->n){ //esc
/* Regular code: decode run/level; +192 on run marks a 'last' code. */
946 run= rl->table_run [code] + 1;
947 level= rl->table_level[code] * qmul + qadd;
948 if(code >= rl->last) run+=192;
951 rl->rl_vlc[q][i].len= len;
952 rl->rl_vlc[q][i].level= level;
953 rl->rl_vlc[q][i].run= run;
/**
 * Free the buffers of all pictures this context owns that are allocated
 * but no longer referenced.
 * @param remove_current if 0, the current picture is kept even if unused.
 */
958 void ff_release_unused_pictures(MpegEncContext *s, int remove_current)
962 /* release non reference frames */
963 for(i=0; i<s->picture_count; i++){
964 if (s->picture[i].f.data[0] && !s->picture[i].f.reference
965 && (!s->picture[i].owner2 || s->picture[i].owner2 == s)
966 && (remove_current || &s->picture[i] != s->current_picture_ptr)
967 /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){
968 free_frame_buffer(s, &s->picture[i]);
/**
 * Find a free slot in s->picture[] within this context's index range.
 * Preference order: for shared pictures, a slot that was never internally
 * allocated (type == 0); otherwise a previously used internal slot, then
 * any slot with no data. Aborts if the pool is exhausted (see comment).
 * @return index into s->picture[].
 */
973 int ff_find_unused_picture(MpegEncContext *s, int shared){
977 for(i=s->picture_range_start; i<s->picture_range_end; i++){
978 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type == 0)
982 for(i=s->picture_range_start; i<s->picture_range_end; i++){
983 if (s->picture[i].f.data[0] == NULL && s->picture[i].f.type != 0)
986 for(i=s->picture_range_start; i<s->picture_range_end; i++){
987 if (s->picture[i].f.data[0] == NULL)
992 av_log(s->avctx, AV_LOG_FATAL, "Internal error, picture buffer overflow\n");
993 /* We could return -1, but the codec would crash trying to draw into a
994 * non-existing frame anyway. This is safer than waiting for a random crash.
995 * Also the return of this is never useful, an encoder must only allocate
996 * as much as allowed in the specification. This has no relationship to how
997 * much libavcodec could allocate (and MAX_PICTURE_COUNT is always large
998 * enough for such valid streams).
999 * Plus, a decoder has to check stream validity and remove frames if too
1000 * many reference frames are around. Waiting for "OOM" is not correct at
1001 * all. Similarly, missing reference frames have to be replaced by
1002 * interpolated/MC frames, anything else is a bug in the codec ...
/* Refresh the per-coefficient noise-reduction offsets from the running DCT
 * error statistics; halve the accumulators periodically so recent frames
 * dominate (exponential decay once the count exceeds 2^16). */
1008 static void update_noise_reduction(MpegEncContext *s){
1011 for(intra=0; intra<2; intra++){
1012 if(s->dct_count[intra] > (1<<16)){
1013 for(i=0; i<64; i++){
1014 s->dct_error_sum[intra][i] >>=1;
1016 s->dct_count[intra] >>= 1;
/* offset = strength * count / error_sum, rounded; +1 avoids div by 0. */
1019 for(i=0; i<64; i++){
1020 s->dct_offset[intra][i]= (s->avctx->noise_reduction * s->dct_count[intra] + s->dct_error_sum[intra][i]/2) / (s->dct_error_sum[intra][i]+1);
/**
1026 * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
 *
 * Rotates/releases the last/next/current picture pointers, grabs and
 * allocates the Picture to be coded/decoded, allocates dummy reference
 * frames when references are missing, fixes up field-picture data/linesize,
 * selects the dequantizer functions and updates noise-reduction state.
 */
1028 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
1034 assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
1036 /* mark&release old frames */
1037 if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->f.data[0]) {
1038 if(s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3){
1039 if (s->last_picture_ptr->owner2 == s)
1040 free_frame_buffer(s, s->last_picture_ptr);
1042 /* release forgotten pictures */
1043 /* if(mpeg124/h263) */
1045 for(i=0; i<s->picture_count; i++){
1046 if (s->picture[i].owner2 == s && s->picture[i].f.data[0] && &s->picture[i] != s->next_picture_ptr && s->picture[i].f.reference) {
1047 if (!(avctx->active_thread_type & FF_THREAD_FRAME))
1048 av_log(avctx, AV_LOG_ERROR, "releasing zombie picture\n");
1049 free_frame_buffer(s, &s->picture[i]);
1057 ff_release_unused_pictures(s, 1);
/* pick (or reuse) the Picture slot that will hold the new frame */
1059 if (s->current_picture_ptr && s->current_picture_ptr->f.data[0] == NULL)
1060 pic= s->current_picture_ptr; //we already have a unused image (maybe it was set before reading the header)
1062 i= ff_find_unused_picture(s, 0);
1063 pic= &s->picture[i];
1066 pic->f.reference = 0;
/* H.264 marks references per field (picture_structure mask); other codecs
 * mark all non-B pictures as full-frame references (3 = both fields) */
1068 if (s->codec_id == CODEC_ID_H264)
1069 pic->f.reference = s->picture_structure;
1070 else if (s->pict_type != AV_PICTURE_TYPE_B)
1071 pic->f.reference = 3;
1074 pic->f.coded_picture_number = s->coded_picture_number++;
1076 if(ff_alloc_picture(s, pic, 0) < 0)
1079 s->current_picture_ptr= pic;
1080 //FIXME use only the vars from current_pic
1081 s->current_picture_ptr->f.top_field_first = s->top_field_first;
1082 if(s->codec_id == CODEC_ID_MPEG1VIDEO || s->codec_id == CODEC_ID_MPEG2VIDEO) {
1083 if(s->picture_structure != PICT_FRAME)
1084 s->current_picture_ptr->f.top_field_first = (s->picture_structure == PICT_TOP_FIELD) == s->first_field;
1086 s->current_picture_ptr->f.interlaced_frame = !s->progressive_frame && !s->progressive_sequence;
1087 s->current_picture_ptr->field_picture = s->picture_structure != PICT_FRAME;
1090 s->current_picture_ptr->f.pict_type = s->pict_type;
1091 // if(s->flags && CODEC_FLAG_QSCALE)
1092 // s->current_picture_ptr->quality= s->new_picture_ptr->quality;
1093 s->current_picture_ptr->f.key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1095 ff_copy_picture(&s->current_picture, s->current_picture_ptr);
/* rotate references: non-B frames push next -> last, current -> next */
1097 if (s->pict_type != AV_PICTURE_TYPE_B) {
1098 s->last_picture_ptr= s->next_picture_ptr;
1100 s->next_picture_ptr= s->current_picture_ptr;
1102 /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr,
1103 s->last_picture_ptr ? s->last_picture_ptr->f.data[0] : NULL,
1104 s->next_picture_ptr ? s->next_picture_ptr->f.data[0] : NULL,
1105 s->current_picture_ptr ? s->current_picture_ptr->f.data[0] : NULL,
1106 s->pict_type, s->dropable);*/
/* non-H.264: fabricate dummy reference frames for broken streams that start
 * on a non-keyframe (or field-based keyframes), so MC never reads NULL */
1108 if(s->codec_id != CODEC_ID_H264){
1109 if ((s->last_picture_ptr == NULL || s->last_picture_ptr->f.data[0] == NULL) &&
1110 (s->pict_type!=AV_PICTURE_TYPE_I || s->picture_structure != PICT_FRAME)){
1111 if (s->pict_type != AV_PICTURE_TYPE_I)
1112 av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n");
1113 else if (s->picture_structure != PICT_FRAME)
1114 av_log(avctx, AV_LOG_INFO, "allocate dummy last picture for field based first keyframe\n");
1116 /* Allocate a dummy frame */
1117 i= ff_find_unused_picture(s, 0);
1118 s->last_picture_ptr= &s->picture[i];
1119 s->last_picture_ptr->f.key_frame = 0;
1120 if(ff_alloc_picture(s, s->last_picture_ptr, 0) < 0)
/* mark both fields fully decoded so frame-threaded consumers never wait */
1122 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 0);
1123 ff_thread_report_progress((AVFrame*)s->last_picture_ptr, INT_MAX, 1);
1125 if ((s->next_picture_ptr == NULL || s->next_picture_ptr->f.data[0] == NULL) && s->pict_type == AV_PICTURE_TYPE_B) {
1126 /* Allocate a dummy frame */
1127 i= ff_find_unused_picture(s, 0);
1128 s->next_picture_ptr= &s->picture[i];
1129 s->next_picture_ptr->f.key_frame = 0;
1130 if(ff_alloc_picture(s, s->next_picture_ptr, 0) < 0)
1132 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 0);
1133 ff_thread_report_progress((AVFrame*)s->next_picture_ptr, INT_MAX, 1);
1137 if(s->last_picture_ptr) ff_copy_picture(&s->last_picture, s->last_picture_ptr);
1138 if(s->next_picture_ptr) ff_copy_picture(&s->next_picture, s->next_picture_ptr);
1140 assert(s->pict_type == AV_PICTURE_TYPE_I || (s->last_picture_ptr && s->last_picture_ptr->f.data[0]));
/* field pictures (non-H.264): point data at the right field and double the
 * linesizes so the field is addressed as a half-height frame */
1142 if(s->picture_structure!=PICT_FRAME && s->out_format != FMT_H264){
1145 if(s->picture_structure == PICT_BOTTOM_FIELD){
1146 s->current_picture.f.data[i] += s->current_picture.f.linesize[i];
1148 s->current_picture.f.linesize[i] *= 2;
1149 s->last_picture.f.linesize[i] *= 2;
1150 s->next_picture.f.linesize[i] *= 2;
1154 s->error_recognition= avctx->error_recognition;
1156 /* set dequantizer, we can't do it during init as it might change for mpeg4
1157 and we can't do it in the header decode as init is not called for mpeg4 there yet */
1158 if(s->mpeg_quant || s->codec_id == CODEC_ID_MPEG2VIDEO){
1159 s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1160 s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1161 }else if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
1162 s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1163 s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1165 s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1166 s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
/* dct_error_sum is only allocated when noise reduction is active while encoding */
1169 if(s->dct_error_sum){
1170 assert(s->avctx->noise_reduction && s->encoding);
1172 update_noise_reduction(s);
1175 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration)
1176 return ff_xvmc_field_start(s, avctx);
1181 /* generic function for encode/decode called after a frame has been coded/decoded */
/**
 * Finish the current frame: pad picture edges (needed by unrestricted MV
 * prediction), copy the current_picture struct back into the picture table,
 * free non-reference frames, clear the local Picture copies, and report
 * decode progress for frame threading.
 */
1182 void MPV_frame_end(MpegEncContext *s)
1185 /* redraw edges for the frame if decoding didn't complete */
1186 //just to make sure that all data is rendered.
1187 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
1188 ff_xvmc_field_end(s);
/* edge padding is skipped for hwaccel/VDPAU output and when the user asked
 * for CODEC_FLAG_EMU_EDGE; chroma edges are scaled by the subsampling shifts */
1189 }else if((s->error_count || s->encoding || !(s->avctx->codec->capabilities&CODEC_CAP_DRAW_HORIZ_BAND))
1190 && !s->avctx->hwaccel
1191 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
1192 && s->unrestricted_mv
1193 && s->current_picture.f.reference
1195 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
1196 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
1197 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
1198 s->dsp.draw_edges(s->current_picture.f.data[0], s->linesize,
1199 s->h_edge_pos , s->v_edge_pos,
1200 EDGE_WIDTH , EDGE_WIDTH , EDGE_TOP | EDGE_BOTTOM);
1201 s->dsp.draw_edges(s->current_picture.f.data[1], s->uvlinesize,
1202 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1203 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1204 s->dsp.draw_edges(s->current_picture.f.data[2], s->uvlinesize,
1205 s->h_edge_pos>>hshift, s->v_edge_pos>>vshift,
1206 EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, EDGE_TOP | EDGE_BOTTOM);
1211 s->last_pict_type = s->pict_type;
1212 s->last_lambda_for[s->pict_type] = s->current_picture_ptr->f.quality;
1213 if(s->pict_type!=AV_PICTURE_TYPE_B){
1214 s->last_non_b_pict_type= s->pict_type;
1217 /* copy back current_picture variables */
1218 for(i=0; i<MAX_PICTURE_COUNT; i++){
1219 if(s->picture[i].f.data[0] == s->current_picture.f.data[0]){
1220 s->picture[i]= s->current_picture;
1224 assert(i<MAX_PICTURE_COUNT);
1228 /* release non-reference frames */
1229 for(i=0; i<s->picture_count; i++){
1230 if (s->picture[i].f.data[0] && !s->picture[i].f.reference /*&& s->picture[i].type != FF_BUFFER_TYPE_SHARED*/) {
1231 free_frame_buffer(s, &s->picture[i]);
1235 // clear copies, to avoid confusion
1237 memset(&s->last_picture, 0, sizeof(Picture));
1238 memset(&s->next_picture, 0, sizeof(Picture));
1239 memset(&s->current_picture, 0, sizeof(Picture));
1241 s->avctx->coded_frame= (AVFrame*)s->current_picture_ptr;
/* tell waiting frame-decode threads this reference is fully available */
1243 if (s->codec_id != CODEC_ID_H264 && s->current_picture.f.reference) {
1244 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_height-1, 0);
1249 * draws a line from (ex, ey) -> (sx, sy).
1250 * @param w width of the image
1251 * @param h height of the image
1252 * @param stride stride/linesize of the image
1253 * @param color color of the arrow
 *
 * Endpoints are clipped to the image; the line is rendered additively
 * (pixels accumulate `color`) with 16.16 fixed-point anti-aliasing: each
 * step splits `color` between the two nearest pixels by the fractional part.
1255 static void draw_line(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1258 sx= av_clip(sx, 0, w-1);
1259 sy= av_clip(sy, 0, h-1);
1260 ex= av_clip(ex, 0, w-1);
1261 ey= av_clip(ey, 0, h-1);
1263 buf[sy*stride + sx]+= color;
/* iterate along the major axis so every step advances exactly one pixel */
1265 if(FFABS(ex - sx) > FFABS(ey - sy)){
1267 FFSWAP(int, sx, ex);
1268 FFSWAP(int, sy, ey);
1270 buf+= sx + sy*stride;
/* f = per-step y increment in 16.16 fixed point */
1272 f= ((ey-sy)<<16)/ex;
1273 for(x= 0; x <= ex; x++){
1276 buf[ y *stride + x]+= (color*(0x10000-fr))>>16;
1277 buf[(y+1)*stride + x]+= (color* fr )>>16;
/* mostly-vertical case: step along y instead */
1281 FFSWAP(int, sx, ex);
1282 FFSWAP(int, sy, ey);
1284 buf+= sx + sy*stride;
1286 if(ey) f= ((ex-sx)<<16)/ey;
1288 for(y= 0; y <= ey; y++){
1291 buf[y*stride + x ]+= (color*(0x10000-fr))>>16;
1292 buf[y*stride + x+1]+= (color* fr )>>16;
1298 * draws an arrow from (ex, ey) -> (sx, sy).
1299 * @param w width of the image
1300 * @param h height of the image
1301 * @param stride stride/linesize of the image
1302 * @param color color of the arrow
 *
 * Coordinates are clipped to a band 100 px around the image (draw_line clips
 * the rest).  An arrowhead (two short strokes at the start point) is added
 * only when the vector is longer than 3 pixels.
1304 static void draw_arrow(uint8_t *buf, int sx, int sy, int ex, int ey, int w, int h, int stride, int color){
1307 sx= av_clip(sx, -100, w+100);
1308 sy= av_clip(sy, -100, h+100);
1309 ex= av_clip(ex, -100, w+100);
1310 ey= av_clip(ey, -100, h+100);
1315 if(dx*dx + dy*dy > 3*3){
/* normalize (rx,ry) to a fixed arrowhead length, fixed-point via ff_sqrt */
1318 int length= ff_sqrt((rx*rx + ry*ry)<<8);
1320 //FIXME subpixel accuracy
1321 rx= ROUNDED_DIV(rx*3<<4, length);
1322 ry= ROUNDED_DIV(ry*3<<4, length);
/* two head strokes: one along the direction, one perpendicular to it */
1324 draw_line(buf, sx, sy, sx + rx, sy + ry, w, h, stride, color);
1325 draw_line(buf, sx, sy, sx - ry, sy + rx, w, h, stride, color);
1327 draw_line(buf, sx, sy, ex, ey, w, h, stride, color);
1331 * prints debugging info for the given picture.
/**
 * Two output modes driven by avctx->debug / debug_mv:
 *  - textual per-MB dump (skip counts, qscale, macroblock-type characters)
 *    via av_log;
 *  - visualization drawn into a copy of the picture (motion vectors as
 *    arrows, QP / MB type painted into the chroma planes, partition and
 *    interlacing markers XORed into luma).
 */
1333 void ff_print_debug_info(MpegEncContext *s, AVFrame *pict){
1335 if(s->avctx->hwaccel || !pict || !pict->mb_type) return;
1337 if(s->avctx->debug&(FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)){
1340 av_log(s->avctx, AV_LOG_DEBUG, "New frame, type: %c\n",
1341 av_get_picture_type_char(pict->pict_type));
1342 for(y=0; y<s->mb_height; y++){
1343 for(x=0; x<s->mb_width; x++){
1344 if(s->avctx->debug&FF_DEBUG_SKIP){
1345 int count= s->mbskip_table[x + y*s->mb_stride];
1346 if(count>9) count=9;
1347 av_log(s->avctx, AV_LOG_DEBUG, "%1d", count);
1349 if(s->avctx->debug&FF_DEBUG_QP){
1350 av_log(s->avctx, AV_LOG_DEBUG, "%2d", pict->qscale_table[x + y*s->mb_stride]);
1352 if(s->avctx->debug&FF_DEBUG_MB_TYPE){
1353 int mb_type= pict->mb_type[x + y*s->mb_stride];
1354 //Type & MV direction
1356 av_log(s->avctx, AV_LOG_DEBUG, "P");
1357 else if(IS_INTRA(mb_type) && IS_ACPRED(mb_type))
1358 av_log(s->avctx, AV_LOG_DEBUG, "A");
1359 else if(IS_INTRA4x4(mb_type))
1360 av_log(s->avctx, AV_LOG_DEBUG, "i");
1361 else if(IS_INTRA16x16(mb_type))
1362 av_log(s->avctx, AV_LOG_DEBUG, "I");
1363 else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type))
1364 av_log(s->avctx, AV_LOG_DEBUG, "d");
1365 else if(IS_DIRECT(mb_type))
1366 av_log(s->avctx, AV_LOG_DEBUG, "D");
1367 else if(IS_GMC(mb_type) && IS_SKIP(mb_type))
1368 av_log(s->avctx, AV_LOG_DEBUG, "g");
1369 else if(IS_GMC(mb_type))
1370 av_log(s->avctx, AV_LOG_DEBUG, "G");
1371 else if(IS_SKIP(mb_type))
1372 av_log(s->avctx, AV_LOG_DEBUG, "S");
1373 else if(!USES_LIST(mb_type, 1))
1374 av_log(s->avctx, AV_LOG_DEBUG, ">");
1375 else if(!USES_LIST(mb_type, 0))
1376 av_log(s->avctx, AV_LOG_DEBUG, "<");
1378 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1379 av_log(s->avctx, AV_LOG_DEBUG, "X");
/* segmentation (partition shape) marker */
1384 av_log(s->avctx, AV_LOG_DEBUG, "+");
1385 else if(IS_16X8(mb_type))
1386 av_log(s->avctx, AV_LOG_DEBUG, "-");
1387 else if(IS_8X16(mb_type))
1388 av_log(s->avctx, AV_LOG_DEBUG, "|");
1389 else if(IS_INTRA(mb_type) || IS_16X16(mb_type))
1390 av_log(s->avctx, AV_LOG_DEBUG, " ");
1392 av_log(s->avctx, AV_LOG_DEBUG, "?");
1395 if(IS_INTERLACED(mb_type))
1396 av_log(s->avctx, AV_LOG_DEBUG, "=");
1398 av_log(s->avctx, AV_LOG_DEBUG, " ");
1400 // av_log(s->avctx, AV_LOG_DEBUG, " ");
1402 av_log(s->avctx, AV_LOG_DEBUG, "\n");
/* visualization mode: draw into a private copy so the real buffers survive */
1406 if((s->avctx->debug&(FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE)) || (s->avctx->debug_mv)){
1407 const int shift= 1 + s->quarter_sample;
1411 int h_chroma_shift, v_chroma_shift, block_height;
1412 const int width = s->avctx->width;
1413 const int height= s->avctx->height;
1414 const int mv_sample_log2= 4 - pict->motion_subsample_log2;
1415 const int mv_stride= (s->mb_width << mv_sample_log2) + (s->codec_id == CODEC_ID_H264 ? 0 : 1);
1416 s->low_delay=0; //needed to see the vectors without trashing the buffers
1418 avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift);
1420 memcpy(s->visualization_buffer[i], pict->data[i], (i==0) ? pict->linesize[i]*height:pict->linesize[i]*height >> v_chroma_shift);
1421 pict->data[i]= s->visualization_buffer[i];
1423 pict->type= FF_BUFFER_TYPE_COPY;
1426 block_height = 16>>v_chroma_shift;
1428 for(mb_y=0; mb_y<s->mb_height; mb_y++){
1430 for(mb_x=0; mb_x<s->mb_width; mb_x++){
1431 const int mb_index= mb_x + mb_y*s->mb_stride;
/* motion vectors as arrows; type 0/1/2 = P-fwd / B-fwd / B-back */
1432 if((s->avctx->debug_mv) && pict->motion_val){
1434 for(type=0; type<3; type++){
1437 case 0: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_P_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_P))
1441 case 1: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_FOR)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1445 case 2: if ((!(s->avctx->debug_mv&FF_DEBUG_VIS_MV_B_BACK)) || (pict->pict_type!=AV_PICTURE_TYPE_B))
1450 if(!USES_LIST(pict->mb_type[mb_index], direction))
/* one arrow per partition: 8x8 -> 4 arrows, 16x8/8x16 -> 2, else 1 */
1453 if(IS_8X8(pict->mb_type[mb_index])){
1456 int sx= mb_x*16 + 4 + 8*(i&1);
1457 int sy= mb_y*16 + 4 + 8*(i>>1);
1458 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
1459 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1460 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1461 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
1463 }else if(IS_16X8(pict->mb_type[mb_index])){
1467 int sy=mb_y*16 + 4 + 8*i;
1468 int xy= (mb_x*2 + (mb_y*2 + i)*mv_stride) << (mv_sample_log2-1);
1469 int mx=(pict->motion_val[direction][xy][0]>>shift);
1470 int my=(pict->motion_val[direction][xy][1]>>shift);
1472 if(IS_INTERLACED(pict->mb_type[mb_index]))
1475 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1477 }else if(IS_8X16(pict->mb_type[mb_index])){
1480 int sx=mb_x*16 + 4 + 8*i;
1482 int xy= (mb_x*2 + i + mb_y*2*mv_stride) << (mv_sample_log2-1);
1483 int mx=(pict->motion_val[direction][xy][0]>>shift);
1484 int my=(pict->motion_val[direction][xy][1]>>shift);
1486 if(IS_INTERLACED(pict->mb_type[mb_index]))
1489 draw_arrow(ptr, sx, sy, mx+sx, my+sy, width, height, s->linesize, 100);
1492 int sx= mb_x*16 + 8;
1493 int sy= mb_y*16 + 8;
1494 int xy= (mb_x + mb_y*mv_stride) << mv_sample_log2;
1495 int mx= (pict->motion_val[direction][xy][0]>>shift) + sx;
1496 int my= (pict->motion_val[direction][xy][1]>>shift) + sy;
1497 draw_arrow(ptr, sx, sy, mx, my, width, height, s->linesize, 100);
/* paint the qscale value as a grey level over the MB's chroma block */
1501 if((s->avctx->debug&FF_DEBUG_VIS_QP) && pict->motion_val){
1502 uint64_t c= (pict->qscale_table[mb_index]*128/31) * 0x0101010101010101ULL;
1504 for(y=0; y<block_height; y++){
1505 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= c;
1506 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= c;
/* color-code the MB type into the chroma planes (hue per type) */
1509 if((s->avctx->debug&FF_DEBUG_VIS_MB_TYPE) && pict->motion_val){
1510 int mb_type= pict->mb_type[mb_index];
1513 #define COLOR(theta, r)\
1514 u= (int)(128 + r*cos(theta*3.141592/180));\
1515 v= (int)(128 + r*sin(theta*3.141592/180));
1519 if(IS_PCM(mb_type)){
1521 }else if((IS_INTRA(mb_type) && IS_ACPRED(mb_type)) || IS_INTRA16x16(mb_type)){
1523 }else if(IS_INTRA4x4(mb_type)){
1525 }else if(IS_DIRECT(mb_type) && IS_SKIP(mb_type)){
1527 }else if(IS_DIRECT(mb_type)){
1529 }else if(IS_GMC(mb_type) && IS_SKIP(mb_type)){
1531 }else if(IS_GMC(mb_type)){
1533 }else if(IS_SKIP(mb_type)){
1535 }else if(!USES_LIST(mb_type, 1)){
1537 }else if(!USES_LIST(mb_type, 0)){
1540 assert(USES_LIST(mb_type, 0) && USES_LIST(mb_type, 1));
1544 u*= 0x0101010101010101ULL;
1545 v*= 0x0101010101010101ULL;
1546 for(y=0; y<block_height; y++){
1547 *(uint64_t*)(pict->data[1] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[1])= u;
1548 *(uint64_t*)(pict->data[2] + 8*mb_x + (block_height*mb_y + y)*pict->linesize[2])= v;
/* XOR partition-boundary markers into luma (visible on any background) */
1552 if(IS_8X8(mb_type) || IS_16X8(mb_type)){
1553 *(uint64_t*)(pict->data[0] + 16*mb_x + 0 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1554 *(uint64_t*)(pict->data[0] + 16*mb_x + 8 + (16*mb_y + 8)*pict->linesize[0])^= 0x8080808080808080ULL;
1556 if(IS_8X8(mb_type) || IS_8X16(mb_type)){
1558 pict->data[0][16*mb_x + 8 + (16*mb_y + y)*pict->linesize[0]]^= 0x80;
1560 if(IS_8X8(mb_type) && mv_sample_log2 >= 2){
1561 int dm= 1 << (mv_sample_log2-2);
1563 int sx= mb_x*16 + 8*(i&1);
1564 int sy= mb_y*16 + 8*(i>>1);
1565 int xy= (mb_x*2 + (i&1) + (mb_y*2 + (i>>1))*mv_stride) << (mv_sample_log2-1);
/* mark sub-8x8 splits where neighbouring 4x4 MVs differ */
1567 int32_t *mv = (int32_t*)&pict->motion_val[0][xy];
1568 if(mv[0] != mv[dm] || mv[dm*mv_stride] != mv[dm*(mv_stride+1)])
1570 pict->data[0][sx + 4 + (sy + y)*pict->linesize[0]]^= 0x80;
1571 if(mv[0] != mv[dm*mv_stride] || mv[dm] != mv[dm*(mv_stride+1)])
1572 *(uint64_t*)(pict->data[0] + sx + (sy + 4)*pict->linesize[0])^= 0x8080808080808080ULL;
1576 if(IS_INTERLACED(mb_type) && s->codec_id == CODEC_ID_H264){
1580 s->mbskip_table[mb_index]=0;
/**
 * Half-pel motion compensation for one block in lowres decoding mode.
 * Splits the motion vector into an integer source offset (scaled down by the
 * lowres factor) and a sub-pel fraction handled by the chroma MC function,
 * falling back to emulated_edge_mc when the source block crosses the padded
 * picture edge.  The lowres pix_op table index is clamped to 2.
 */
1586 static inline int hpel_motion_lowres(MpegEncContext *s,
1587 uint8_t *dest, uint8_t *src,
1588 int field_based, int field_select,
1589 int src_x, int src_y,
1590 int width, int height, int stride,
1591 int h_edge_pos, int v_edge_pos,
1592 int w, int h, h264_chroma_mc_func *pix_op,
1593 int motion_x, int motion_y)
1595 const int lowres= s->avctx->lowres;
1596 const int op_index= FFMIN(lowres, 2);
1597 const int s_mask= (2<<lowres)-1;
/* qpel MVs cannot be used in lowres; presumably rounded to hpel here (lines
 * not visible in this extract) — confirm against full source */
1601 if(s->quarter_sample){
/* split MV into sub-pel fraction (sx,sy) and integer pel offset */
1606 sx= motion_x & s_mask;
1607 sy= motion_y & s_mask;
1608 src_x += motion_x >> (lowres+1);
1609 src_y += motion_y >> (lowres+1);
1611 src += src_y * stride + src_x;
/* (!!sx)/(!!sy): one extra source sample is read when interpolating */
1613 if( (unsigned)src_x > h_edge_pos - (!!sx) - w
1614 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1615 s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
1616 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1617 src= s->edge_emu_buffer;
/* rescale sub-pel fraction to the 1/8-pel range expected by pix_op */
1621 sx= (sx << 2) >> lowres;
1622 sy= (sy << 2) >> lowres;
1625 pix_op[op_index](dest, src, stride, h, sx, sy);
1629 /* apply one mpeg motion vector to the three components */
/**
 * Lowres variant of mpeg_motion(): compensates luma and both chroma planes
 * for a single MV, with per-format chroma MV derivation (H.263 special
 * rounding, H.261 full-pel chroma, generic subsampled otherwise), edge
 * emulation, field addressing, and final sub-pel interpolation via the
 * h264 chroma MC functions.
 */
1630 static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
1631 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1632 int field_based, int bottom_field, int field_select,
1633 uint8_t **ref_picture, h264_chroma_mc_func *pix_op,
1634 int motion_x, int motion_y, int h, int mb_y)
1636 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1637 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy, uvsx, uvsy;
1638 const int lowres= s->avctx->lowres;
1639 const int op_index= FFMIN(lowres-1+s->chroma_x_shift, 2);
1640 const int block_s= 8>>lowres;
1641 const int s_mask= (2<<lowres)-1;
1642 const int h_edge_pos = s->h_edge_pos >> lowres;
1643 const int v_edge_pos = s->v_edge_pos >> lowres;
1644 linesize = s->current_picture.f.linesize[0] << field_based;
1645 uvlinesize = s->current_picture.f.linesize[1] << field_based;
1647 if(s->quarter_sample){ //FIXME obviously not perfect but qpel will not work in lowres anyway
/* compensate the vertical offset caused by field selection in lowres */
1653 motion_y += (bottom_field - field_select)*((1<<lowres)-1);
1656 sx= motion_x & s_mask;
1657 sy= motion_y & s_mask;
1658 src_x = s->mb_x*2*block_s + (motion_x >> (lowres+1));
1659 src_y =( mb_y*2*block_s>>field_based) + (motion_y >> (lowres+1));
/* derive the chroma MV per output format */
1661 if (s->out_format == FMT_H263) {
1662 uvsx = ((motion_x>>1) & s_mask) | (sx&1);
1663 uvsy = ((motion_y>>1) & s_mask) | (sy&1);
1666 }else if(s->out_format == FMT_H261){//even chroma mv's are full pel in H261
1669 uvsx = (2*mx) & s_mask;
1670 uvsy = (2*my) & s_mask;
1671 uvsrc_x = s->mb_x*block_s + (mx >> lowres);
1672 uvsrc_y = mb_y*block_s + (my >> lowres);
1674 if(s->chroma_y_shift){
1679 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1680 uvsrc_y =( mb_y*block_s>>field_based) + (my >> (lowres+1));
1682 if(s->chroma_x_shift){
1686 uvsy = motion_y & s_mask;
1688 uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
1691 uvsx = motion_x & s_mask;
1692 uvsy = motion_y & s_mask;
1699 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1700 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1701 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
/* edge emulation when the read block crosses the padded picture border */
1703 if( (unsigned)src_x > h_edge_pos - (!!sx) - 2*block_s
1704 || (unsigned)src_y >(v_edge_pos >> field_based) - (!!sy) - h){
1705 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize, 17, 17+field_based,
1706 src_x, src_y<<field_based, h_edge_pos, v_edge_pos);
1707 ptr_y = s->edge_emu_buffer;
1708 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1709 uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
1710 s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9, 9+field_based,
1711 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1712 s->dsp.emulated_edge_mc(uvbuf+16, ptr_cr, s->uvlinesize, 9, 9+field_based,
1713 uvsrc_x, uvsrc_y<<field_based, h_edge_pos>>1, v_edge_pos>>1);
1719 if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
1720 dest_y += s->linesize;
1721 dest_cb+= s->uvlinesize;
1722 dest_cr+= s->uvlinesize;
1726 ptr_y += s->linesize;
1727 ptr_cb+= s->uvlinesize;
1728 ptr_cr+= s->uvlinesize;
/* rescale sub-pel fractions for the interpolation functions */
1731 sx= (sx << 2) >> lowres;
1732 sy= (sy << 2) >> lowres;
1733 pix_op[lowres-1](dest_y, ptr_y, linesize, h, sx, sy);
1735 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
1736 uvsx= (uvsx << 2) >> lowres;
1737 uvsy= (uvsy << 2) >> lowres;
1738 if(h >> s->chroma_y_shift){
1739 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1740 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
1743 //FIXME h261 lowres loop filter
/**
 * Chroma motion compensation for 4MV (four 8x8 luma vectors) macroblocks in
 * lowres mode.  A single chroma MV is derived from the four luma MVs with the
 * H.263 special rounding, then both chroma planes are compensated from the
 * same source offset, with edge emulation when CODEC_FLAG_EMU_EDGE is set.
 */
1746 static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
1747 uint8_t *dest_cb, uint8_t *dest_cr,
1748 uint8_t **ref_picture,
1749 h264_chroma_mc_func *pix_op,
1751 const int lowres= s->avctx->lowres;
1752 const int op_index= FFMIN(lowres, 2);
1753 const int block_s= 8>>lowres;
1754 const int s_mask= (2<<lowres)-1;
1755 const int h_edge_pos = s->h_edge_pos >> (lowres+1);
1756 const int v_edge_pos = s->v_edge_pos >> (lowres+1);
1757 int emu=0, src_x, src_y, offset, sx, sy;
1760 if(s->quarter_sample){
1765 /* In case of 8X8, we construct a single chroma motion vector
1766 with a special rounding */
1767 mx= ff_h263_round_chroma(mx);
1768 my= ff_h263_round_chroma(my);
1772 src_x = s->mb_x*block_s + (mx >> (lowres+1));
1773 src_y = s->mb_y*block_s + (my >> (lowres+1));
1775 offset = src_y * s->uvlinesize + src_x;
1776 ptr = ref_picture[1] + offset;
1777 if(s->flags&CODEC_FLAG_EMU_EDGE){
1778 if( (unsigned)src_x > h_edge_pos - (!!sx) - block_s
1779 || (unsigned)src_y > v_edge_pos - (!!sy) - block_s){
1780 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1781 ptr= s->edge_emu_buffer;
/* rescale sub-pel fraction for the interpolation function */
1785 sx= (sx << 2) >> lowres;
1786 sy= (sy << 2) >> lowres;
1787 pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
/* Cr: same offset and fraction as Cb */
1789 ptr = ref_picture[2] + offset;
1791 s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
1792 ptr= s->edge_emu_buffer;
1794 pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
1798 * motion compensation of a single macroblock
1800 * @param dest_y luma destination pointer
1801 * @param dest_cb chroma cb/u destination pointer
1802 * @param dest_cr chroma cr/v destination pointer
1803 * @param dir direction (0->forward, 1->backward)
1804 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
1805 * @param pix_op halfpel motion compensation function (average or put normally)
1806 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 *
 * Lowres counterpart of MPV_motion(): dispatches on s->mv_type
 * (16x16 / 8x8 / field / 16x8 / dual-prime as visible below) and delegates
 * to mpeg_motion_lowres() / hpel_motion_lowres() / chroma_4mv_motion_lowres().
1808 static inline void MPV_motion_lowres(MpegEncContext *s,
1809 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
1810 int dir, uint8_t **ref_picture,
1811 h264_chroma_mc_func *pix_op)
1815 const int lowres= s->avctx->lowres;
1816 const int block_s= 8>>lowres;
1821 switch(s->mv_type) {
1823 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1825 ref_picture, pix_op,
1826 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y);
/* 4MV: one luma block per 8x8 partition; chroma uses an averaged MV */
1832 hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) * s->linesize)*block_s,
1833 ref_picture[0], 0, 0,
1834 (2*mb_x + (i & 1))*block_s, (2*mb_y + (i >>1))*block_s,
1835 s->width, s->height, s->linesize,
1836 s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
1837 block_s, block_s, pix_op,
1838 s->mv[dir][i][0], s->mv[dir][i][1]);
1840 mx += s->mv[dir][i][0];
1841 my += s->mv[dir][i][1];
1844 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
1845 chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture, pix_op, mx, my);
1848 if (s->picture_structure == PICT_FRAME) {
/* frame picture: compensate top and bottom fields separately */
1850 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1851 1, 0, s->field_select[dir][0],
1852 ref_picture, pix_op,
1853 s->mv[dir][0][0], s->mv[dir][0][1], block_s, mb_y);
1855 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1856 1, 1, s->field_select[dir][1],
1857 ref_picture, pix_op,
1858 s->mv[dir][1][0], s->mv[dir][1][1], block_s, mb_y);
/* field picture: a same-parity self reference uses the current frame */
1860 if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != AV_PICTURE_TYPE_B && !s->first_field){
1861 ref_picture = s->current_picture_ptr->f.data;
1864 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1865 0, 0, s->field_select[dir][0],
1866 ref_picture, pix_op,
1867 s->mv[dir][0][0], s->mv[dir][0][1], 2*block_s, mb_y>>1);
1872 uint8_t ** ref2picture;
1874 if(s->picture_structure == s->field_select[dir][i] + 1 || s->pict_type == AV_PICTURE_TYPE_B || s->first_field){
1875 ref2picture= ref_picture;
1877 ref2picture = s->current_picture_ptr->f.data;
1880 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1881 0, 0, s->field_select[dir][i],
1882 ref2picture, pix_op,
1883 s->mv[dir][i][0], s->mv[dir][i][1] + 2*block_s*i, block_s, mb_y>>1);
1885 dest_y += 2*block_s*s->linesize;
1886 dest_cb+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1887 dest_cr+= (2*block_s>>s->chroma_y_shift)*s->uvlinesize;
1891 if(s->picture_structure == PICT_FRAME){
1895 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1897 ref_picture, pix_op,
1898 s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], block_s, mb_y);
/* dual prime: first prediction is put, second is averaged on top */
1900 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1904 mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
1905 0, 0, s->picture_structure != i+1,
1906 ref_picture, pix_op,
1907 s->mv[dir][2*i][0],s->mv[dir][2*i][1],2*block_s, mb_y>>1);
1909 // after put we make avg of the same block
1910 pix_op = s->dsp.avg_h264_chroma_pixels_tab;
1912 //opposite parity is always in the same frame if this is second field
1913 if(!s->first_field){
1914 ref_picture = s->current_picture_ptr->f.data;
1924 * find the lowest MB row referenced in the MVs
/**
 * Used for frame-threaded decoding: returns the last macroblock row of the
 * reference picture that the current MB's MVs (in direction @p dir) can
 * touch, so the decoder only waits for that much of the reference.
 * Field pictures are not handled and fall through to the conservative
 * answer (last row).
 */
1926 int MPV_lowest_referenced_row(MpegEncContext *s, int dir)
1928 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
1929 int my, off, i, mvs;
1931 if (s->picture_structure != PICT_FRAME) goto unhandled;
1933 switch (s->mv_type) {
/* scan the vertical components of all MVs for this MB */
1947 for (i = 0; i < mvs; i++) {
1948 my = s->mv[dir][i][1]<<qpel_shift;
1949 my_max = FFMAX(my_max, my);
1950 my_min = FFMIN(my_min, my);
/* convert the widest excursion from qpel units to MB rows (16*4 = 64) */
1953 off = (FFMAX(-my_min, my_max) + 63) >> 6;
1955 return FFMIN(FFMAX(s->mb_y + off, 0), s->mb_height-1);
1957 return s->mb_height-1;
1960 /* put block[] to dest[] */
/* Dequantize an intra block with the current dequantizer and write (not add)
 * the inverse DCT result into dest. */
1961 static inline void put_dct(MpegEncContext *s,
1962 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1964 s->dct_unquantize_intra(s, block, i, qscale);
1965 s->dsp.idct_put (dest, line_size, block);
1968 /* add block[] to dest[] */
/* Add the inverse DCT of an (already dequantized) block to dest; skipped
 * entirely when the block has no coded coefficients (last index < 0). */
1969 static inline void add_dct(MpegEncContext *s,
1970 DCTELEM *block, int i, uint8_t *dest, int line_size)
1972 if (s->block_last_index[i] >= 0) {
1973 s->dsp.idct_add (dest, line_size, block);
/* Dequantize an inter block, then add its inverse DCT to dest; no-op when
 * the block has no coded coefficients (last index < 0). */
1977 static inline void add_dequant_dct(MpegEncContext *s,
1978 DCTELEM *block, int i, uint8_t *dest, int line_size, int qscale)
1980 if (s->block_last_index[i] >= 0) {
1981 s->dct_unquantize_inter(s, block, i, qscale);
1983 s->dsp.idct_add (dest, line_size, block);
1988 * cleans dc, ac, coded_block for the current non intra MB
/**
 * Reset the intra prediction state around the current macroblock so a
 * non-intra MB does not inherit stale predictors: luma DC values go back to
 * the neutral 1024, AC prediction buffers are zeroed, coded_block flags are
 * cleared for MSMPEG4 v3+, the chroma DC/AC state is reset likewise, and the
 * MB is marked non-intra in mbintra_table.
 */
1990 void ff_clean_intra_table_entries(MpegEncContext *s)
1992 int wrap = s->b8_stride;
1993 int xy = s->block_index[0];
/* luma DC predictors: 1024 is the reset value for 8-bit DC prediction */
1996 s->dc_val[0][xy + 1 ] =
1997 s->dc_val[0][xy + wrap] =
1998 s->dc_val[0][xy + 1 + wrap] = 1024;
/* luma AC prediction rows/columns (two 8x8 blocks per memset) */
2000 memset(s->ac_val[0][xy ], 0, 32 * sizeof(int16_t));
2001 memset(s->ac_val[0][xy + wrap], 0, 32 * sizeof(int16_t));
2002 if (s->msmpeg4_version>=3) {
2003 s->coded_block[xy ] =
2004 s->coded_block[xy + 1 ] =
2005 s->coded_block[xy + wrap] =
2006 s->coded_block[xy + 1 + wrap] = 0;
/* chroma: switch to the MB-granularity index */
2009 wrap = s->mb_stride;
2010 xy = s->mb_x + s->mb_y * wrap;
2012 s->dc_val[2][xy] = 1024;
2014 memset(s->ac_val[1][xy], 0, 16 * sizeof(int16_t));
2015 memset(s->ac_val[2][xy], 0, 16 * sizeof(int16_t));
2017 s->mbintra_table[xy]= 0;
2020 /* generic function called after a macroblock has been parsed by the
2021 decoder or after it has been encoded by the encoder.
2023 Important variables used:
2024 s->mb_intra : true if intra macroblock
2025 s->mv_dir : motion vector direction
2026 s->mv_type : motion vector type
2027 s->mv : motion vector
2028 s->interlaced_dct : true if interlaced dct used (mpeg2)
/*
 * Reconstruct one macroblock into the current picture: motion compensation
 * (normal or lowres path) followed by dequant/IDCT of the residual blocks.
 * Inlined with compile-time flags (lowres_flag, is_mpeg12) so the compiler
 * can strip the unused paths per specialization.
 * NOTE(review): the embedded original line numbers in this listing are not
 * contiguous (e.g. 2036 -> 2040), so several statements, else-branches and
 * closing braces are elided from this view; comments describe only what is
 * visible here.
 */
2030 static av_always_inline
2031 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
2032 int lowres_flag, int is_mpeg12)
2034 const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
     /* XvMC hardware acceleration handles the MB itself */
2035 if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
2036 ff_xvmc_decode_mb(s);//xvmc uses pblocks
     /* optional debug dump of the (permuted-back) DCT coefficients */
2040 if(s->avctx->debug&FF_DEBUG_DCT_COEFF) {
2041 /* save DCT coefficients */
2043 DCTELEM *dct = &s->current_picture.f.dct_coeff[mb_xy * 64 * 6];
2044 av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
     /* NOTE(review): loop variables i/j and the outer per-block loop are
      * elided in this listing — confirm against the full source */
2046 for(j=0; j<64; j++){
2047 *dct++ = block[i][s->dsp.idct_permutation[j]];
2048 av_log(s->avctx, AV_LOG_DEBUG, "%5d", dct[-1]);
2050 av_log(s->avctx, AV_LOG_DEBUG, "\n");
2054 s->current_picture.f.qscale_table[mb_xy] = s->qscale;
2056 /* update DC predictors for P macroblocks */
2058 if (!is_mpeg12 && (s->h263_pred || s->h263_aic)) {
2059 if(s->mbintra_table[mb_xy])
2060 ff_clean_intra_table_entries(s);
     /* reset DC predictors (lines for last_dc[0]/[1] elided in this view) */
2064 s->last_dc[2] = 128 << s->intra_dc_precision;
2067 else if (!is_mpeg12 && (s->h263_pred || s->h263_aic))
2068 s->mbintra_table[mb_xy]=1;
     /* skip full reconstruction while encoding B/intra-only frames unless
      * PSNR reporting or RD mb_decision needs the decoded pixels */
2070 if ((s->flags&CODEC_FLAG_PSNR) || !(s->encoding && (s->intra_only || s->pict_type==AV_PICTURE_TYPE_B) && s->avctx->mb_decision != FF_MB_DECISION_RD)) { //FIXME precalc
2071 uint8_t *dest_y, *dest_cb, *dest_cr;
2072 int dct_linesize, dct_offset;
2073 op_pixels_func (*op_pix)[4];
2074 qpel_mc_func (*op_qpix)[16];
2075 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2076 const int uvlinesize = s->current_picture.f.linesize[1];
2077 const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
2078 const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
2080 /* avoid copy if macroblock skipped in last frame too */
2081 /* skip only during decoding as we might trash the buffers during encoding a bit */
2083 uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
2084 const int age = s->current_picture.f.age;
2088 if (s->mb_skipped) {
2090 assert(s->pict_type!=AV_PICTURE_TYPE_I);
2092 (*mbskip_ptr) ++; /* indicate that this time we skipped it */
2093 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2095 /* if previous was skipped too, then nothing to do ! */
2096 if (*mbskip_ptr >= age && s->current_picture.f.reference){
2099 } else if(!s->current_picture.f.reference) {
2100 (*mbskip_ptr) ++; /* increase counter so the age can be compared cleanly */
2101 if(*mbskip_ptr >99) *mbskip_ptr= 99;
2103 *mbskip_ptr = 0; /* not skipped */
     /* interlaced DCT doubles the luma stride and starts field 2 one line down */
2107 dct_linesize = linesize << s->interlaced_dct;
2108 dct_offset =(s->interlaced_dct)? linesize : linesize*block_size;
     /* write directly when readable, otherwise into the scratchpad and copy
      * out at the end (dest_y assignment for the readable case is elided) */
2112 dest_cb= s->dest[1];
2113 dest_cr= s->dest[2];
2115 dest_y = s->b_scratchpad;
2116 dest_cb= s->b_scratchpad+16*linesize;
2117 dest_cr= s->b_scratchpad+32*linesize;
2121 /* motion handling */
2122 /* decoding or more than one mb_type (MC was already done otherwise) */
     /* frame-threading: wait until reference rows we will read are decoded */
2125 if(HAVE_PTHREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
2126 if (s->mv_dir & MV_DIR_FORWARD) {
2127 ff_thread_await_progress((AVFrame*)s->last_picture_ptr, MPV_lowest_referenced_row(s, 0), 0);
2129 if (s->mv_dir & MV_DIR_BACKWARD) {
2130 ff_thread_await_progress((AVFrame*)s->next_picture_ptr, MPV_lowest_referenced_row(s, 1), 0);
     /* lowres MC path: forward pass uses "put", a following backward pass
      * switches to "avg" so B-frame predictions are blended */
2135 h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
2137 if (s->mv_dir & MV_DIR_FORWARD) {
2138 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
2139 op_pix = s->dsp.avg_h264_chroma_pixels_tab;
2141 if (s->mv_dir & MV_DIR_BACKWARD) {
2142 MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
     /* full-resolution MC path, same put-then-avg scheme */
2145 op_qpix= s->me.qpel_put;
2146 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
2147 op_pix = s->dsp.put_pixels_tab;
2149 op_pix = s->dsp.put_no_rnd_pixels_tab;
2151 if (s->mv_dir & MV_DIR_FORWARD) {
2152 MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
2153 op_pix = s->dsp.avg_pixels_tab;
2154 op_qpix= s->me.qpel_avg;
2156 if (s->mv_dir & MV_DIR_BACKWARD) {
2157 MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
2162 /* skip dequant / idct if we are really late ;) */
2163 if(s->avctx->skip_idct){
2164 if( (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
2165 ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
2166 || s->avctx->skip_idct >= AVDISCARD_ALL)
2170 /* add dct residue */
     /* codecs whose dequant was not already folded in: dequantize + IDCT + add */
2171 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
2172 || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
2173 add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2174 add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2175 add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2176 add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2178 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2179 if (s->chroma_y_shift){
2180 add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2181 add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2185 add_dequant_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2186 add_dequant_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2187 add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2188 add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
     /* already-dequantized coefficients: plain IDCT + add */
2191 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
2192 add_dct(s, block[0], 0, dest_y , dct_linesize);
2193 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2194 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2195 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2197 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2198 if(s->chroma_y_shift){//Chroma420
2199 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2200 add_dct(s, block[5], 5, dest_cr, uvlinesize);
     /* 4:2:2 / 4:4:4: recompute stride/offset for the taller chroma planes */
2203 dct_linesize = uvlinesize << s->interlaced_dct;
2204 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2206 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2207 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2208 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2209 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2210 if(!s->chroma_x_shift){//Chroma444
2211 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2212 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2213 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2214 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2219 else if (CONFIG_WMV2_DECODER || CONFIG_WMV2_ENCODER) {
2220 ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
2223 /* dct only in intra block */
     /* intra MB: overwrite destination instead of adding a residual */
2224 if(s->encoding || !(s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO)){
2225 put_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
2226 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
2227 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
2228 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
2230 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2231 if(s->chroma_y_shift){
2232 put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
2233 put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
2237 put_dct(s, block[4], 4, dest_cb, dct_linesize, s->chroma_qscale);
2238 put_dct(s, block[5], 5, dest_cr, dct_linesize, s->chroma_qscale);
2239 put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
2240 put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
     /* MPEG-1/2 intra: coefficients are already dequantized, direct idct_put */
2244 s->dsp.idct_put(dest_y , dct_linesize, block[0]);
2245 s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
2246 s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
2247 s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2249 if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
2250 if(s->chroma_y_shift){
2251 s->dsp.idct_put(dest_cb, uvlinesize, block[4]);
2252 s->dsp.idct_put(dest_cr, uvlinesize, block[5]);
2255 dct_linesize = uvlinesize << s->interlaced_dct;
2256 dct_offset =(s->interlaced_dct)? uvlinesize : uvlinesize*block_size;
2258 s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
2259 s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
2260 s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2261 s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2262 if(!s->chroma_x_shift){//Chroma444
2263 s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
2264 s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
2265 s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2266 s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
     /* non-readable destination: copy the finished MB from the scratchpad */
2274 s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
2275 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
2276 s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
/*
 * Public entry point: dispatch to the right MPV_decode_mb_internal
 * specialization based on lowres mode and whether the stream is MPEG-1/2,
 * so each combination is compiled with constant flags.
 */
2281 void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
2283 if(s->out_format == FMT_MPEG1) {
2284 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
2285 else MPV_decode_mb_internal(s, block, 0, 1);
2288 if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
2289 else MPV_decode_mb_internal(s, block, 0, 0);
/**
 * Draw (pad edges of and hand to the application) a horizontal band of the
 * current picture once it has been decoded.
 * @param y the top row of the band
2294 * @param h is the normal height, this will be reduced automatically if needed for the last row
 */
2296 void ff_draw_horiz_band(MpegEncContext *s, int y, int h){
2297 const int field_pic= s->picture_structure != PICT_FRAME;
     /* pad picture edges for unrestricted MV, only for sw-decoded reference
      * frames and when edge emulation is not requested */
2303 if (!s->avctx->hwaccel
2304 && !(s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
2305 && s->unrestricted_mv
2306 && s->current_picture.f.reference
2308 && !(s->flags&CODEC_FLAG_EMU_EDGE)) {
2309 int sides = 0, edge_h;
2310 int hshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_w;
2311 int vshift = av_pix_fmt_descriptors[s->avctx->pix_fmt].log2_chroma_h;
2312 if (y==0) sides |= EDGE_TOP;
2313 if (y + h >= s->v_edge_pos) sides |= EDGE_BOTTOM;
2315 edge_h= FFMIN(h, s->v_edge_pos - y);
     /* luma, then both chroma planes scaled by the pixfmt's chroma shifts */
2317 s->dsp.draw_edges(s->current_picture_ptr->f.data[0] + y *s->linesize , s->linesize,
2318 s->h_edge_pos , edge_h , EDGE_WIDTH , EDGE_WIDTH , sides);
2319 s->dsp.draw_edges(s->current_picture_ptr->f.data[1] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2320 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
2321 s->dsp.draw_edges(s->current_picture_ptr->f.data[2] + (y>>vshift)*s->uvlinesize, s->uvlinesize,
2322 s->h_edge_pos>>hshift, edge_h>>hshift, EDGE_WIDTH>>hshift, EDGE_WIDTH>>vshift, sides);
     /* clip band height to the visible picture */
2325 h= FFMIN(h, s->avctx->height - y);
2327 if(field_pic && s->first_field && !(s->avctx->slice_flags&SLICE_FLAG_ALLOW_FIELD)) return;
2329 if (s->avctx->draw_horiz_band) {
     /* pick the frame to show: current for B/low-delay/coded-order, else
      * the last reference frame */
2333 if(s->pict_type==AV_PICTURE_TYPE_B || s->low_delay || (s->avctx->slice_flags&SLICE_FLAG_CODED_ORDER))
2334 src= (AVFrame*)s->current_picture_ptr;
2335 else if(s->last_picture_ptr)
2336 src= (AVFrame*)s->last_picture_ptr;
2340 if(s->pict_type==AV_PICTURE_TYPE_B && s->picture_structure == PICT_FRAME && s->out_format != FMT_H264){
     /* byte offsets of the band start in each plane
      * (offset[1] assignment elided in this listing) */
2346 offset[0]= y * s->linesize;
2348 offset[2]= (y >> s->chroma_y_shift) * s->uvlinesize;
2354 s->avctx->draw_horiz_band(s->avctx, src, offset,
2355 y, s->picture_structure, h);
/*
 * Set up per-row block indices and destination plane pointers for the
 * current macroblock row. Called before iterating MBs across a row;
 * dest[] then advances per-MB elsewhere.
 */
2359 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
2360 const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
2361 const int uvlinesize = s->current_picture.f.linesize[1];
2362 const int mb_size= 4 - s->avctx->lowres;
     /* luma 8x8 block indices: 2x2 blocks per MB in the b8 grid */
2364 s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2;
2365 s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2;
2366 s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2;
2367 s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2;
     /* chroma block indices live after the luma area in the same table */
2368 s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2369 s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1;
2370 //block_index is not used by mpeg2, so it is not affected by chroma_format
     /* start one MB left of mb_x; MB size is 16 (or smaller with lowres) */
2372 s->dest[0] = s->current_picture.f.data[0] + ((s->mb_x - 1) << mb_size);
2373 s->dest[1] = s->current_picture.f.data[1] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2374 s->dest[2] = s->current_picture.f.data[2] + ((s->mb_x - 1) << (mb_size - s->chroma_x_shift));
2376 if(!(s->pict_type==AV_PICTURE_TYPE_B && s->avctx->draw_horiz_band && s->picture_structure==PICT_FRAME))
     /* advance to the current MB row; field pictures step by half rows */
2378 if(s->picture_structure==PICT_FRAME){
2379 s->dest[0] += s->mb_y * linesize << mb_size;
2380 s->dest[1] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2381 s->dest[2] += s->mb_y * uvlinesize << (mb_size - s->chroma_y_shift);
2383 s->dest[0] += (s->mb_y>>1) * linesize << mb_size;
2384 s->dest[1] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2385 s->dest[2] += (s->mb_y>>1) * uvlinesize << (mb_size - s->chroma_y_shift);
2386 assert((s->mb_y&1) == (s->picture_structure == PICT_BOTTOM_FIELD));
/*
 * Flush decoder state: release all internally/user-owned frame buffers,
 * drop all picture references, and reset the parser and bitstream buffer.
 * Called on seek/discontinuity.
 */
2391 void ff_mpeg_flush(AVCodecContext *avctx){
2393 MpegEncContext *s = avctx->priv_data;
2395 if(s==NULL || s->picture==NULL)
2398 for(i=0; i<s->picture_count; i++){
2399 if (s->picture[i].f.data[0] &&
2400 (s->picture[i].f.type == FF_BUFFER_TYPE_INTERNAL ||
2401 s->picture[i].f.type == FF_BUFFER_TYPE_USER))
2402 free_frame_buffer(s, &s->picture[i]);
2404 s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL;
2406 s->mb_x= s->mb_y= 0;
     /* reset the startcode parser so stale partial frames are discarded */
2409 s->parse_context.state= -1;
2410 s->parse_context.frame_start_found= 0;
2411 s->parse_context.overread= 0;
2412 s->parse_context.overread_index= 0;
2413 s->parse_context.index= 0;
2414 s->parse_context.last_index= 0;
2415 s->bitstream_buffer_size=0;
/*
 * MPEG-1 intra dequantization (C reference): DC is scaled by the DC scale
 * table; AC levels are multiplied by qscale and the intra matrix, >>3, with
 * "oddification" ((level-1)|1) as mandated by MPEG-1 mismatch control.
 * The two symmetric branches handle negative and positive levels.
 */
2419 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
2420 DCTELEM *block, int n, int qscale)
2422 int i, level, nCoeffs;
2423 const uint16_t *quant_matrix;
2425 nCoeffs= s->block_last_index[n];
     /* DC: luma vs chroma scale chosen by block index (branch lines elided) */
2428 block[0] = block[0] * s->y_dc_scale;
2430 block[0] = block[0] * s->c_dc_scale;
2431 /* XXX: only mpeg1 */
2432 quant_matrix = s->intra_matrix;
2433 for(i=1;i<=nCoeffs;i++) {
2434 int j= s->intra_scantable.permutated[i];
     /* negative level path */
2439 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2440 level = (level - 1) | 1;
     /* positive level path */
2443 level = (int)(level * qscale * quant_matrix[j]) >> 3;
2444 level = (level - 1) | 1;
/*
 * MPEG-1 inter dequantization (C reference): level' = ((2*|level|+1) *
 * qscale * matrix[j]) >> 4, made odd per MPEG-1 mismatch control, sign
 * restored. Scans all coefficients 0..block_last_index (inter has no
 * separate DC). Uses intra_scantable.permutated for the coefficient order,
 * which is the shared scan table in this codebase.
 */
2451 static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
2452 DCTELEM *block, int n, int qscale)
2454 int i, level, nCoeffs;
2455 const uint16_t *quant_matrix;
2457 nCoeffs= s->block_last_index[n];
2459 quant_matrix = s->inter_matrix;
2460 for(i=0; i<=nCoeffs; i++) {
2461 int j= s->intra_scantable.permutated[i];
     /* negative level path */
2466 level = (((level << 1) + 1) * qscale *
2467 ((int) (quant_matrix[j]))) >> 4;
2468 level = (level - 1) | 1;
     /* positive level path */
2471 level = (((level << 1) + 1) * qscale *
2472 ((int) (quant_matrix[j]))) >> 4;
2473 level = (level - 1) | 1;
/*
 * MPEG-2 intra dequantization (C reference). Like the MPEG-1 variant but
 * WITHOUT the (level-1)|1 oddification (MPEG-2 uses sum-based mismatch
 * control instead). With alternate_scan all 64 coefficients are processed.
 */
2480 static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
2481 DCTELEM *block, int n, int qscale)
2483 int i, level, nCoeffs;
2484 const uint16_t *quant_matrix;
2486 if(s->alternate_scan) nCoeffs= 63;
2487 else nCoeffs= s->block_last_index[n];
     /* DC: luma vs chroma scale chosen by block index (branch lines elided) */
2490 block[0] = block[0] * s->y_dc_scale;
2492 block[0] = block[0] * s->c_dc_scale;
2493 quant_matrix = s->intra_matrix;
2494 for(i=1;i<=nCoeffs;i++) {
2495 int j= s->intra_scantable.permutated[i];
     /* negative level path */
2500 level = (int)(level * qscale * quant_matrix[j]) >> 3;
     /* positive level path */
2503 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * Bit-exact MPEG-2 intra dequantization. Same scaling as the _c variant;
 * the full version additionally tracks a coefficient-sum parity for
 * spec-exact mismatch control (those lines are elided in this listing).
 */
2510 static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
2511 DCTELEM *block, int n, int qscale)
2513 int i, level, nCoeffs;
2514 const uint16_t *quant_matrix;
2517 if(s->alternate_scan) nCoeffs= 63;
2518 else nCoeffs= s->block_last_index[n];
     /* DC: luma vs chroma scale chosen by block index (branch lines elided) */
2521 block[0] = block[0] * s->y_dc_scale;
2523 block[0] = block[0] * s->c_dc_scale;
2524 quant_matrix = s->intra_matrix;
2525 for(i=1;i<=nCoeffs;i++) {
2526 int j= s->intra_scantable.permutated[i];
     /* negative level path */
2531 level = (int)(level * qscale * quant_matrix[j]) >> 3;
     /* positive level path */
2534 level = (int)(level * qscale * quant_matrix[j]) >> 3;
/*
 * MPEG-2 inter dequantization (C reference): ((2*|level|+1) * qscale *
 * matrix[j]) >> 4 with sign restored; no per-coefficient oddification
 * (MPEG-2 mismatch control is sum-based; that logic is elided here).
 */
2543 static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
2544 DCTELEM *block, int n, int qscale)
2546 int i, level, nCoeffs;
2547 const uint16_t *quant_matrix;
2550 if(s->alternate_scan) nCoeffs= 63;
2551 else nCoeffs= s->block_last_index[n];
2553 quant_matrix = s->inter_matrix;
2554 for(i=0; i<=nCoeffs; i++) {
2555 int j= s->intra_scantable.permutated[i];
     /* negative level path */
2560 level = (((level << 1) + 1) * qscale *
2561 ((int) (quant_matrix[j]))) >> 4;
     /* positive level path */
2564 level = (((level << 1) + 1) * qscale *
2565 ((int) (quant_matrix[j]))) >> 4;
/*
 * H.263 intra dequantization (C reference): uniform quantizer —
 * level' = level*qmul +/- qadd with qadd = (qscale-1)|1 (always odd).
 * DC uses the DC scale tables; qmul assignment is elided in this listing.
 */
2574 static void dct_unquantize_h263_intra_c(MpegEncContext *s,
2575 DCTELEM *block, int n, int qscale)
2577 int i, level, qmul, qadd;
2580 assert(s->block_last_index[n]>=0);
     /* DC: luma vs chroma scale chosen by block index (branch lines elided) */
2586 block[0] = block[0] * s->y_dc_scale;
2588 block[0] = block[0] * s->c_dc_scale;
2589 qadd = (qscale - 1) | 1;
     /* raster_end maps the scan-order last index to raster order */
2596 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2598 for(i=1; i<=nCoeffs; i++) {
     /* negative level path */
2602 level = level * qmul - qadd;
     /* positive level path */
2604 level = level * qmul + qadd;
/*
 * H.263 inter dequantization (C reference): same uniform quantizer as the
 * intra variant but without DC special-casing; processes coefficients
 * from index 0. qmul assignment is elided in this listing.
 */
2611 static void dct_unquantize_h263_inter_c(MpegEncContext *s,
2612 DCTELEM *block, int n, int qscale)
2614 int i, level, qmul, qadd;
2617 assert(s->block_last_index[n]>=0);
2619 qadd = (qscale - 1) | 1;
2622 nCoeffs= s->inter_scantable.raster_end[ s->block_last_index[n] ];
2624 for(i=0; i<=nCoeffs; i++) {
     /* negative level path */
2628 level = level * qmul - qadd;
     /* positive level path */
2630 level = level * qmul + qadd;
/**
2638 * set qscale and update qscale dependent variables.
 * Clamps qscale to the valid range (low-clamp line elided in this listing),
 * then refreshes chroma_qscale and the DC scale factors from their tables.
 */
2640 void ff_set_qscale(MpegEncContext * s, int qscale)
2644 else if (qscale > 31)
2648 s->chroma_qscale= s->chroma_qscale_table[qscale];
2650 s->y_dc_scale= s->y_dc_scale_table[ qscale ];
2651 s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ];
/*
 * Report decoding progress (rows completed) for frame-threading consumers.
 * Suppressed for B-frames, partitioned frames, and after an error, since
 * progress would then be unreliable for threads waiting on this picture.
 */
2654 void MPV_report_decode_progress(MpegEncContext *s)
2656 if (s->pict_type != FF_B_TYPE && !s->partitioned_frame && !s->error_occurred)
2657 ff_thread_report_progress((AVFrame*)s->current_picture_ptr, s->mb_y, 0);