1 /* $Id: decomb.c,v 1.14 2008/04/25 5:00:00 jbrjake Exp $
3 This file is part of the HandBrake source code.
4 Homepage: <http://handbrake.fr/>.
5 It may be used under the terms of the GNU General Public License.
7 The yadif algorithm was created by Michael Niedermayer. */
9 #include "libavcodec/avcodec.h"
10 #include "mpeg2dec/mpeg2.h"
12 #define SUPPRESS_AV_LOG
/* Filter defaults: mode 4, auto field parity, mcdeint disabled, QP 1. */
14 #define MODE_DEFAULT 4
15 #define PARITY_DEFAULT -1
17 #define MCDEINT_MODE_DEFAULT -1
18 #define MCDEINT_QP_DEFAULT 1
/* NOTE(review): ABS evaluates its argument twice -- do not pass
   expressions with side effects (e.g. ABS(i++)). */
20 #define ABS(a) ((a) > 0 ? (a) : (-(a)))
21 #define MIN3(a,b,c) MIN(MIN(a,b),c)
22 #define MAX3(a,b,c) MAX(MAX(a,b),c)
24 struct hb_filter_private_s
/* --- mcdeint (motion-compensated deinterlace via a throwaway encoder) state --- */
38 int mcdeint_outbuf_size;
39 uint8_t * mcdeint_outbuf;
40 AVCodecContext * mcdeint_avctx_enc;
41 AVFrame * mcdeint_frame;
42 AVFrame * mcdeint_frame_dec;
/* Count of frames actually deinterlaced (vs. passed through untouched). */
51 int deinterlaced_frames;
/* Two output frame buffers (double-buffered for the two-field modes) and a
   zero-length buffer used only to carry frame settings across the one-frame
   delay yadif introduces. */
59 hb_buffer_t * buf_out[2];
60 hb_buffer_t * buf_settings;
/* Per-plane map of combed 8x8 macroblocks for the experimental mode-7 path.
   NOTE(review): capacity is hard-coded at 480x270 blocks with no visible
   bounds check against frame dimensions -- confirm safe for large frames. */
62 int cc_array[3][480][270];
63 int combed_macroblocks;
64 int uncombed_macroblocks;
/* Forward declarations of the filter entry points. */
67 hb_filter_private_t * hb_decomb_init( int pix_fmt,
72 int hb_decomb_work( const hb_buffer_t * buf_in,
73 hb_buffer_t ** buf_out,
77 hb_filter_private_t * pv );
79 void hb_decomb_close( hb_filter_private_t * pv );
/* Public filter object wiring the entry points into HandBrake's filter chain. */
81 hb_filter_object_t hb_filter_decomb =
84 "Decombs selectively with (ffmpeg or yadif/mcdeint or blending)",
/* 4-tap cubic interpolation of the sample between y1 and y2, given two
   neighbours on each side. Weights (-3, 23, 23, -3) sum to 40; the elided
   lines presumably divide by 40 and clamp the result to [0,255] -- the
   visible "result < 0" branch is part of that clamp. TODO confirm. */
91 int cubic_interpolate( int y0, int y1, int y2, int y3 )
93 /* From http://www.neuron2.net/library/cubicinterp.html */
94 int result = ( y0 * -3 ) + ( y1 * 23 ) + ( y2 * 23 ) + ( y3 * -3 );
101 else if( result < 0 )
/* Push the incoming frame into the 3-deep yadif reference cache:
   the two memmoves (elided here) age ref[1]->ref[0] and ref[2]->ref[1],
   then each plane of 'pic' is copied row-by-row into ref[2]. */
109 static void store_ref( const uint8_t ** pic,
110 hb_filter_private_t * pv )
114 sizeof(uint8_t *)*3 );
118 sizeof(uint8_t *)*3*3 );
121 for( i = 0; i < 3; i++ )
123 const uint8_t * src = pic[i];
124 uint8_t * ref = pv->ref[2][i];
/* Source rows are packed at plane width; destination rows use the padded
   ref_stride, hence the separate increments below. */
126 int w = pv->width[i];
127 int h = pv->height[i];
128 int ref_stride = pv->ref_stride[i];
131 for( y = 0; y < pv->height[i]; y++ )
134 src = (uint8_t*)src + w;
135 ref = (uint8_t*)ref + ref_stride;
/* Copy cached reference frame 'frm' (0 = oldest, 2 = newest) back out into
   the packed planar picture 'pic', row by row for each of the 3 planes. */
140 static void get_ref( uint8_t ** pic, hb_filter_private_t * pv, int frm )
143 for( i = 0; i < 3; i++ )
145 uint8_t * dst = pic[i];
146 const uint8_t * ref = pv->ref[frm][i];
147 int w = pv->width[i];
148 int ref_stride = pv->ref_stride[i];
151 for( y = 0; y < pv->height[i]; y++ )
/* Vertical 5-tap low-pass blend of one pixel with its two neighbours above
   and below; the centre tap is weighted 6 (remaining taps and the final
   normalisation/clamp are elided from this view). */
160 int blend_filter_pixel( int up2, int up1, int current, int down1, int down2 )
162 /* Low-pass 5-tap filter */
166 result += current * 6;
/* Blend-deinterlace one scanline: for each pixel, gather the two rows above
   (a, b), the current row (c), and the two rows below (d, e) from the cached
   frame, clamping at the top and bottom image borders, then low-pass them. */
183 static void blend_filter_line( uint8_t *dst,
187 hb_filter_private_t * pv )
189 int w = pv->width[plane];
/* refs is the padded stride of the reference cache, not the packed width. */
190 int refs = pv->ref_stride[plane];
193 for( x = 0; x < w; x++)
205 /* First line, so A and B don't exist.*/
211 /* Second line, no A. */
214 else if( y == (pv->height[plane] - 2) )
216 /* Second to last line, no E. */
219 else if( y == (pv->height[plane] -1) )
221 /* Last line, no D or E. */
226 dst[0] = blend_filter_pixel( a, b, c, d, e );
/* Interpolate one missing-field scanline using the yadif algorithm
   (spatial prediction checked along several diagonals, then temporally
   clamped against the previous/next fields). 'parity' selects which of
   prev/cur stands in for the "two fields ago" reference. */
233 static void yadif_filter_line( uint8_t *dst,
240 hb_filter_private_t * pv )
242 uint8_t *prev2 = parity ? prev : cur ;
243 uint8_t *next2 = parity ? cur : next;
245 int w = pv->width[plane];
246 int refs = pv->ref_stride[plane];
249 int macroblock_y = y / 8 ;
252 for( x = 0; x < w; x++)
256 /* Buggy experimental code for macroblock-by-macrobock comb detection.*/
257 if(plane == 0 && pv->mode == 7)
260 macroblock_x = x / 8;
262 if(pv->cc_array[plane][macroblock_x][macroblock_y] < 0 || pv->cc_array[plane][macroblock_x][macroblock_y] > 64)
263 hb_log("[%i][%i] ( %i * %i )macroblock %i x %i is combed: %i", pv->deinterlaced_frames, plane, x, y, macroblock_x, macroblock_y, pv->cc_array[plane][macroblock_x][macroblock_y] );
/* NOTE(review): the neighbour tests below read macroblock_x-1 / +1 and
   macroblock_y-1 / +1 with no edge guard -- at the frame borders this
   indexes cc_array out of bounds (e.g. [-1]). Matches the "buggy
   experimental" label above; needs bounds clamping before mode 7 ships. */
265 if(pv->cc_array[plane][macroblock_x][macroblock_y] == 0 && pv->cc_array[plane][macroblock_x+1][macroblock_y] == 0 && pv->cc_array[plane][macroblock_x-1][macroblock_y] == 0 && pv->cc_array[plane][macroblock_x][macroblock_y+1] == 0 && pv->cc_array[plane][macroblock_x][macroblock_y-1] == 0 )
268 pv->uncombed_macroblocks++;
/* Block judged uncombed: skip filtering this pixel entirely. */
269 goto end_of_yadif_filter_pixel;
272 pv->combed_macroblocks++;
276 /* Temporal average -- the current pixel location in the previous and next fields */
277 int d = (prev2[0] + next2[0])>>1;
281 /* How the current pixel changes from the field before to the field after */
282 int temporal_diff0 = ABS(prev2[0] - next2[0]);
283 /* The average of how much the pixels above and below change from the field before to now. */
284 int temporal_diff1 = ( ABS(prev[-refs] - cur[-refs]) + ABS(prev[+refs] - cur[+refs]) ) >> 1;
285 /* The average of how much the pixels above and below change from now to the next field. */
286 int temporal_diff2 = ( ABS(next[-refs] - cur[-refs]) + ABS(next[+refs] - cur[+refs]) ) >> 1;
287 /* For the actual difference, use the largest of the previous average diffs. */
288 int diff = MAX3(temporal_diff0>>1, temporal_diff1, temporal_diff2);
290 /* SAD of how the pixel-1, the pixel, and the pixel+1 change from the line above to below. */
291 int spatial_score = ABS(cur[-refs-1] - cur[+refs-1]) + ABS(cur[-refs]-cur[+refs]) +
292 ABS(cur[-refs+1] - cur[+refs+1]) - 1;
295 /* Spatial pred is either a bilinear or cubic vertical interpolation. */
/* NOTE(review): cur[-3*refs]/cur[3*refs] reach 3 rows outside the scanline;
   this relies on the 3-row padding added when the ref planes were
   allocated (see hb_decomb_init). */
298 spatial_pred = cubic_interpolate( cur[-3*refs], cur[-refs], cur[+refs], cur[3*refs] );
302 spatial_pred = (c+e)>>1;
305 /* EDDI: Edge Directed Deinterlacing Interpolation
306 Uses the Martinez-Lim Line Shift Parametric Modeling algorithm...I think.
307 Checks 4 different slopes to see if there is more similarity along a diagonal
308 than there was vertically. If a diagonal is more similar, then it indicates
309 an edge, so interpolate along that instead of a vertical line, using either
310 linear or cubic interpolation depending on mode. */
311 #define YADIF_CHECK(j)\
312 { int score = ABS(cur[-refs-1+j] - cur[+refs-1-j])\
313 + ABS(cur[-refs +j] - cur[+refs -j])\
314 + ABS(cur[-refs+1+j] - cur[+refs+1-j]);\
315 if( score < spatial_score ){\
316 spatial_score = score;\
322 spatial_pred = cubic_interpolate(cur[-3 * refs - 3], cur[-refs -1], cur[+refs + 1], cur[3* refs + 3] );\
325 spatial_pred = cubic_interpolate( ( ( cur[-3*refs - 4] + cur[-refs - 4] ) / 2 ) , cur[-refs -2], cur[+refs + 2], ( ( cur[3*refs + 4] + cur[refs + 4] ) / 2 ) );\
328 spatial_pred = cubic_interpolate(cur[-3 * refs +3], cur[-refs +1], cur[+refs - 1], cur[3* refs -3] );\
331 spatial_pred = cubic_interpolate(( ( cur[-3*refs + 4] + cur[-refs + 4] ) / 2 ), cur[-refs +2], cur[+refs - 2], ( ( cur[3*refs - 4] + cur[refs - 4] ) / 2 ) );\
337 spatial_pred = ( cur[-refs +j] + cur[+refs -j] ) >>1;\
/* Nested checks: the -2 / +2 slope is only tried when the -1 / +1 slope
   already beat the vertical score (hence the trailing brace pairs). */
340 YADIF_CHECK(-1) YADIF_CHECK(-2) }} }}
341 YADIF_CHECK( 1) YADIF_CHECK( 2) }} }}
343 /* Temporally adjust the spatial prediction by comparing against the
344 alternate (associated) fields in the previous and next frames. */
345 int b = (prev2[-2*refs] + next2[-2*refs])>>1;
346 int f = (prev2[+2*refs] + next2[+2*refs])>>1;
348 /* Find the median value */
349 int max = MAX3(d-e, d-c, MIN(b-c, f-e));
350 int min = MIN3(d-e, d-c, MAX(b-c, f-e));
351 diff = MAX3( diff, min, -max );
/* Clamp the spatial prediction into the temporally-allowed band [d-diff, d+diff]. */
353 if( spatial_pred > d + diff )
355 spatial_pred = d + diff;
357 else if( spatial_pred < d - diff )
359 spatial_pred = d - diff;
362 dst[0] = spatial_pred;
364 end_of_yadif_filter_pixel:
/* Deinterlace one output frame: first (for mode 7) build the per-macroblock
   comb map in cc_array, then for every plane filter the field lines --
   blending, yadif interpolation, or a straight copy depending on mode and
   line parity. */
374 static void yadif_filter( uint8_t ** dst,
377 hb_filter_private_t * pv )
381 /* Buggy, experimental code for macroblock-by-macroblock decombing.*/
384 int x, y, block_x, block_y, plane, plane_width, plane_height, offset, cc;
389 int color_diff = pv->color_diff;
390 int color_equal = pv->color_equal;
/* Flag bit 16 marks the source frame as progressive; use stricter thresholds. */
392 if ( pv->buf_settings->flags & 16 )
394 /* Frame is progressive, be more discerning. */
395 color_diff = pv->prog_diff;
396 color_equal = pv->prog_equal;
399 /* Iterate through planes */
/* NOTE(review): loop bound is "plane < 1", so only the luma plane is
   scanned despite the chroma offset bookkeeping below -- presumably
   intentional for the experimental mode, but confirm. */
400 for( plane = 0; plane < 1; plane++ )
402 plane_width = pv->width[plane];
403 plane_height = pv->height[plane];
407 /* Y has already been checked, now offset by Y's dimensions
408 and divide all the other values by 2, since Cr and Cb
409 are half-size compared to Y. */
410 stride = plane_width * plane_height;
412 else if ( plane == 2 )
414 /* Y and Cb are done, so the offset needs to be bumped
415 so it's width*height + (width / 2) * (height / 2) */
418 /* Grab a horizontal line */
419 for(y = 0; y < plane_height; y += block )
421 uint8_t *line = &pv->ref[1][plane][ y*plane_width ];
423 /* Iterate through it horizontally in blocks */
424 for(x = 0; x < plane_width; x += block)
426 /* Clear out the current macroblock mapping from the last frame. */
427 pv->cc_array[plane][x/block][y/block] = 0;
431 /* Go through the block horizontally */
432 for(block_x = 0; block_x < block; block_x++)
434 /* Go through the block vertically, collecting pixels */
435 for(block_y = 0; block_y < block*2; block_y++)
437 s[block_y] = line[x+block_x+(block_y*plane_width)];
440 /* Now go through the results to check combing. */
441 for(block_y = 0; block_y < block; block_y++)
/* sadA measures same-field similarity (rows 2 apart), sadB measures
   cross-field similarity (adjacent rows); combing shows as sadB >> sadA. */
443 sadA += abs(s[block_y] - s[block_y+2]);
444 sadB += abs(s[block_y] - s[block_y+1]);
446 // if( abs(s[block_y] - s[block_y+2]) < color_equal && abs(s[block_y] - s[block_y+1]) > color_diff)
448 // pv->cc_array[plane][x/block][y/block]++;
455 pv->cc_array[plane][x/block][y/block] = 1;
464 /* Visualize macroblocks */
466 fprintf(stderr, "FRAME %i VISUALIZATION\n", pv->deinterlaced_frames);
467 for( y = 0; y < 60; y++ )
469 for( x = 0; x < 90; x++ )
471 if(pv->cc_array[0][x][y])
472 fprintf(stderr, "X");
474 fprintf(stderr, "O");
477 fprintf(stderr, "\n");
479 fprintf(stderr, "\n\n");
/* --- Actual per-plane filtering pass --- */
484 for( i = 0; i < 3; i++ )
486 int w = pv->width[i];
487 int h = pv->height[i];
488 int ref_stride = pv->ref_stride[i];
491 for( y = 0; y < h; y++ )
495 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
496 uint8_t *cur = &pv->ref[1][i][y*ref_stride];
497 uint8_t *next = &pv->ref[2][i][y*ref_stride];
498 uint8_t *dst2 = &dst[i][y*w];
500 blend_filter_line( dst2, cur, i, y, pv );
/* Lines of the field being reconstructed get yadif interpolation... */
502 else if( (y ^ parity) & 1 )
504 uint8_t *prev = &pv->ref[0][i][y*ref_stride];
505 uint8_t *cur = &pv->ref[1][i][y*ref_stride];
506 uint8_t *next = &pv->ref[2][i][y*ref_stride];
507 uint8_t *dst2 = &dst[i][y*w];
509 yadif_filter_line( dst2, prev, cur, next, i, parity ^ tff, y, pv );
/* ...while lines of the kept field are copied through unchanged. */
513 memcpy( &dst[i][y*w],
514 &pv->ref[1][i][y*ref_stride],
515 w * sizeof(uint8_t) );
/* Motion-compensated refinement pass: encode the yadif output through a
   motion-estimating codec, then for each interpolated line nudge each pixel
   toward the motion-compensated prediction along the best-matching slope. */
521 static void mcdeint_filter( uint8_t ** dst,
524 hb_filter_private_t * pv )
529 #ifdef SUPPRESS_AV_LOG
530 /* TODO: temporarily change log level to suppress obnoxious debug output */
531 int loglevel = av_log_get_level();
532 av_log_set_level( AV_LOG_QUIET );
/* Feed the source planes straight into the encoder frame (no copy). */
537 pv->mcdeint_frame->data[i] = src[i];
538 pv->mcdeint_frame->linesize[i] = pv->width[i];
540 pv->mcdeint_avctx_enc->me_cmp = FF_CMP_SAD;
541 pv->mcdeint_avctx_enc->me_sub_cmp = FF_CMP_SAD;
542 pv->mcdeint_frame->quality = pv->mcdeint_qp * FF_QP2LAMBDA;
/* NOTE(review): out_size is never checked for error (<0) in the visible code. */
544 out_size = avcodec_encode_video( pv->mcdeint_avctx_enc,
546 pv->mcdeint_outbuf_size,
549 pv->mcdeint_frame_dec = pv->mcdeint_avctx_enc->coded_frame;
551 for( i = 0; i < 3; i++ )
553 int w = pv->width[i];
554 int h = pv->height[i];
/* fils = decoded-frame stride, srcs = packed source stride (plane width). */
555 int fils = pv->mcdeint_frame_dec->linesize[i];
556 int srcs = pv->width[i];
558 for( y = 0; y < h; y++ )
/* Only the interpolated field's lines are refined. */
560 if( (y ^ parity) & 1 )
562 for( x = 0; x < w; x++ )
/* Crude border guard keeping the +-1 row / +-2 column taps in range. */
564 if( (x-2)+(y-1)*w >= 0 && (x+2)+(y+1)*w < w*h )
567 &pv->mcdeint_frame_dec->data[i][x + y*fils];
568 uint8_t * srcp = &src[i][x + y*srcs];
/* diff0/diff1: how far the MC prediction deviates from the source
   above and below the current pixel. */
570 int diff0 = filp[-fils] - srcp[-srcs];
571 int diff1 = filp[+fils] - srcp[+srcs];
574 ABS(srcp[-srcs-1] - srcp[+srcs-1])
575 + ABS(srcp[-srcs ] - srcp[+srcs ])
576 + ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
/* Same diagonal-slope search as yadif: take the diffs along the
   slope with the lowest vertical SAD. */
580 #define MCDEINT_CHECK(j)\
581 { int score = ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
582 + ABS(srcp[-srcs +j] - srcp[+srcs -j])\
583 + ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
584 if( score < spatial_score ) {\
585 spatial_score = score;\
586 diff0 = filp[-fils+j] - srcp[-srcs+j];\
587 diff1 = filp[+fils-j] - srcp[+srcs-j];
589 MCDEINT_CHECK(-1) MCDEINT_CHECK(-2) }} }}
590 MCDEINT_CHECK( 1) MCDEINT_CHECK( 2) }} }}
592 if(diff0 + diff1 > 0)
594 temp -= (diff0 + diff1 -
595 ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
599 temp -= (diff0 + diff1 +
600 ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;
/* Branch-free clamp to [0,255]: comparing against 255U promotes temp to
   unsigned, so negatives also fail the test; ~(temp>>31) yields 0 for
   negative temp and 255 for overflow -- assumes 32-bit int (TODO confirm). */
603 filp[0] = dst[i][x + y*w] =
604 temp > 255U ? ~(temp>>31) : temp;
609 pv->mcdeint_frame_dec->data[i][x + y*fils];
/* Kept-field lines: copy source through and mirror into the decoded
   frame so the next encode sees a consistent reference. */
615 for( y = 0; y < h; y++ )
617 if( !((y ^ parity) & 1) )
619 for( x = 0; x < w; x++ )
621 pv->mcdeint_frame_dec->data[i][x + y*fils] =
622 dst[i][x + y*w]= src[i][x + y*srcs];
628 #ifdef SUPPRESS_AV_LOG
629 /* TODO: restore previous log level */
630 av_log_set_level(loglevel);
/* Allocate and configure the filter's private state. Returns NULL (in the
   elided branch) for any pixel format other than planar YUV 4:2:0, since all
   plane math below assumes half-size chroma. */
634 hb_filter_private_t * hb_decomb_init( int pix_fmt,
639 if( pix_fmt != PIX_FMT_YUV420P )
/* NOTE(review): calloc result is used unchecked below -- OOM would crash. */
644 hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );
646 pv->pix_fmt = pix_fmt;
648 pv->width[0] = width;
649 pv->height[0] = height;
650 pv->width[1] = pv->width[2] = width >> 1;
651 pv->height[1] = pv->height[2] = height >> 1;
/* One full YUV420 frame: Y (w*h) plus two quarter-size chroma planes. */
653 int buf_size = 3 * width * height / 2;
654 pv->buf_out[0] = hb_buffer_init( buf_size );
655 pv->buf_out[1] = hb_buffer_init( buf_size );
656 pv->buf_settings = hb_buffer_init( 0 );
658 pv->deinterlaced_frames = 0;
659 pv->passed_frames = 0;
660 pv->color_equal = 10;
665 pv->prog_threshold = 9;
667 pv->combed_macroblocks = 0;
668 pv->uncombed_macroblocks = 0;
671 pv->mode = MODE_DEFAULT;
672 pv->parity = PARITY_DEFAULT;
674 pv->mcdeint_mode = MCDEINT_MODE_DEFAULT;
675 pv->mcdeint_qp = MCDEINT_QP_DEFAULT;
/* Settings string: mode:color_equal:color_diff:threshold:prog_equal:
   prog_diff:prog_threshold (order inferred from the elided argument list --
   TODO confirm). NOTE(review): sscanf return value is not checked, so a
   malformed string silently leaves trailing fields at their defaults. */
679 sscanf( settings, "%d:%d:%d:%d:%d:%d:%d",
686 &pv->prog_threshold );
/* Modes 2 and 5 are the double-rate variants; they imply mcdeint. */
689 if( pv->mode == 2 || pv->mode == 5 )
691 pv->mcdeint_mode = 0;
694 /* Allocate yadif specific buffers */
698 for( i = 0; i < 3; i++ )
/* Round plane dims up to a multiple of 32 (with 6 extra rows of padding
   on height) and halve for chroma. */
701 int w = ((width + 31) & (~31))>>is_chroma;
702 int h = ((height+6+ 31) & (~31))>>is_chroma;
704 pv->ref_stride[i] = w;
706 for( j = 0; j < 3; j++ )
/* Stored pointer is offset 3 rows into the allocation so the filter can
   safely read cur[-3*refs]; hb_decomb_close must subtract the same
   offset before free(). malloc is unchecked here. */
708 pv->ref[j][i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;
713 /* Allocate mcdeint specific buffers */
714 if( pv->mcdeint_mode >= 0 )
717 avcodec_register_all();
/* Snow is used purely for its motion estimation (MEMC_ONLY below). */
719 AVCodec * enc = avcodec_find_encoder( CODEC_ID_SNOW );
722 for (i = 0; i < 3; i++ )
724 AVCodecContext * avctx_enc;
726 avctx_enc = pv->mcdeint_avctx_enc = avcodec_alloc_context();
728 avctx_enc->width = width;
729 avctx_enc->height = height;
730 avctx_enc->time_base = (AVRational){1,25}; // meaningless
731 avctx_enc->gop_size = 300;
732 avctx_enc->max_b_frames = 0;
733 avctx_enc->pix_fmt = PIX_FMT_YUV420P;
734 avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
735 avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
736 avctx_enc->global_quality = 1;
737 avctx_enc->flags2 = CODEC_FLAG2_MEMC_ONLY;
738 avctx_enc->me_cmp = FF_CMP_SAD; //SSE;
739 avctx_enc->me_sub_cmp = FF_CMP_SAD; //SSE;
740 avctx_enc->mb_cmp = FF_CMP_SSE;
/* Higher mcdeint modes buy better motion search (UMH, 4MV, QPEL). */
742 switch( pv->mcdeint_mode )
747 avctx_enc->me_method = ME_UMH;
749 avctx_enc->flags |= CODEC_FLAG_4MV;
750 avctx_enc->dia_size =2;
752 avctx_enc->flags |= CODEC_FLAG_QPEL;
/* NOTE(review): avcodec_open return value is ignored. */
755 avcodec_open(avctx_enc, enc);
758 pv->mcdeint_frame = avcodec_alloc_frame();
759 pv->mcdeint_outbuf_size = width * height * 10;
760 pv->mcdeint_outbuf = malloc( pv->mcdeint_outbuf_size );
/* Tear down the filter: log frame/macroblock statistics, then release the
   output buffers, the yadif reference planes, and any mcdeint state. */
766 void hb_decomb_close( hb_filter_private_t * pv )
775 hb_log("decomb: deinterlaced %i | unfiltered %i | total %i", pv->deinterlaced_frames, pv->passed_frames, pv->deinterlaced_frames + pv->passed_frames);
779 hb_log("decomb macroblock: deinterlaced: %i | unfiltered %i | total %i", pv->combed_macroblocks, pv->uncombed_macroblocks, pv->combed_macroblocks + pv->uncombed_macroblocks);
782 /* Cleanup frame buffers */
785 hb_buffer_close( &pv->buf_out[0] );
789 hb_buffer_close( &pv->buf_out[1] );
791 if (pv->buf_settings )
793 hb_buffer_close( &pv->buf_settings );
796 /* Cleanup yadif specific buffers */
/* Flattened walk over ref[0..2][0..2]: i%3 is the frame slot, i/3 the plane. */
800 for( i = 0; i<3*3; i++ )
802 uint8_t **p = &pv->ref[i%3][i/3];
/* Undo the padding offset applied at allocation before freeing.
   NOTE(review): init added +3*w to the pointer while this subtracts
   3*ref_stride[plane] -- equal only because ref_stride was set to that
   same w; keep the two expressions in sync. */
805 free( *p - 3*pv->ref_stride[i/3] );
811 /* Cleanup mcdeint specific buffers */
812 if( pv->mcdeint_mode >= 0 )
814 if( pv->mcdeint_avctx_enc )
816 avcodec_close( pv->mcdeint_avctx_enc );
817 av_freep( &pv->mcdeint_avctx_enc );
819 if( pv->mcdeint_outbuf )
821 free( pv->mcdeint_outbuf );
/* Per-frame entry point. Mode 0 path: detect combing and run libavcodec's
   deinterlacer (or pass through). Other modes: maintain the 3-frame yadif
   cache (which delays output by one frame), then yadif/mcdeint-filter the
   middle frame if it was flagged combed. Function body continues past the
   end of this chunk. */
828 int hb_decomb_work( const hb_buffer_t * cbuf_in,
829 hb_buffer_t ** buf_out,
833 hb_filter_private_t * pv )
/* Cast away const: the tail of this function writes buf_in->new_chap. */
835 hb_buffer_t * buf_in = (hb_buffer_t *)cbuf_in;
838 pix_fmt != pv->pix_fmt ||
839 width != pv->width[0] ||
840 height != pv->height[0] )
842 return FILTER_FAILED;
845 avpicture_fill( &pv->pic_in, buf_in->data,
846 pix_fmt, width, height );
848 /* Use libavcodec deinterlace if mode == 0 */
851 avpicture_fill( &pv->pic_out, pv->buf_out[0]->data,
852 pix_fmt, width, height );
854 /* Check for combing on the input frame */
855 int interlaced = hb_detect_comb(buf_in, width, height, pv->color_equal, pv->color_diff, pv->threshold, pv->prog_equal, pv->prog_diff, pv->prog_threshold);
859 avpicture_deinterlace( &pv->pic_out, &pv->pic_in,
860 pix_fmt, width, height );
862 pv->deinterlaced_frames++;
863 //hb_log("Frame %i is combed (Progressive: %s )", pv->deinterlaced_frames + pv->passed_frames, (buf_in->flags & 16) ? "Y" : "N");
865 hb_buffer_copy_settings( pv->buf_out[0], buf_in );
866 *buf_out = pv->buf_out[0];
870 /* No combing detected, pass input frame through unmolested.*/
874 hb_buffer_copy_settings( pv->buf_out[0], buf_in );
882 /* Determine if top-field first layout */
886 tff = !!(buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST);
/* Forced parity from settings: parity 0 => TFF, 1 => BFF. */
890 tff = (pv->parity & 1) ^ 1;
893 /* Store current frame in yadif cache */
894 store_ref( (const uint8_t**)pv->pic_in.data, pv );
898 /* Note down if the input frame is combed */
/* pv->comb is a shift-register of per-frame comb flags; bit 1 refers to
   the previous (cached) frame that is about to be output. */
899 pv->comb = (pv->comb << 1) | hb_detect_comb(buf_in, width, height, pv->color_equal, pv->color_diff, pv->threshold, pv->prog_equal, pv->prog_diff, pv->prog_threshold);
902 /* If yadif is not ready, store another ref and return FILTER_DELAY */
903 if( pv->yadif_ready == 0 )
905 store_ref( (const uint8_t**)pv->pic_in.data, pv );
907 hb_buffer_copy_settings( pv->buf_settings, buf_in );
909 /* don't let 'work_loop' send a chapter mark upstream */
910 buf_in->new_chap = 0;
917 /* yadif works one frame behind so if the previous frame
918 * had combing, deinterlace it otherwise just output it. */
919 if( pv->mode == 7 ) // Experimental for macroblock decombing
921 /* Perform yadif filtering */
923 pv->deinterlaced_frames++;
/* Double-rate modes (2, 5) emit two fields => two passes; others one. */
925 for( frame = 0; frame <= ( ( pv->mode == 2 || pv->mode == 5 )? 1 : 0 ) ; frame++ )
927 int parity = frame ^ tff ^ 1;
929 avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,
930 pix_fmt, width, height );
932 yadif_filter( pv->pic_out.data, parity, tff, pv );
934 if( pv->mcdeint_mode >= 0 )
936 /* Perform mcdeint filtering */
937 avpicture_fill( &pv->pic_in, pv->buf_out[(frame^1)]->data,
938 pix_fmt, width, height );
940 mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv );
943 *buf_out = pv->buf_out[!(frame^1)];
946 else if( (pv->comb & 2 ) == 0 )
948 /* previous frame not interlaced - copy cached input frame to buf_out */
952 avpicture_fill( &pv->pic_out, pv->buf_out[0]->data, pix_fmt, width, height );
953 get_ref( (uint8_t**)pv->pic_out.data, pv, 1 );
954 *buf_out = pv->buf_out[0];
958 /* Perform yadif filtering */
960 pv->deinterlaced_frames++;
/* NOTE(review): this branch duplicates the mode-7 filtering loop above
   verbatim -- a candidate for extraction into a helper. */
962 for( frame = 0; frame <= ( ( pv->mode == 2 || pv->mode == 5 )? 1 : 0 ) ; frame++ )
964 int parity = frame ^ tff ^ 1;
966 avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,
967 pix_fmt, width, height );
969 yadif_filter( pv->pic_out.data, parity, tff, pv );
971 if( pv->mcdeint_mode >= 0 )
973 /* Perform mcdeint filtering */
974 avpicture_fill( &pv->pic_in, pv->buf_out[(frame^1)]->data,
975 pix_fmt, width, height );
977 mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv );
980 *buf_out = pv->buf_out[!(frame^1)];
984 /* Copy buffered settings to output buffer settings */
985 hb_buffer_copy_settings( *buf_out, pv->buf_settings );
987 /* Replace buffered settings with input buffer settings */
988 hb_buffer_copy_settings( pv->buf_settings, buf_in );
990 /* don't let 'work_loop' send a chapter mark upstream */
991 buf_in->new_chap = 0;