It may be used under the terms of the GNU General Public License. */
#include <stdarg.h>
-#include <time.h>
+#include <time.h>
#include <sys/time.h>
#include "common.h"
case HB_MUX_IPOD:
case HB_MUX_MKV:
overhead = 6;
- break;
+ break;
case HB_MUX_AVI:
overhead = 24;
- break;
+ break;
case HB_MUX_OGM:
overhead = 6;
break;
abitrate = job->abitrate * 1000 / 8;
}
avail -= length * abitrate;
-
+
/* Audio overhead */
avail -= length * job->arate * overhead / samples_per_frame;
}
ret += buf->size - buf->cur;
}
- return ret;
+ return ret;
}
/**********************************************************************
int copied;
int copying;
int i;
-
+
for( i = 0, copied = 0; copied < size; i++ )
{
buf = hb_list_item( l, i );
copying = MIN( buf->size - buf->cur, size - copied );
memcpy( &dst[copied], &buf->data[buf->cur], copying );
copied += copying;
- }
+ }
}
/**********************************************************************
int copied;
int copying;
uint8_t has_pts;
-
+
/* So we won't have to deal with NULL pointers */
uint64_t dummy1, dummy2;
}
copied += copying;
- }
+ }
}
/**********************************************************************
/**********************************************************************
* hb_error
**********************************************************************
- * Using whatever output is available display this error.
+ * Using whatever output is available display this error.
*********************************************************************/
void hb_error( char * log, ... )
{
/**********************************************************************
* hb_title_init
**********************************************************************
- *
+ *
*********************************************************************/
hb_title_t * hb_title_init( char * dvd, int index )
{
/**********************************************************************
* hb_title_close
**********************************************************************
- *
+ *
*********************************************************************/
void hb_title_close( hb_title_t ** _t )
{
free( audio );
}
hb_list_close( &t->list_audio );
-
+
while( ( chapter = hb_list_item( t->list_chapter, 0 ) ) )
{
hb_list_rem( t->list_chapter, chapter );
free( chapter );
}
hb_list_close( &t->list_chapter );
-
+
while( ( subtitle = hb_list_item( t->list_subtitle, 0 ) ) )
{
hb_list_rem( t->list_subtitle, subtitle );
/**********************************************************************
* hb_filter_close
**********************************************************************
- *
+ *
*********************************************************************/
void hb_filter_close( hb_filter_object_t ** _f )
{
{
/* ID assigned by UI so it can groups job passes together */
int sequence_id;
-
+
/* Pointer to the title to be ripped */
hb_title_t * title;
-
+
/* Chapter selection */
int chapter_start;
int chapter_end;
deinterlace: 0 or 1
width: must be a multiple of 16
height: must be a multiple of 16
- keep_ratio: used by UIs
+ keep_ratio: used by UIs
pixel_ratio: store pixel aspect ratio in the video
pixel_aspect_width: numerator for pixel aspect ratio
pixel_aspect_height: denominator for pixel aspect ratio
vrate, vrate_base: output framerate is vrate / vrate_base
h264_level: boolean for whether or not we're encoding for iPod
crf: boolean for whether to use constant rate factor with x264
- x264opts: string of extra x264 options
+ x264opts: string of extra x264 options
areBframes: boolean to note if b-frames are included in x264opts */
#define HB_VCODEC_MASK 0x0000FF
#define HB_VCODEC_FFMPEG 0x000001
#define HB_MUX_OGM 0x080000
#define HB_MUX_IPOD 0x100000
#define HB_MUX_MKV 0x200000
-
+
int mux;
const char * file;
int codec;
int rate;
int bitrate;
-
+
/* ac3flags is only set when the source audio format is HB_ACODEC_AC3 */
int ac3flags;
/* Exact duration (in 1/90000s) */
uint64_t duration;
-
+
/* Optional chapter title */
char title[1024];
};
#ifdef __LIBHB__
hb_filter_private_t* (* init) ( int, int, int, char * );
-
+
int (* work) ( const hb_buffer_t *, hb_buffer_t **,
int, int, int, hb_filter_private_t * );
-
+
void (* close) ( hb_filter_private_t * );
-
+
hb_filter_private_t * private_data;
//hb_buffer_t * buffer;
#endif
/*
Copyright (C) 2005 Michael Niedermayer <michaelni@gmx.at>
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
{ 42, 26, 38, 22, 41, 25, 37, 21, },
};
-struct hb_filter_private_s
+struct hb_filter_private_s
{
int pix_fmt;
int width[3];
int height[3];
-
+
int pp7_qp;
int pp7_mode;
int pp7_mpeg2;
int pp7_temp_stride;
uint8_t * pp7_src;
-
+
AVPicture pic_in;
- AVPicture pic_out;
+ AVPicture pic_out;
hb_buffer_t * buf_out;
};
-hb_filter_private_t * hb_deblock_init( int pix_fmt,
- int width,
+hb_filter_private_t * hb_deblock_init( int pix_fmt,
+ int width,
int height,
char * settings );
int hb_deblock_work( const hb_buffer_t * buf_in,
hb_buffer_t ** buf_out,
int pix_fmt,
- int width,
+ int width,
int height,
hb_filter_private_t * pv );
void hb_deblock_close( hb_filter_private_t * pv );
hb_filter_object_t hb_filter_deblock =
-{
+{
FILTER_DEBLOCK,
"Deblock (pp7)",
NULL,
static inline void pp7_dct_a( DCTELEM * dst, uint8_t * src, int stride )
{
int i;
-
+
for( i = 0; i < 4; i++ )
{
int s0 = src[0*stride] + src[6*stride];
int s1 = src[1*stride] + src[5*stride];
int s2 = src[2*stride] + src[4*stride];
- int s3 = src[3*stride];
- int s = s3+s3;
-
+ int s3 = src[3*stride];
+ int s = s3+s3;
+
s3 = s - s0;
s0 = s + s0;
s = s2 + s1;
s2 = s2 - s1;
-
+
dst[0] = s0 + s;
dst[2] = s0 - s;
dst[1] = 2*s3 + s2;
dst[3] = s3 - s2*2;
-
+
src++;
dst += 4;
}
static void pp7_dct_b( DCTELEM * dst, DCTELEM * src )
{
int i;
-
+
for( i = 0; i < 4; i++ )
{
int s0 = src[0*4] + src[6*4];
int s1 = src[1*4] + src[5*4];
int s2 = src[2*4] + src[4*4];
- int s3 = src[3*4];
+ int s3 = src[3*4];
int s = s3+s3;
-
+
s3 = s - s0;
s0 = s + s0;
s = s2 + s1;
s2 = s2 - s1;
-
+
dst[0*4] = s0 + s;
dst[2*4] = s0 - s;
dst[1*4] = 2*s3 + s2;
dst[3*4] = s3 - s2*2;
-
+
src++;
dst++;
}
{
int qp, i;
int bias = 0;
-
+
for( qp = 0; qp < 99; qp++ )
{
for( i = 0; i < 16; i++ )
{
- pp7_threshold[qp][i] =
- ((i&1)?SN2:SN0) * ((i&4)?SN2:SN0) *
+ pp7_threshold[qp][i] =
+ ((i&1)?SN2:SN0) * ((i&4)?SN2:SN0) *
XMAX(1,qp) * (1<<2) - 1 - bias;
}
}
static int pp7_hard_threshold( DCTELEM * src, int qp )
{
- int i;
+ int i;
int a;
-
+
a = src[0] * pp7_factor[0];
for( i = 1; i < 16; i++ )
{
static int pp7_medium_threshold( DCTELEM * src, int qp )
{
- int i;
+ int i;
int a;
-
+
a = src[0] * pp7_factor[0];
for( i = 1; i < 16; i++ )
{
}
else
{
- if( level>0 )
+ if( level>0 )
{
a += 2*(level - (int)threshold1) * pp7_factor[i];
}
static int pp7_soft_threshold( DCTELEM * src, int qp )
{
- int i;
+ int i;
int a;
-
+
a = src[0] * pp7_factor[0];
for( i = 1; i < 16; i++ )
{
int level= src[i];
if( ((unsigned)(level+threshold1))>threshold2 )
{
- if( level>0 )
+ if( level>0 )
{
a += (level - (int)threshold1) * pp7_factor[i];
}
static int ( * pp7_requantize )( DCTELEM * src, int qp ) = pp7_hard_threshold;
-static void pp7_filter( hb_filter_private_t * pv,
- uint8_t * dst,
- uint8_t * src,
- int width,
- int height,
- uint8_t * qp_store,
- int qp_stride,
+static void pp7_filter( hb_filter_private_t * pv,
+ uint8_t * dst,
+ uint8_t * src,
+ int width,
+ int height,
+ uint8_t * qp_store,
+ int qp_stride,
int is_luma)
{
int x, y;
-
+
const int stride = is_luma ? pv->pp7_temp_stride : ((width+16+15)&(~15));
uint8_t * p_src = pv->pp7_src + 8*stride;
DCTELEM * block = (DCTELEM *)(pv->pp7_src);
DCTELEM * temp = (DCTELEM *)(pv->pp7_src + 32);
-
- if( !src || !dst )
+
+ if( !src || !dst )
{
return;
}
-
+
for( y = 0; y < height; y++ )
{
int index = 8 + 8*stride + y*stride;
memcpy( p_src + index, src + y*width, width );
-
+
for( x = 0; x < 8; x++ )
- {
+ {
p_src[index - x - 1] = p_src[index + x ];
p_src[index + width + x ] = p_src[index + width - x - 1];
}
}
-
+
for( y = 0; y < 8; y++ )
{
- memcpy( p_src + ( 7-y)*stride,
+ memcpy( p_src + ( 7-y)*stride,
p_src + ( y+8)*stride, stride );
- memcpy( p_src + (height+8+y)*stride,
+ memcpy( p_src + (height+8+y)*stride,
p_src + (height-y+7)*stride, stride );
}
-
+
for( y = 0; y < height; y++ )
{
for( x = -8; x < 0; x += 4 )
const int index = x + y*stride + (8-3)*(1+stride) + 8;
uint8_t * src = p_src + index;
DCTELEM * tp = temp+4*x;
-
+
pp7_dct_a( tp+4*8, src, stride );
- }
-
+ }
+
for( x = 0; x < width; )
{
const int qps = 3 + is_luma;
int end = XMIN(x+8, width);
-
+
int qp;
if( pv->pp7_qp )
{
}
else
{
- qp = qp_store[ (XMIN(x, width-1)>>qps) +
+ qp = qp_store[ (XMIN(x, width-1)>>qps) +
(XMIN(y, height-1)>>qps) * qp_stride ];
-
- if( pv->pp7_mpeg2 )
+
+ if( pv->pp7_mpeg2 )
{
qp >>= 1;
}
}
-
+
for( ; x < end; x++ )
{
const int index = x + y*stride + (8-3)*(1+stride) + 8;
uint8_t * src = p_src + index;
DCTELEM * tp = temp+4*x;
int v;
-
+
if( (x&3) == 0 )
{
pp7_dct_a( tp+4*8, src, stride );
}
-
+
pp7_dct_b( block, tp );
-
+
v = pp7_requantize( block, qp );
v = (v + pp7_dither[y&7][x&7]) >> 6;
if( (unsigned)v > 255 )
}
}
-hb_filter_private_t * hb_deblock_init( int pix_fmt,
- int width,
+hb_filter_private_t * hb_deblock_init( int pix_fmt,
+ int width,
int height,
char * settings )
{
{
return 0;
}
-
+
hb_filter_private_t * pv = malloc( sizeof(struct hb_filter_private_s) );
-
+
pv->pix_fmt = pix_fmt;
pv->width[0] = width;
pv->height[0] = height;
-
+
pv->width[1] = pv->width[2] = width >> 1;
pv->height[1] = pv->height[2] = height >> 1;
-
+
pv->pp7_qp = PP7_QP_DEFAULT;
pv->pp7_mode = PP7_MODE_DEFAULT;
{
sscanf( settings, "%d:%d", &pv->pp7_qp, &pv->pp7_mode );
}
-
+
if( pv->pp7_qp < 0 )
{
pv->pp7_qp = 0;
}
-
+
pp7_init_threshold();
-
+
switch( pv->pp7_mode )
{
case 0:
pp7_requantize = pp7_medium_threshold;
break;
}
-
+
int h = (height+16+15)&(~15);
-
+
pv->pp7_temp_stride = (width+16+15)&(~15);
pv->pp7_src = (uint8_t*)malloc( pv->pp7_temp_stride*(h+8)*sizeof(uint8_t) );
-
- int buf_size = 3 * width * height / 2;
+
+ int buf_size = 3 * width * height / 2;
pv->buf_out = hb_buffer_init( buf_size );
-
+
return pv;
}
void hb_deblock_close( hb_filter_private_t * pv )
{
- if( !pv )
+ if( !pv )
{
return;
- }
-
+ }
+
if( pv->buf_out )
{
hb_buffer_close( &pv->buf_out );
}
-
+
free( pv );
}
int hb_deblock_work( const hb_buffer_t * buf_in,
hb_buffer_t ** buf_out,
int pix_fmt,
- int width,
+ int width,
int height,
hb_filter_private_t * pv )
-{
- if( !pv ||
+{
+ if( !pv ||
pix_fmt != pv->pix_fmt ||
width != pv->width[0] ||
height != pv->height[0] )
{
return FILTER_FAILED;
}
-
- avpicture_fill( &pv->pic_in, buf_in->data,
+
+ avpicture_fill( &pv->pic_in, buf_in->data,
pix_fmt, width, height );
- avpicture_fill( &pv->pic_out, pv->buf_out->data,
+ avpicture_fill( &pv->pic_out, pv->buf_out->data,
pix_fmt, width, height );
if( /*TODO: mpi->qscale ||*/ pv->pp7_qp )
{
- pp7_filter( pv,
- pv->pic_out.data[0],
- pv->pic_in.data[0],
- pv->width[0],
+ pp7_filter( pv,
+ pv->pic_out.data[0],
+ pv->pic_in.data[0],
+ pv->width[0],
pv->height[0],
NULL, /* TODO: mpi->qscale*/
0, /* TODO: mpi->qstride*/
1 );
- pp7_filter( pv,
- pv->pic_out.data[1],
- pv->pic_in.data[1],
- pv->width[1],
+ pp7_filter( pv,
+ pv->pic_out.data[1],
+ pv->pic_in.data[1],
+ pv->width[1],
pv->height[1],
NULL, /* TODO: mpi->qscale*/
0, /* TODO: mpi->qstride*/
0 );
- pp7_filter( pv,
- pv->pic_out.data[2],
- pv->pic_in.data[2],
- pv->width[2],
+ pp7_filter( pv,
+ pv->pic_out.data[2],
+ pv->pic_in.data[2],
+ pv->width[2],
pv->height[2],
NULL, /* TODO: mpi->qscale*/
0, /* TODO: mpi->qstride*/
- 0 );
+ 0 );
}
else
{
memcpy( pv->buf_out->data, buf_in->data, buf_in->size );
- }
-
+ }
+
hb_buffer_copy_settings( pv->buf_out, buf_in );
-
+
*buf_out = pv->buf_out;
-
+
return FILTER_OK;
}
int bitrate;
float level;
float dynamic_range_compression;
-
+
int error;
int sync;
int size;
uint8_t frame[3840];
hb_list_t * list;
-
+
int out_discrete_channels;
-
+
};
int deca52Init( hb_work_object_t *, hb_job_t * );
* dynrng_call
***********************************************************************
* Boosts soft audio -- taken from gbooker's work in A52Decoder, comment and all..
- * Two cases
- * 1) The user requested a compression of 1 or less, return the typical power rule
- * 2) The user requested a compression of more than 1 (decompression):
- * If the stream's requested compression is less than 1.0 (loud sound), return the normal compression
+ * Two cases
+ * 1) The user requested a compression of 1 or less, return the typical power rule
+ * 2) The user requested a compression of more than 1 (decompression):
+ * If the stream's requested compression is less than 1.0 (loud sound), return the normal compression
* If the stream's requested compression is more than 1.0 (soft sound), use power rule (which will make
- * it louder in this case).
- *
+ * it louder in this case).
+ *
**********************************************************************/
static sample_t dynrng_call (sample_t c, void *data)
-{
+{
float *level = (float *)data;
float levelToUse = (float)*level;
if(c > 1.0 || levelToUse <= 1.0)
else
return c;
}
-
+
/***********************************************************************
* hb_work_deca52_init
***********************************************************************
/* Decide what format we want out of a52dec
work.c has already done some of this deduction for us in do_job() */
-
+
pv->flags_out = HB_AMIXDOWN_GET_A52_FORMAT(w->amixdown);
/* pass the number of channels used into the private work data */
pv->level = 32768.0;
pv->dynamic_range_compression = job->dynamic_range_compression;
-
+
pv->next_expected_pts = 0;
pv->sequence = 0;
-
+
return 0;
}
/* If we got more than a frame, chain raw buffers */
*buf_out = buf = Decode( w );
while( buf )
- {
+ {
buf->sequence = pv->sequence;
buf->next = Decode( w );
buf = buf->next;
/***********************************************************************
* Decode
***********************************************************************
- *
+ *
**********************************************************************/
static hb_buffer_t * Decode( hb_work_object_t * w )
{
if ( pv->dynamic_range_compression > 1.0 )
{
- a52_dynrng( pv->state, dynrng_call, &pv->dynamic_range_compression);
+ a52_dynrng( pv->state, dynrng_call, &pv->dynamic_range_compression);
}
-
+
/* 6 blocks per frame, 256 samples per block, channelsused channels */
buf = hb_buffer_init( 6 * 256 * pv->out_discrete_channels * sizeof( float ) );
if (pts == -1)
/*
* To track AC3 PTS add this back in again.
- *hb_log("AC3: pts is %lld, buf->start %lld buf->stop %lld", pts, buf->start, buf->stop);
+ *hb_log("AC3: pts is %lld, buf->start %lld buf->stop %lld", pts, buf->start, buf->stop);
*/
-
+
pv->next_expected_pts = buf->stop;
-
+
for( i = 0; i < 6; i++ )
{
sample_t * samples_in;
int decavcodecInit( hb_work_object_t * w, hb_job_t * job )
{
AVCodec * codec;
-
+
hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
w->private_data = pv;
pv->job = job;
-
+
codec = avcodec_find_decoder( CODEC_ID_MP2 );
pv->parser = av_parser_init(CODEC_ID_MP2);
-
+
pv->context = avcodec_alloc_context();
avcodec_open( pv->context, codec );
pv->pts_last = -1;
uint64_t cur;
unsigned char *parser_output_buffer;
int parser_output_buffer_len;
-
+
*buf_out = NULL;
if( in->start < 0 ||
while( pos < in->size )
{
len = av_parser_parse(pv->parser, pv->context,&parser_output_buffer,&parser_output_buffer_len,in->data + pos,in->size - pos,cur,cur);
-
+
out_size = 0;
uncompressed_len = 0;
if (parser_output_buffer_len)
break;
/* We should handle other formats here - but that needs additional format conversion work below */
/* For now we'll just report the error and try to carry on */
- default:
+ default:
hb_log("decavcodecWork - Unknown Sample Format from avcodec_decode_audio (%d) !", pv->context->sample_fmt);
break;
}
-
+
buf->start = cur;
buf->stop = cur + 90000 * ( out_size / (sample_size_in_bytes * pv->context->channels) ) /
pv->context->sample_rate;
int bitrate;
int frame_length;
float level;
-
+
int error;
int sync;
int size;
uint8_t frame[18726];
hb_list_t * list;
-
+
int out_discrete_channels;
-
+
};
int decdcaInit( hb_work_object_t *, hb_job_t * );
/* Decide what format we want out of libdca
work.c has already done some of this deduction for us in do_job() */
-
+
pv->flags_out = HB_AMIXDOWN_GET_DCA_FORMAT(w->amixdown);
/* pass the number of channels used into the private work data */
/***********************************************************************
* Decode
***********************************************************************
- *
+ *
**********************************************************************/
static hb_buffer_t * Decode( hb_work_object_t * w )
{
void declpcmClose( hb_work_object_t * );
hb_work_object_t hb_declpcm =
-{
+{
WORK_DECLPCM,
"LPCM decoder",
declpcmInit,
/**********************************************************************
* hb_libmpeg2_init
**********************************************************************
- *
+ *
*********************************************************************/
hb_libmpeg2_t * hb_libmpeg2_init()
{
hb_libmpeg2_t * m = calloc( sizeof( hb_libmpeg2_t ), 1 );
-
+
m->libmpeg2 = mpeg2_init();
m->info = mpeg2_info( m->libmpeg2 );
m->last_pts = -1;
/**********************************************************************
* hb_libmpeg2_decode
**********************************************************************
- *
+ *
*********************************************************************/
int hb_libmpeg2_decode( hb_libmpeg2_t * m, hb_buffer_t * buf_es,
hb_list_t * list_raw )
if ( m->aspect_ratio <= 0 )
{
// We can parse out the aspect ratio from the Sequence Start Header data in buf_es->data
-
+
// Make sure we have the correct data in the buffer
if ((buf_es->data[0] == 0x00) && (buf_es->data[1] == 0x00) && (buf_es->data[2] == 0x01) && (buf_es->data[3] == 0xb3))
{
hb_log("hb_libmpeg2_decode - STATE_SEQUENCE unexpected aspect ratio/frame rate 0x%x\n", ar_fr);
break;
}
- }
+ }
}
}
else if( state == STATE_GOP && m->look_for_break == 2)
PIC_MASK_CODING_TYPE ) == PIC_FLAG_CODING_TYPE_I )
{
m->got_iframe = 1;
-
+
// If we are looking for a break, insert the chapter break on an I-Frame
if( m->look_for_break == 1 )
{
m->last_pts = buf->start;
flag = m->info->display_picture->flags;
-
+
/* Uncomment this block to see frame-by-frame picture flags, as the video encodes.
hb_log("***** MPEG 2 Picture Info for PTS %lld *****", buf->start);
if( flag & TOP_FIRST )
if( flag & COMPOSITE_MASK )
hb_log("MPEG2 Flag: Composite mask");
hb_log("fields: %d", m->info->display_picture->nb_fields);
-*/
+*/
/* Rotate the cadence tracking. */
int i = 0;
for(i=11; i > 0; i--)
{
cadence[i] = cadence[i-1];
}
-
+
if ( !(flag & PROGRESSIVE) && !(flag & TOP_FIRST) )
{
/* Not progressive, not top first...
//hb_log("MPEG2 Flag: Progressive repeat. Top field first, 3 fields displayed.");
cadence[0] = TBT_PROG;
}
-
+
if ( (cadence[2] <= TB) && (cadence[1] <= TB) && (cadence[0] > TB) && (cadence[11]) )
hb_log("%fs: Video -> Film", (float)buf->start / 90000);
if ( (cadence[2] > TB) && (cadence[1] <= TB) && (cadence[0] <= TB) && (cadence[11]) )
/* Store picture flags for later use by filters */
buf->flags = m->info->display_picture->flags;
-
+
hb_list_add( list_raw, buf );
}
}
/**********************************************************************
* hb_libmpeg2_info
**********************************************************************
- *
+ *
*********************************************************************/
void hb_libmpeg2_info( hb_libmpeg2_t * m, int * width, int * height,
int * rate, int *aspect_ratio )
/**********************************************************************
* hb_libmpeg2_close
**********************************************************************
- *
+ *
*********************************************************************/
void hb_libmpeg2_close( hb_libmpeg2_t ** _m )
{
/**********************************************************************
* The decmpeg2 work object
**********************************************************************
- *
+ *
*********************************************************************/
struct hb_work_private_s
{
/**********************************************************************
* hb_work_decmpeg2_init
**********************************************************************
- *
+ *
*********************************************************************/
int decmpeg2Init( hb_work_object_t * w, hb_job_t * job )
{
hb_work_private_t * pv;
-
+
pv = calloc( 1, sizeof( hb_work_private_t ) );
w->private_data = pv;
-
+
pv->libmpeg2 = hb_libmpeg2_init();
pv->list = hb_list_init();
/**********************************************************************
* Work
**********************************************************************
- *
+ *
*********************************************************************/
int decmpeg2Work( hb_work_object_t * w, hb_buffer_t ** buf_in,
hb_buffer_t ** buf_out )
/**********************************************************************
* Close
**********************************************************************
- *
+ *
*********************************************************************/
void decmpeg2Close( hb_work_object_t * w )
{
int decsubInit( hb_work_object_t * w, hb_job_t * job )
{
hb_work_private_t * pv;
-
+
pv = calloc( 1, sizeof( hb_work_private_t ) );
w->private_data = pv;
pv->pts_start = 0;
pv->pts_stop = 0;
pv->pts_forced = 0;
-
+
pv->alpha[3] = 0;
pv->alpha[2] = 0;
pv->alpha[1] = 0;
pv->alpha[0] = 0;
-
+
for( i = pv->size_rle; ; )
{
date = ( pv->buf[i] << 8 ) | pv->buf[i+1]; i += 2;
*/
if( job->indepth_scan )
{
- for( n=0; n < hb_list_count(title->list_subtitle); n++ )
+ for( n=0; n < hb_list_count(title->list_subtitle); n++ )
{
subtitle = hb_list_item( title->list_subtitle, n);
if( pv->stream_id == subtitle->id ) {
break;
case 0x03: // 0x03 - SET_COLOR - Set Colour indices
- {
- /*
+ {
+ /*
* SET_COLOR - provides four indices into the CLUT
* for the current PGC to associate with the four
* pixel values
3-j,
pv->lum[3-j],
pv->chromaU[3-j],
- pv->chromaV[3-j]);
+ pv->chromaV[3-j]);
*/
- }
+ }
i += 2;
break;
}
case 0x04: // 0x04 - SET_CONTR - Set Contrast
{
- /*
+ /*
* SET_CONTR - directly provides the four contrast
* (alpha blend) values to associate with the four
* pixel values
*/
uint8_t alpha[4];
-
+
alpha[3] = (pv->buf[i+0]>>4)&0x0f;
alpha[2] = (pv->buf[i+0])&0x0f;
alpha[1] = (pv->buf[i+1]>>4)&0x0f;
alpha[0] = (pv->buf[i+1])&0x0f;
-
-
+
+
int lastAlpha = pv->alpha[3] + pv->alpha[2] + pv->alpha[1] + pv->alpha[0];
int currAlpha = alpha[3] + alpha[2] + alpha[1] + alpha[0];
-
+
// fading-in, save the highest alpha value
- if( currAlpha > lastAlpha )
+ if( currAlpha > lastAlpha )
{
pv->alpha[3] = alpha[3];
pv->alpha[2] = alpha[2];
pv->alpha[1] = alpha[1];
pv->alpha[0] = alpha[0];
}
-
+
// fading-out
- if( currAlpha < lastAlpha && !pv->pts_stop )
+ if( currAlpha < lastAlpha && !pv->pts_stop )
{
pv->pts_stop = pv->pts + date * 900;
}
-
+
i += 2;
break;
}
}
}
}
-
+
if( i > next )
#define GET_NEXT_NIBBLE code = ( code << 4 ) | ( ( ( *offset & 1 ) ? \
( pv->buf[((*offset)>>1)] & 0xF ) : ( pv->buf[((*offset)>>1)] >> 4 ) ) ); \
(*offset)++
-
+
offsets[0] = pv->offsets[0] * 2;
offsets[1] = pv->offsets[1] * 2;
/*\r
Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>\r
- \r
+\r
This program is free software; you can redistribute it and/or modify\r
it under the terms of the GNU General Public License as published by\r
the Free Software Foundation; either version 2 of the License, or\r
(at your option) any later version.\r
- \r
+\r
This program is distributed in the hope that it will be useful,\r
but WITHOUT ANY WARRANTY; without even the implied warranty of\r
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\r
GNU General Public License for more details.\r
- \r
+\r
You should have received a copy of the GNU General Public License\r
along with this program; if not, write to the Free Software\r
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\r
#define MIN3(a,b,c) MIN(MIN(a,b),c)\r
#define MAX3(a,b,c) MAX(MAX(a,b),c)\r
\r
-struct hb_filter_private_s \r
+struct hb_filter_private_s\r
{\r
int pix_fmt;\r
int width[3];\r
int yadif_mode;\r
int yadif_parity;\r
int yadif_ready;\r
- \r
- uint8_t * yadif_ref[4][3]; \r
+\r
+ uint8_t * yadif_ref[4][3];\r
int yadif_ref_stride[3];\r
- \r
+\r
int mcdeint_mode;\r
int mcdeint_qp;\r
- \r
+\r
int mcdeint_outbuf_size;\r
uint8_t * mcdeint_outbuf;\r
AVCodecContext * mcdeint_avctx_enc;\r
AVFrame * mcdeint_frame;\r
AVFrame * mcdeint_frame_dec;\r
- \r
+\r
AVPicture pic_in;\r
- AVPicture pic_out; \r
+ AVPicture pic_out;\r
hb_buffer_t * buf_out[2];\r
hb_buffer_t * buf_settings;\r
};\r
\r
-hb_filter_private_t * hb_deinterlace_init( int pix_fmt, \r
- int width, \r
+hb_filter_private_t * hb_deinterlace_init( int pix_fmt,\r
+ int width,\r
int height,\r
char * settings );\r
\r
int hb_deinterlace_work( hb_buffer_t * buf_in,\r
hb_buffer_t ** buf_out,\r
int pix_fmt,\r
- int width, \r
+ int width,\r
int height,\r
hb_filter_private_t * pv );\r
\r
void hb_deinterlace_close( hb_filter_private_t * pv );\r
\r
hb_filter_object_t hb_filter_deinterlace =\r
-{ \r
+{\r
FILTER_DEINTERLACE,\r
"Deinterlace (ffmpeg or yadif/mcdeint)",\r
NULL,\r
hb_deinterlace_close,\r
};\r
\r
-static void yadif_store_ref( const uint8_t ** pic, \r
+static void yadif_store_ref( const uint8_t ** pic,\r
hb_filter_private_t * pv )\r
{\r
- memcpy( pv->yadif_ref[3], \r
- pv->yadif_ref[0], \r
+ memcpy( pv->yadif_ref[3],\r
+ pv->yadif_ref[0],\r
sizeof(uint8_t *)*3 );\r
- \r
- memmove( pv->yadif_ref[0], \r
- pv->yadif_ref[1], \r
- sizeof(uint8_t *)*3*3 ); \r
- \r
+\r
+ memmove( pv->yadif_ref[0],\r
+ pv->yadif_ref[1],\r
+ sizeof(uint8_t *)*3*3 );\r
+\r
int i;\r
for( i = 0; i < 3; i++ )\r
{\r
const uint8_t * src = pic[i];\r
uint8_t * ref = pv->yadif_ref[2][i];\r
- \r
+\r
int w = pv->width[i];\r
int h = pv->height[i];\r
int ref_stride = pv->yadif_ref_stride[i];\r
- \r
+\r
int y;\r
for( y = 0; y < pv->height[i]; y++ )\r
{\r
\r
static void yadif_filter_line( uint8_t *dst,\r
uint8_t *prev,\r
- uint8_t *cur, \r
+ uint8_t *cur,\r
uint8_t *next,\r
int plane,\r
int parity,\r
{\r
uint8_t *prev2 = parity ? prev : cur ;\r
uint8_t *next2 = parity ? cur : next;\r
- \r
+\r
int w = pv->width[plane];\r
int refs = pv->yadif_ref_stride[plane];\r
- \r
+\r
int x;\r
for( x = 0; x < w; x++)\r
{\r
if( score < spatial_score ){\\r
spatial_score = score;\\r
spatial_pred = (cur[-refs +j] + cur[+refs -j])>>1;\\r
- \r
+\r
YADIF_CHECK(-1) YADIF_CHECK(-2) }} }}\r
YADIF_CHECK( 1) YADIF_CHECK( 2) }} }}\r
\r
{\r
int b = (prev2[-2*refs] + next2[-2*refs])>>1;\r
int f = (prev2[+2*refs] + next2[+2*refs])>>1;\r
- \r
+\r
int max = MAX3(d-e, d-c, MIN(b-c, f-e));\r
int min = MIN3(d-e, d-c, MAX(b-c, f-e));\r
\r
{\r
int i;\r
for( i = 0; i < 3; i++ )\r
- { \r
+ {\r
int w = pv->width[i];\r
int h = pv->height[i];\r
int ref_stride = pv->yadif_ref_stride[i];\r
- \r
+\r
int y;\r
for( y = 0; y < h; y++ )\r
{\r
uint8_t *cur = &pv->yadif_ref[1][i][y*ref_stride];\r
uint8_t *next = &pv->yadif_ref[2][i][y*ref_stride];\r
uint8_t *dst2 = &dst[i][y*w];\r
- \r
+\r
yadif_filter_line( dst2, prev, cur, next, i, parity ^ tff, pv );\r
}\r
else\r
{\r
- memcpy( &dst[i][y*w], \r
- &pv->yadif_ref[1][i][y*ref_stride], \r
+ memcpy( &dst[i][y*w],\r
+ &pv->yadif_ref[1][i][y*ref_stride],\r
w * sizeof(uint8_t) );\r
}\r
}\r
}\r
}\r
\r
-static void mcdeint_filter( uint8_t ** dst, \r
+static void mcdeint_filter( uint8_t ** dst,\r
uint8_t ** src,\r
int parity,\r
hb_filter_private_t * pv )\r
{\r
int x, y, i;\r
- int out_size; \r
- \r
+ int out_size;\r
+\r
#ifdef SUPPRESS_AV_LOG\r
/* TODO: temporarily change log level to suppress obnoxious debug output */\r
int loglevel = av_log_get_level();\r
av_log_set_level( AV_LOG_QUIET );\r
#endif\r
- \r
+\r
for( i=0; i<3; i++ )\r
{\r
pv->mcdeint_frame->data[i] = src[i];\r
pv->mcdeint_frame->linesize[i] = pv->width[i];\r
- } \r
+ }\r
pv->mcdeint_avctx_enc->me_cmp = FF_CMP_SAD;\r
pv->mcdeint_avctx_enc->me_sub_cmp = FF_CMP_SAD;\r
pv->mcdeint_frame->quality = pv->mcdeint_qp * FF_QP2LAMBDA;\r
- \r
- out_size = avcodec_encode_video( pv->mcdeint_avctx_enc, \r
- pv->mcdeint_outbuf, \r
- pv->mcdeint_outbuf_size, \r
+\r
+ out_size = avcodec_encode_video( pv->mcdeint_avctx_enc,\r
+ pv->mcdeint_outbuf,\r
+ pv->mcdeint_outbuf_size,\r
pv->mcdeint_frame );\r
- \r
+\r
pv->mcdeint_frame_dec = pv->mcdeint_avctx_enc->coded_frame;\r
- \r
+\r
for( i = 0; i < 3; i++ )\r
{\r
int w = pv->width[i];\r
int h = pv->height[i];\r
int fils = pv->mcdeint_frame_dec->linesize[i];\r
int srcs = pv->width[i];\r
- \r
+\r
for( y = 0; y < h; y++ )\r
{\r
if( (y ^ parity) & 1 )\r
for( x = 0; x < w; x++ )\r
{\r
if( (x-2)+(y-1)*w >= 0 && (x+2)+(y+1)*w < w*h )\r
- { \r
- uint8_t * filp = \r
+ {\r
+ uint8_t * filp =\r
&pv->mcdeint_frame_dec->data[i][x + y*fils];\r
uint8_t * srcp = &src[i][x + y*srcs];\r
\r
int diff0 = filp[-fils] - srcp[-srcs];\r
int diff1 = filp[+fils] - srcp[+srcs];\r
- \r
- int spatial_score = \r
+\r
+ int spatial_score =\r
ABS(srcp[-srcs-1] - srcp[+srcs-1])\r
+ ABS(srcp[-srcs ] - srcp[+srcs ])\r
+ ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;\r
- \r
+\r
int temp = filp[0];\r
- \r
+\r
#define MCDEINT_CHECK(j)\\r
{ int score = ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\\r
+ ABS(srcp[-srcs +j] - srcp[+srcs -j])\\r
spatial_score = score;\\r
diff0 = filp[-fils+j] - srcp[-srcs+j];\\r
diff1 = filp[+fils-j] - srcp[+srcs-j];\r
- \r
+\r
MCDEINT_CHECK(-1) MCDEINT_CHECK(-2) }} }}\r
MCDEINT_CHECK( 1) MCDEINT_CHECK( 2) }} }}\r
\r
if(diff0 + diff1 > 0)\r
{\r
- temp -= (diff0 + diff1 - \r
+ temp -= (diff0 + diff1 -\r
ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;\r
}\r
else\r
{\r
- temp -= (diff0 + diff1 + \r
+ temp -= (diff0 + diff1 +\r
ABS( ABS(diff0) - ABS(diff1) ) / 2) / 2;\r
}\r
\r
- filp[0] = dst[i][x + y*w] = \r
+ filp[0] = dst[i][x + y*w] =\r
temp > 255U ? ~(temp>>31) : temp;\r
}\r
else\r
{\r
- dst[i][x + y*w] = \r
+ dst[i][x + y*w] =\r
pv->mcdeint_frame_dec->data[i][x + y*fils];\r
}\r
}\r
#endif\r
}\r
\r
-hb_filter_private_t * hb_deinterlace_init( int pix_fmt, \r
- int width, \r
+hb_filter_private_t * hb_deinterlace_init( int pix_fmt,\r
+ int width,\r
int height,\r
char * settings )\r
{\r
{\r
return 0;\r
}\r
- \r
+\r
hb_filter_private_t * pv = calloc( 1, sizeof(struct hb_filter_private_s) );\r
- \r
+\r
pv->pix_fmt = pix_fmt;\r
\r
pv->width[0] = width;\r
- pv->height[0] = height; \r
+ pv->height[0] = height;\r
pv->width[1] = pv->width[2] = width >> 1;\r
- pv->height[1] = pv->height[2] = height >> 1; \r
- \r
- int buf_size = 3 * width * height / 2; \r
+ pv->height[1] = pv->height[2] = height >> 1;\r
+\r
+ int buf_size = 3 * width * height / 2;\r
pv->buf_out[0] = hb_buffer_init( buf_size );\r
pv->buf_out[1] = hb_buffer_init( buf_size );\r
pv->buf_settings = hb_buffer_init( 0 );\r
- \r
+\r
pv->yadif_ready = 0;\r
pv->yadif_mode = YADIF_MODE_DEFAULT;\r
- pv->yadif_parity = YADIF_PARITY_DEFAULT; \r
- \r
+ pv->yadif_parity = YADIF_PARITY_DEFAULT;\r
+\r
pv->mcdeint_mode = MCDEINT_MODE_DEFAULT;\r
pv->mcdeint_qp = MCDEINT_QP_DEFAULT;\r
- \r
+\r
if( settings )\r
{\r
- sscanf( settings, "%d:%d:%d:%d", \r
- &pv->yadif_mode, \r
+ sscanf( settings, "%d:%d:%d:%d",\r
+ &pv->yadif_mode,\r
&pv->yadif_parity,\r
&pv->mcdeint_mode,\r
&pv->mcdeint_qp );\r
}\r
- \r
+\r
/* Allocate yadif specific buffers */\r
if( pv->yadif_mode >= 0 )\r
{\r
int i, j;\r
for( i = 0; i < 3; i++ )\r
- { \r
+ {\r
int is_chroma = !!i;\r
int w = ((width + 31) & (~31))>>is_chroma;\r
int h = ((height+6+ 31) & (~31))>>is_chroma;\r
- \r
+\r
pv->yadif_ref_stride[i] = w;\r
- \r
+\r
for( j = 0; j < 3; j++ )\r
{\r
pv->yadif_ref[j][i] = malloc( w*h*sizeof(uint8_t) ) + 3*w;\r
- } \r
+ }\r
}\r
}\r
- \r
+\r
/* Allocate mcdeint specific buffers */\r
if( pv->mcdeint_mode >= 0 )\r
{\r
avcodec_init();\r
avcodec_register_all();\r
- \r
+\r
AVCodec * enc = avcodec_find_encoder( CODEC_ID_SNOW );\r
- \r
+\r
int i;\r
for (i = 0; i < 3; i++ )\r
{\r
AVCodecContext * avctx_enc;\r
- \r
+\r
avctx_enc = pv->mcdeint_avctx_enc = avcodec_alloc_context();\r
- \r
+\r
avctx_enc->width = width;\r
avctx_enc->height = height;\r
avctx_enc->time_base = (AVRational){1,25}; // meaningless\r
avctx_enc->me_cmp = FF_CMP_SAD; //SSE;\r
avctx_enc->me_sub_cmp = FF_CMP_SAD; //SSE;\r
avctx_enc->mb_cmp = FF_CMP_SSE;\r
- \r
+\r
switch( pv->mcdeint_mode )\r
{\r
case 3:\r
case 0:\r
avctx_enc->flags |= CODEC_FLAG_QPEL;\r
}\r
- \r
- avcodec_open(avctx_enc, enc); \r
+\r
+ avcodec_open(avctx_enc, enc);\r
}\r
- \r
+\r
pv->mcdeint_frame = avcodec_alloc_frame();\r
pv->mcdeint_outbuf_size = width * height * 10;\r
pv->mcdeint_outbuf = malloc( pv->mcdeint_outbuf_size );\r
\r
void hb_deinterlace_close( hb_filter_private_t * pv )\r
{\r
- if( !pv ) \r
+ if( !pv )\r
{\r
return;\r
}\r
- \r
+\r
/* Cleanup frame buffers */\r
if( pv->buf_out[0] )\r
{\r
hb_buffer_close( &pv->buf_out[0] );\r
- } \r
+ }\r
if( pv->buf_out[1] )\r
{\r
hb_buffer_close( &pv->buf_out[1] );\r
for( i = 0; i<3*3; i++ )\r
{\r
uint8_t **p = &pv->yadif_ref[i%3][i/3];\r
- if (*p) \r
+ if (*p)\r
{\r
free( *p - 3*pv->yadif_ref_stride[i/3] );\r
*p = NULL;\r
}\r
}\r
}\r
- \r
+\r
/* Cleanup mcdeint specific buffers */\r
if( pv->mcdeint_mode >= 0 )\r
{\r
{\r
avcodec_close( pv->mcdeint_avctx_enc );\r
av_freep( &pv->mcdeint_avctx_enc );\r
- } \r
+ }\r
if( pv->mcdeint_outbuf )\r
{\r
free( pv->mcdeint_outbuf );\r
- } \r
- } \r
- \r
+ }\r
+ }\r
+\r
free( pv );\r
}\r
\r
int hb_deinterlace_work( hb_buffer_t * buf_in,\r
hb_buffer_t ** buf_out,\r
int pix_fmt,\r
- int width, \r
+ int width,\r
int height,\r
hb_filter_private_t * pv )\r
{\r
- if( !pv || \r
+ if( !pv ||\r
pix_fmt != pv->pix_fmt ||\r
width != pv->width[0] ||\r
height != pv->height[0] )\r
{\r
return FILTER_FAILED;\r
}\r
- \r
- avpicture_fill( &pv->pic_in, buf_in->data, \r
+\r
+ avpicture_fill( &pv->pic_in, buf_in->data,\r
pix_fmt, width, height );\r
\r
/* Use libavcodec deinterlace if yadif_mode < 0 */\r
if( pv->yadif_mode < 0 )\r
- { \r
- avpicture_fill( &pv->pic_out, pv->buf_out[0]->data, \r
+ {\r
+ avpicture_fill( &pv->pic_out, pv->buf_out[0]->data,\r
pix_fmt, width, height );\r
- \r
- avpicture_deinterlace( &pv->pic_out, &pv->pic_in, \r
+\r
+ avpicture_deinterlace( &pv->pic_out, &pv->pic_in,\r
pix_fmt, width, height );\r
- \r
+\r
hb_buffer_copy_settings( pv->buf_out[0], buf_in );\r
\r
*buf_out = pv->buf_out[0];\r
- \r
+\r
return FILTER_OK;\r
}\r
- \r
+\r
/* Determine if top-field first layout */\r
int tff;\r
if( pv->yadif_parity < 0 )\r
{\r
tff = (pv->yadif_parity & 1) ^ 1;\r
}\r
- \r
+\r
/* Store current frame in yadif cache */\r
yadif_store_ref( (const uint8_t**)pv->pic_in.data, pv );\r
- \r
+\r
/* If yadif is not ready, store another ref and return FILTER_DELAY */\r
if( pv->yadif_ready == 0 )\r
{\r
yadif_store_ref( (const uint8_t**)pv->pic_in.data, pv );\r
- \r
+\r
hb_buffer_copy_settings( pv->buf_settings, buf_in );\r
-
- /* don't let 'work_loop' send a chapter mark upstream */
- buf_in->new_chap = 0;
+\r
+ /* don't let 'work_loop' send a chapter mark upstream */\r
+ buf_in->new_chap = 0;\r
\r
pv->yadif_ready = 1;\r
- \r
+\r
return FILTER_DELAY;\r
}\r
\r
for( frame = 0; frame <= (pv->yadif_mode & 1); frame++ )\r
{\r
int parity = frame ^ tff ^ 1;\r
- \r
- avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data, \r
+\r
+ avpicture_fill( &pv->pic_out, pv->buf_out[!(frame^1)]->data,\r
pix_fmt, width, height );\r
- \r
+\r
yadif_filter( pv->pic_out.data, parity, tff, pv );\r
\r
if( pv->mcdeint_mode >= 0 )\r
{\r
- avpicture_fill( &pv->pic_in, pv->buf_out[(frame^1)]->data, \r
+ avpicture_fill( &pv->pic_in, pv->buf_out[(frame^1)]->data,\r
pix_fmt, width, height );\r
- \r
+\r
mcdeint_filter( pv->pic_in.data, pv->pic_out.data, parity, pv );\r
- \r
+\r
*buf_out = pv->buf_out[ (frame^1)];\r
}\r
else\r
*buf_out = pv->buf_out[!(frame^1)];\r
}\r
}\r
- \r
+\r
/* Copy buffered settings to output buffer settings */\r
hb_buffer_copy_settings( *buf_out, pv->buf_settings );\r
- \r
+\r
/* Replace buffered settings with input buffer settings */\r
- hb_buffer_copy_settings( pv->buf_settings, buf_in ); \r
-
- /* don't let 'work_loop' send a chapter mark upstream */
- buf_in->new_chap = 0;
+ hb_buffer_copy_settings( pv->buf_settings, buf_in );\r
+\r
+ /* don't let 'work_loop' send a chapter mark upstream */\r
+ buf_in->new_chap = 0;\r
\r
return FILTER_OK;\r
}\r
/*
Copyright (C) 2003 Daniel Moreno <comac@comac.darktech.org>
-
+
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
-
+
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
-
+
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#define ABS(A) ( (A) > 0 ? (A) : -(A) )
-struct hb_filter_private_s
+struct hb_filter_private_s
{
int pix_fmt;
int width[3];
- int height[3];
-
+ int height[3];
+
int hqdn3d_coef[4][512*16];
unsigned int * hqdn3d_line;
unsigned short * hqdn3d_frame[3];
-
+
AVPicture pic_in;
- AVPicture pic_out;
+ AVPicture pic_out;
hb_buffer_t * buf_out;
};
-hb_filter_private_t * hb_denoise_init( int pix_fmt,
- int width,
+hb_filter_private_t * hb_denoise_init( int pix_fmt,
+ int width,
int height,
char * settings );
int hb_denoise_work( const hb_buffer_t * buf_in,
hb_buffer_t ** buf_out,
int pix_fmt,
- int width,
+ int width,
int height,
hb_filter_private_t * pv );
void hb_denoise_close( hb_filter_private_t * pv );
hb_filter_object_t hb_filter_denoise =
-{
+{
FILTER_DENOISE,
"Denoise (hqdn3d)",
NULL,
hb_denoise_close,
};
-static void hqdn3d_precalc_coef( int * ct,
+static void hqdn3d_precalc_coef( int * ct,
double dist25 )
{
int i;
double gamma, simil, c;
-
+
gamma = log( 0.25 ) / log( 1.0 - dist25/255.0 - 0.00001 );
-
+
for( i = -255*16; i <= 255*16; i++ )
{
simil = 1.0 - ABS(i) / (16*255.0);
c = pow( simil, gamma ) * 65536.0 * (double)i / 16.0;
ct[16*256+i] = (c<0) ? (c-0.5) : (c+0.5);
}
-
+
ct[0] = (dist25 != 0);
}
-static inline unsigned int hqdn3d_lowpass_mul( unsigned int prev_mul,
- unsigned int curr_mul,
+static inline unsigned int hqdn3d_lowpass_mul( unsigned int prev_mul,
+ unsigned int curr_mul,
int * coef )
{
int diff_mul = prev_mul - curr_mul;
return curr_mul + coef[d];
}
-static void hqdn3d_denoise_temporal( unsigned char * frame_src,
+static void hqdn3d_denoise_temporal( unsigned char * frame_src,
unsigned char * frame_dst,
unsigned short * frame_ant,
- int w, int h,
+ int w, int h,
int * temporal)
{
int x, y;
unsigned int pixel_dst;
-
+
for( y = 0; y < h; y++ )
{
for( x = 0; x < w; x++ )
{
- pixel_dst = hqdn3d_lowpass_mul( frame_ant[x]<<8,
- frame_src[x]<<16,
+ pixel_dst = hqdn3d_lowpass_mul( frame_ant[x]<<8,
+ frame_src[x]<<16,
temporal );
-
+
frame_ant[x] = ((pixel_dst+0x1000007F)>>8);
frame_dst[x] = ((pixel_dst+0x10007FFF)>>16);
}
-
+
frame_src += w;
frame_dst += w;
frame_ant += w;
int line_offset_src = 0, line_offset_dst = 0;
unsigned int pixel_ant;
unsigned int pixel_dst;
-
+
/* First pixel has no left nor top neighbor. */
pixel_dst = line_ant[0] = pixel_ant = frame_src[0]<<16;
frame_dst[0] = ((pixel_dst+0x10007FFF)>>16);
-
+
/* First line has no top neighbor, only left. */
for( x = 1; x < w; x++ )
{
- pixel_dst = line_ant[x] = hqdn3d_lowpass_mul(pixel_ant,
- frame_src[x]<<16,
+ pixel_dst = line_ant[x] = hqdn3d_lowpass_mul(pixel_ant,
+ frame_src[x]<<16,
horizontal);
-
+
frame_dst[x] = ((pixel_dst+0x10007FFF)>>16);
}
-
+
for( y = 1; y < h; y++ )
{
unsigned int pixel_ant;
line_offset_src += w, line_offset_dst += w;
-
+
/* First pixel on each line doesn't have previous pixel */
pixel_ant = frame_src[line_offset_src]<<16;
-
- pixel_dst = line_ant[0] = hqdn3d_lowpass_mul( line_ant[0],
- pixel_ant,
+
+ pixel_dst = line_ant[0] = hqdn3d_lowpass_mul( line_ant[0],
+ pixel_ant,
vertical);
-
+
frame_dst[line_offset_dst] = ((pixel_dst+0x10007FFF)>>16);
-
+
/* The rest of the pixels in the line are normal */
for( x = 1; x < w; x++ )
{
unsigned int pixel_dst;
- pixel_ant = hqdn3d_lowpass_mul( pixel_ant,
- frame_src[line_offset_src+x]<<16,
+ pixel_ant = hqdn3d_lowpass_mul( pixel_ant,
+ frame_src[line_offset_src+x]<<16,
horizontal );
- pixel_dst = line_ant[x] = hqdn3d_lowpass_mul( line_ant[x],
- pixel_ant,
+ pixel_dst = line_ant[x] = hqdn3d_lowpass_mul( line_ant[x],
+ pixel_ant,
vertical );
frame_dst[line_offset_dst+x]= ((pixel_dst+0x10007FFF)>>16);
unsigned char * frame_dst,
unsigned int * line_ant,
unsigned short ** frame_ant_ptr,
- int w,
+ int w,
int h,
- int * horizontal,
- int * vertical,
+ int * horizontal,
+ int * vertical,
int * temporal)
{
int x, y;
unsigned int pixel_ant;
unsigned int pixel_dst;
unsigned short* frame_ant = (*frame_ant_ptr);
-
+
if( !frame_ant)
{
(*frame_ant_ptr) = frame_ant = malloc( w*h*sizeof(unsigned short) );
{
unsigned short* dst = &frame_ant[y*w];
unsigned char* src = frame_src + y*w;
-
+
for( x = 0; x < w; x++ )
{
dst[x] = src[x] << 8;
}
}
}
-
+
/* If no spatial coefficients, do temporal denoise only */
if( !horizontal[0] && !vertical[0] )
{
- hqdn3d_denoise_temporal( frame_src,
- frame_dst,
+ hqdn3d_denoise_temporal( frame_src,
+ frame_dst,
frame_ant,
- w, h,
+ w, h,
temporal);
return;
}
-
+
/* If no temporal coefficients, do spatial denoise only */
if( !temporal[0] )
{
- hqdn3d_denoise_spatial( frame_src,
- frame_dst,
+ hqdn3d_denoise_spatial( frame_src,
+ frame_dst,
line_ant,
- w, h,
- horizontal,
+ w, h,
+ horizontal,
vertical);
return;
}
-
+
/* First pixel has no left nor top neighbor. Only previous frame */
line_ant[0] = pixel_ant = frame_src[0] << 16;
-
- pixel_dst = hqdn3d_lowpass_mul( frame_ant[0]<<8,
- pixel_ant,
+
+ pixel_dst = hqdn3d_lowpass_mul( frame_ant[0]<<8,
+ pixel_ant,
temporal );
-
+
frame_ant[0] = ((pixel_dst+0x1000007F)>>8);
frame_dst[0] = ((pixel_dst+0x10007FFF)>>16);
-
+
/* First line has no top neighbor. Only left one for each pixel and last frame */
for( x = 1; x < w; x++ )
{
- line_ant[x] = pixel_ant = hqdn3d_lowpass_mul( pixel_ant,
- frame_src[x]<<16,
+ line_ant[x] = pixel_ant = hqdn3d_lowpass_mul( pixel_ant,
+ frame_src[x]<<16,
horizontal);
-
- pixel_dst = hqdn3d_lowpass_mul( frame_ant[x]<<8,
- pixel_ant,
+
+ pixel_dst = hqdn3d_lowpass_mul( frame_ant[x]<<8,
+ pixel_ant,
temporal);
-
+
frame_ant[x] = ((pixel_dst+0x1000007F)>>8);
frame_dst[x] = ((pixel_dst+0x10007FFF)>>16);
}
-
+
/* The rest of the lines in the frame are normal */
for( y = 1; y < h; y++ )
{
unsigned int pixel_ant;
unsigned short * line_prev = &frame_ant[y*w];
line_offset_src += w, line_offset_dst += w;
-
+
/* First pixel on each line doesn't have previous pixel */
pixel_ant = frame_src[line_offset_src]<<16;
- line_ant[0] = hqdn3d_lowpass_mul( line_ant[0],
- pixel_ant,
- vertical);
- pixel_dst = hqdn3d_lowpass_mul( line_prev[0]<<8,
- line_ant[0],
- temporal);
+ line_ant[0] = hqdn3d_lowpass_mul( line_ant[0],
+ pixel_ant,
+ vertical);
+ pixel_dst = hqdn3d_lowpass_mul( line_prev[0]<<8,
+ line_ant[0],
+ temporal);
line_prev[0] = ((pixel_dst+0x1000007F)>>8);
frame_dst[line_offset_dst] = ((pixel_dst+0x10007FFF)>>16);
-
+
/* The rest of the pixels in the line are normal */
for( x = 1; x < w; x++ )
{
unsigned int pixel_dst;
- pixel_ant = hqdn3d_lowpass_mul( pixel_ant,
- frame_src[line_offset_src+x]<<16,
+ pixel_ant = hqdn3d_lowpass_mul( pixel_ant,
+ frame_src[line_offset_src+x]<<16,
horizontal );
- line_ant[x] = hqdn3d_lowpass_mul( line_ant[x],
+ line_ant[x] = hqdn3d_lowpass_mul( line_ant[x],
pixel_ant, vertical);
- pixel_dst = hqdn3d_lowpass_mul( line_prev[x]<<8,
- line_ant[x],
+ pixel_dst = hqdn3d_lowpass_mul( line_prev[x]<<8,
+ line_ant[x],
temporal );
line_prev[x] = ((pixel_dst+0x1000007F)>>8);
-
+
frame_dst[line_offset_dst+x] = ((pixel_dst+0x10007FFF)>>16);
}
}
}
-hb_filter_private_t * hb_denoise_init( int pix_fmt,
- int width,
+hb_filter_private_t * hb_denoise_init( int pix_fmt,
+ int width,
int height,
char * settings )
{
{
return 0;
}
-
+
hb_filter_private_t * pv = malloc( sizeof(struct hb_filter_private_s) );
/*
* Clear the memory to avoid freeing uninitialised memory later.
*/
memset( pv, 0, sizeof( struct hb_filter_private_s ) );
-
- pv->pix_fmt = pix_fmt;
+
+ pv->pix_fmt = pix_fmt;
pv->width[0] = width;
- pv->height[0] = height;
+ pv->height[0] = height;
pv->width[1] = pv->width[2] = width >> 1;
- pv->height[1] = pv->height[2] = height >> 1;
-
+ pv->height[1] = pv->height[2] = height >> 1;
+
double spatial_luma, temporal_luma, spatial_chroma, temporal_chroma;
if( settings )
{
- switch( sscanf( settings, "%lf:%lf:%lf:%lf",
- &spatial_luma, &spatial_chroma,
+ switch( sscanf( settings, "%lf:%lf:%lf:%lf",
+ &spatial_luma, &spatial_chroma,
&temporal_luma, &temporal_chroma ) )
- {
+ {
case 0:
spatial_luma = HQDN3D_SPATIAL_LUMA_DEFAULT;
-
+
spatial_chroma = HQDN3D_SPATIAL_CHROMA_DEFAULT;
-
- temporal_luma = HQDN3D_TEMPORAL_LUMA_DEFAULT;
-
- temporal_chroma = temporal_luma *
+
+ temporal_luma = HQDN3D_TEMPORAL_LUMA_DEFAULT;
+
+ temporal_chroma = temporal_luma *
spatial_chroma / spatial_luma;
break;
-
+
case 1:
- spatial_chroma = HQDN3D_SPATIAL_CHROMA_DEFAULT *
+ spatial_chroma = HQDN3D_SPATIAL_CHROMA_DEFAULT *
spatial_luma / HQDN3D_SPATIAL_LUMA_DEFAULT;
-
- temporal_luma = HQDN3D_TEMPORAL_LUMA_DEFAULT *
+
+ temporal_luma = HQDN3D_TEMPORAL_LUMA_DEFAULT *
spatial_luma / HQDN3D_SPATIAL_LUMA_DEFAULT;
- temporal_chroma = temporal_luma *
+ temporal_chroma = temporal_luma *
spatial_chroma / spatial_luma;
break;
-
+
case 2:
- temporal_luma = HQDN3D_TEMPORAL_LUMA_DEFAULT *
+ temporal_luma = HQDN3D_TEMPORAL_LUMA_DEFAULT *
spatial_luma / HQDN3D_SPATIAL_LUMA_DEFAULT;
-
- temporal_chroma = temporal_luma *
+
+ temporal_chroma = temporal_luma *
spatial_chroma / spatial_luma;
break;
-
+
case 3:
- temporal_chroma = temporal_luma *
+ temporal_chroma = temporal_luma *
spatial_chroma / spatial_luma;
break;
}
}
-
+
pv->hqdn3d_line = malloc( width * sizeof(int) );
hqdn3d_precalc_coef( pv->hqdn3d_coef[0], spatial_luma );
hqdn3d_precalc_coef( pv->hqdn3d_coef[1], temporal_luma );
hqdn3d_precalc_coef( pv->hqdn3d_coef[2], spatial_chroma );
hqdn3d_precalc_coef( pv->hqdn3d_coef[3], temporal_chroma );
-
- int buf_size = 3 * width * height / 2;
+
+ int buf_size = 3 * width * height / 2;
pv->buf_out = hb_buffer_init( buf_size );
-
+
return pv;
}
{
return;
}
-
+
if( pv->hqdn3d_line )
{
free( pv->hqdn3d_line );
{
hb_buffer_close( &pv->buf_out );
}
-
+
free( pv );
}
int hb_denoise_work( const hb_buffer_t * buf_in,
hb_buffer_t ** buf_out,
int pix_fmt,
- int width,
+ int width,
int height,
hb_filter_private_t * pv )
{
- if( !pv ||
+ if( !pv ||
pix_fmt != pv->pix_fmt ||
width != pv->width[0] ||
height != pv->height[0] )
{
return FILTER_FAILED;
}
-
- avpicture_fill( &pv->pic_in, buf_in->data,
+
+ avpicture_fill( &pv->pic_in, buf_in->data,
pix_fmt, width, height );
-
- avpicture_fill( &pv->pic_out, pv->buf_out->data,
+
+ avpicture_fill( &pv->pic_out, pv->buf_out->data,
pix_fmt, width, height );
-
- hqdn3d_denoise( pv->pic_in.data[0],
+
+ hqdn3d_denoise( pv->pic_in.data[0],
pv->pic_out.data[0],
- pv->hqdn3d_line,
- &pv->hqdn3d_frame[0],
- pv->width[0],
+ pv->hqdn3d_line,
+ &pv->hqdn3d_frame[0],
+ pv->width[0],
pv->height[0],
pv->hqdn3d_coef[0],
pv->hqdn3d_coef[0],
pv->hqdn3d_coef[1] );
-
- hqdn3d_denoise( pv->pic_in.data[1],
+
+ hqdn3d_denoise( pv->pic_in.data[1],
pv->pic_out.data[1],
- pv->hqdn3d_line,
- &pv->hqdn3d_frame[1],
- pv->width[1],
+ pv->hqdn3d_line,
+ &pv->hqdn3d_frame[1],
+ pv->width[1],
pv->height[1],
pv->hqdn3d_coef[2],
pv->hqdn3d_coef[2],
pv->hqdn3d_coef[3] );
-
- hqdn3d_denoise( pv->pic_in.data[2],
+
+ hqdn3d_denoise( pv->pic_in.data[2],
pv->pic_out.data[2],
- pv->hqdn3d_line,
- &pv->hqdn3d_frame[2],
- pv->width[2],
+ pv->hqdn3d_line,
+ &pv->hqdn3d_frame[2],
+ pv->width[2],
pv->height[2],
pv->hqdn3d_coef[2],
pv->hqdn3d_coef[2],
pv->hqdn3d_coef[3] );
-
+
hb_buffer_copy_settings( pv->buf_out, buf_in );
*buf_out = pv->buf_out;
-
+
return FILTER_OK;
}
*
*/
-struct hb_filter_private_s
+struct hb_filter_private_s
{
int pix_fmt;
int width[3];
- int height[3];
-
+ int height[3];
+
struct pullup_context * pullup_ctx;
int pullup_fakecount;
int pullup_skipflag;
-
+
AVPicture pic_in;
- AVPicture pic_out;
+ AVPicture pic_out;
hb_buffer_t * buf_out;
};
-hb_filter_private_t * hb_detelecine_init( int pix_fmt,
- int width,
+hb_filter_private_t * hb_detelecine_init( int pix_fmt,
+ int width,
int height,
char * settings );
int hb_detelecine_work( const hb_buffer_t * buf_in,
hb_buffer_t ** buf_out,
int pix_fmt,
- int width,
+ int width,
int height,
hb_filter_private_t * pv );
void hb_detelecine_close( hb_filter_private_t * pv );
hb_filter_object_t hb_filter_detelecine =
-{
+{
FILTER_DETELECINE,
"Detelecine (pullup)",
NULL,
static int pullup_diff_y( unsigned char *a, unsigned char * b, int s )
{
int i, j, diff = 0;
- for( i = 4; i; i-- )
+ for( i = 4; i; i-- )
{
- for( j = 0; j < 8; j++ )
+ for( j = 0; j < 8; j++ )
{
diff += PULLUP_ABS( a[j]-b[j] );
}
static int pullup_licomb_y( unsigned char * a, unsigned char * b, int s )
{
int i, j, diff = 0;
- for( i = 4; i; i-- )
+ for( i = 4; i; i-- )
{
for( j = 0; j < 8; j++ )
{
static int pullup_var_y( unsigned char * a, unsigned char * b, int s )
{
int i, j, var = 0;
- for( i = 3; i; i-- )
+ for( i = 3; i; i-- )
{
- for( j = 0; j < 8; j++ )
+ for( j = 0; j < 8; j++ )
{
var += PULLUP_ABS( a[j]-a[j+s] );
}
return 4*var;
}
-static void pullup_alloc_metrics( struct pullup_context * c,
+static void pullup_alloc_metrics( struct pullup_context * c,
struct pullup_field * f )
{
f->diffs = calloc( c->metric_len, sizeof(int) );
static void pullup_compute_metric( struct pullup_context * c,
struct pullup_field * fa, int pa,
struct pullup_field * fb, int pb,
- int (* func)( unsigned char *,
- unsigned char *, int),
+ int (* func)( unsigned char *,
+ unsigned char *, int),
int * dest )
{
unsigned char *a, *b;
int ystep = c->stride[mp]<<3;
int s = c->stride[mp]<<1; /* field stride */
int w = c->metric_w*xstep;
-
+
if( !fa->buffer || !fb->buffer ) return;
-
+
/* Shortcut for duplicate fields (e.g. from RFF flag) */
- if( fa->buffer == fb->buffer && pa == pb )
+ if( fa->buffer == fb->buffer && pa == pb )
{
memset( dest, 0, c->metric_len * sizeof(int) );
return;
}
-
+
a = fa->buffer->planes[mp] + pa * c->stride[mp] + c->metric_offset;
b = fb->buffer->planes[mp] + pb * c->stride[mp] + c->metric_offset;
-
- for( y = c->metric_h; y; y-- )
+
+ for( y = c->metric_h; y; y-- )
{
- for( x = 0; x < w; x += xstep )
+ for( x = 0; x < w; x += xstep )
{
*dest++ = func( a + x, b + x, s );
}
}
}
-static struct pullup_field * pullup_make_field_queue( struct pullup_context * c,
+static struct pullup_field * pullup_make_field_queue( struct pullup_context * c,
int len )
{
struct pullup_field * head, * f;
f = head = calloc( 1, sizeof(struct pullup_field) );
pullup_alloc_metrics( c, f );
- for ( ; len > 0; len-- )
+ for ( ; len > 0; len-- )
{
f->next = calloc( 1, sizeof(struct pullup_field) );
f->next->prev = f;
static void pullup_check_field_queue( struct pullup_context * c )
{
- if( c->head->next == c->first )
+ if( c->head->next == c->first )
{
struct pullup_field *f = calloc( 1, sizeof(struct pullup_field) );
pullup_alloc_metrics( c, f );
}
}
-static void pullup_copy_field( struct pullup_context * c,
+static void pullup_copy_field( struct pullup_context * c,
struct pullup_buffer * dest,
- struct pullup_buffer * src,
+ struct pullup_buffer * src,
int parity )
{
int i, j;
unsigned char *d, *s;
- for( i = 0; i < c->nplanes; i++ )
+ for( i = 0; i < c->nplanes; i++ )
{
s = src->planes[i] + parity*c->stride[i];
d = dest->planes[i] + parity*c->stride[i];
- for( j = c->h[i]>>1; j; j-- )
+ for( j = c->h[i]>>1; j; j-- )
{
memcpy( d, s, c->stride[i] );
s += c->stride[i]<<1;
}
-static int pullup_queue_length( struct pullup_field * begin,
+static int pullup_queue_length( struct pullup_field * begin,
struct pullup_field * end )
{
int count = 1;
struct pullup_field * f;
-
+
if( !begin || !end ) return 0;
for( f = begin; f != end; f = f->next ) count++;
return count;
static int pullup_find_first_break( struct pullup_field * f, int max )
{
int i;
- for( i = 0; i < max; i++ )
+ for( i = 0; i < max; i++ )
{
- if( f->breaks & PULLUP_BREAK_RIGHT ||
+ if( f->breaks & PULLUP_BREAK_RIGHT ||
f->next->breaks & PULLUP_BREAK_LEFT )
{
return i+1;
return 0;
}
-static void pullup_compute_breaks( struct pullup_context * c,
+static void pullup_compute_breaks( struct pullup_context * c,
struct pullup_field * f0 )
{
int i;
struct pullup_field *f2 = f1->next;
struct pullup_field *f3 = f2->next;
int l, max_l=0, max_r=0;
-
+
if( f0->flags & PULLUP_HAVE_BREAKS ) return;
f0->flags |= PULLUP_HAVE_BREAKS;
-
+
/* Special case when fields are 100% identical */
- if( f0->buffer == f2->buffer && f1->buffer != f3->buffer )
+ if( f0->buffer == f2->buffer && f1->buffer != f3->buffer )
{
f2->breaks |= PULLUP_BREAK_RIGHT;
return;
}
- if( f0->buffer != f2->buffer && f1->buffer == f3->buffer )
+ if( f0->buffer != f2->buffer && f1->buffer == f3->buffer )
{
f1->breaks |= PULLUP_BREAK_LEFT;
return;
}
-
- for( i = 0; i < c->metric_len; i++ )
+
+ for( i = 0; i < c->metric_len; i++ )
{
l = f2->diffs[i] - f3->diffs[i];
if( l > max_l) max_l = l;
if( max_r > 4*max_l ) f2->breaks |= PULLUP_BREAK_RIGHT;
}
-static void pullup_compute_affinity( struct pullup_context * c,
+static void pullup_compute_affinity( struct pullup_context * c,
struct pullup_field * f )
{
int i;
int max_l = 0, max_r = 0, l;
-
- if( f->flags & PULLUP_HAVE_AFFINITY )
+
+ if( f->flags & PULLUP_HAVE_AFFINITY )
{
return;
}
f->flags |= PULLUP_HAVE_AFFINITY;
-
- if( f->buffer == f->next->next->buffer )
+
+ if( f->buffer == f->next->next->buffer )
{
f->affinity = 1;
f->next->affinity = 0;
f->next->next->affinity = -1;
-
+
f->next->flags |= PULLUP_HAVE_AFFINITY;
f->next->next->flags |= PULLUP_HAVE_AFFINITY;
-
+
return;
}
-
- for( i = 0; i < c->metric_len; i++ )
+
+ for( i = 0; i < c->metric_len; i++ )
{
int lv = f->prev->var[i];
int rv = f->next->var[i];
int v = f->var[i];
int lc = f->comb[i] - (v+lv) + PULLUP_ABS( v-lv );
int rc = f->next->comb[i] - (v+rv) + PULLUP_ABS( v-rv );
-
+
lc = (lc > 0) ? lc : 0;
rc = (rc > 0) ? rc : 0;
l = lc - rc;
if( l > max_l ) max_l = l;
if( -l > max_r ) max_r = -l;
}
-
- if( max_l + max_r < 64 )
+
+ if( max_l + max_r < 64 )
{
return;
}
-
- if( max_r > 6*max_l )
+
+ if( max_r > 6*max_l )
{
f->affinity = -1;
}
- else if( max_l > 6*max_r )
+ else if( max_l > 6*max_r )
{
f->affinity = 1;
}
{
struct pullup_field * f = c->first;
int i, n = pullup_queue_length (f, c->last );
- for( i = 0; i < n-1; i++ )
+ for( i = 0; i < n-1; i++ )
{
if( i < n-3 ) pullup_compute_breaks( c, f );
pullup_compute_affinity( c, f );
struct pullup_field *f1 = f0->next;
struct pullup_field *f2 = f1->next;
int l;
-
- if( pullup_queue_length( c->first, c->last ) < 4 )
+
+ if( pullup_queue_length( c->first, c->last ) < 4 )
{
return 0;
}
pullup_foo( c );
-
+
if( f0->affinity == -1 ) return 1;
-
+
l = pullup_find_first_break( f0, 3 );
if( l == 1 && c->strict_breaks < 0 ) l = 0;
-
- switch (l)
+
+ switch (l)
{
case 1:
- if ( c->strict_breaks < 1 &&
- f0->affinity == 1 &&
+ if ( c->strict_breaks < 1 &&
+ f0->affinity == 1 &&
f1->affinity == -1 )
{
return 2;
{
return 1;
}
-
+
case 2:
/* FIXME: strictly speaking, f0->prev is no longer valid... :) */
if( c->strict_pairs &&
- (f0->prev->breaks & PULLUP_BREAK_RIGHT) &&
+ (f0->prev->breaks & PULLUP_BREAK_RIGHT) &&
(f2->breaks & PULLUP_BREAK_LEFT) &&
(f0->affinity != 1 || f1->affinity != -1) )
{
return 1;
}
- if( f1->affinity == 1 )
+ if( f1->affinity == 1 )
{
return 1;
}
}
case 3:
- if( f2->affinity == 1 )
+ if( f2->affinity == 1 )
{
return 2;
}
{
return 3;
}
-
+
default:
/* 9 possibilities covered before switch */
- if( f1->affinity == 1 )
+ if( f1->affinity == 1 )
{
return 1; /* covers 6 */
}
- else if( f1->affinity == -1 )
+ else if( f1->affinity == -1 )
{
return 2; /* covers 6 */
}
- else if( f2->affinity == -1 )
- {
+ else if( f2->affinity == -1 )
+ {
/* covers 2 */
- if( f0->affinity == 1 )
+ if( f0->affinity == 1 )
{
return 3;
}
}
}
-static void pullup_print_aff_and_breaks(struct pullup_context * c,
+static void pullup_print_aff_and_breaks(struct pullup_context * c,
struct pullup_field * f )
{
int i;
struct pullup_field * f0 = f;
const char aff_l[] = "+..", aff_r[] = "..+";
printf( "\naffinity: " );
- for( i = 0; i < 4; i++ )
+ for( i = 0; i < 4; i++ )
{
- printf( "%c%d%c",
- aff_l[1+f->affinity],
- i,
+ printf( "%c%d%c",
+ aff_l[1+f->affinity],
+ i,
aff_r[1+f->affinity] );
-
+
f = f->next;
}
f = f0;
printf("\nbreaks: ");
- for( i = 0; i < 4; i++ )
+ for( i = 0; i < 4; i++ )
{
- printf( "%c%d%c",
- f->breaks & PULLUP_BREAK_LEFT ? '|' : '.',
- i,
+ printf( "%c%d%c",
+ f->breaks & PULLUP_BREAK_LEFT ? '|' : '.',
+ i,
f->breaks & PULLUP_BREAK_RIGHT ? '|' : '.' );
-
+
f = f->next;
}
printf("\n");
struct pullup_context * pullup_alloc_context( void )
{
struct pullup_context * c;
-
+
c = calloc( 1, sizeof(struct pullup_context)) ;
-
+
return c;
}
c->nbuffers = 10;
}
c->buffers = calloc( c->nbuffers, sizeof (struct pullup_buffer) );
-
+
c->metric_w = (c->w[mp] - ((c->junk_left + c->junk_right) << 3)) >> 3;
c->metric_h = (c->h[mp] - ((c->junk_top + c->junk_bottom) << 1)) >> 3;
c->metric_offset = c->junk_left*c->bpp[mp] + (c->junk_top<<1)*c->stride[mp];
c->metric_len = c->metric_w * c->metric_h;
-
+
c->head = pullup_make_field_queue( c, 8 );
-
+
c->frame = calloc( 1, sizeof (struct pullup_frame) );
c->frame->ifields = calloc( 3, sizeof (struct pullup_buffer *) );
-
- if( c->format == PULLUP_FMT_Y )
+
+ if( c->format == PULLUP_FMT_Y )
{
c->diff = pullup_diff_y;
c->comb = pullup_licomb_y;
void pullup_free_context( struct pullup_context * c )
{
struct pullup_field * f;
-
+
free( c->buffers );
f = c->head;
- do
+ do
{
free( f->diffs );
free( f->comb );
f = f->next;
free( f->prev );
- }
+ }
while( f != c->head );
-
+
free( c->frame );
free( c );
}
*
*/
-static void pullup_alloc_buffer( struct pullup_context * c,
+static void pullup_alloc_buffer( struct pullup_context * c,
struct pullup_buffer * b )
{
int i;
if( b->planes ) return;
b->planes = calloc( c->nplanes, sizeof(unsigned char *) );
- for ( i = 0; i < c->nplanes; i++ )
+ for ( i = 0; i < c->nplanes; i++ )
{
b->planes[i] = malloc(c->h[i]*c->stride[i]);
/* Deal with idiotic 128=0 for chroma: */
}
}
-struct pullup_buffer * pullup_lock_buffer( struct pullup_buffer * b,
+struct pullup_buffer * pullup_lock_buffer( struct pullup_buffer * b,
int parity )
{
if( !b ) return 0;
return b;
}
-void pullup_release_buffer( struct pullup_buffer * b,
+void pullup_release_buffer( struct pullup_buffer * b,
int parity )
{
if( !b ) return;
if( (parity+1) & 2 ) b->lock[1]--;
}
-struct pullup_buffer * pullup_get_buffer( struct pullup_context * c,
+struct pullup_buffer * pullup_get_buffer( struct pullup_context * c,
int parity )
{
int i;
-
+
/* Try first to get the sister buffer for the previous field */
- if( parity < 2 &&
- c->last &&
+ if( parity < 2 &&
+ c->last &&
parity != c->last->parity &&
- !c->last->buffer->lock[parity])
+ !c->last->buffer->lock[parity])
{
pullup_alloc_buffer( c, c->last->buffer );
return pullup_lock_buffer( c->last->buffer, parity );
}
-
+
/* Prefer a buffer with both fields open */
- for( i = 0; i < c->nbuffers; i++ )
+ for( i = 0; i < c->nbuffers; i++ )
{
if( c->buffers[i].lock[0] ) continue;
if( c->buffers[i].lock[1] ) continue;
pullup_alloc_buffer( c, &c->buffers[i] );
return pullup_lock_buffer( &c->buffers[i], parity );
}
-
+
if( parity == 2 ) return 0;
-
+
/* Search for any half-free buffer */
- for( i = 0; i < c->nbuffers; i++ )
+ for( i = 0; i < c->nbuffers; i++ )
{
if( ((parity+1) & 1) && c->buffers[i].lock[0] ) continue;
if( ((parity+1) & 2) && c->buffers[i].lock[1] ) continue;
pullup_alloc_buffer( c, &c->buffers[i] );
return pullup_lock_buffer( &c->buffers[i], parity );
}
-
+
return 0;
}
struct pullup_frame * fr = c->frame;
int n = pullup_decide_frame_length( c );
int aff = c->first->next->affinity;
-
+
if ( !n ) return 0;
if ( fr->lock ) return 0;
-
- if ( c->verbose )
+
+ if ( c->verbose )
{
pullup_print_aff_and_breaks(c, c->first);
printf("duration: %d \n", n);
}
-
+
fr->lock++;
fr->length = n;
fr->parity = c->first->parity;
fr->buffer = 0;
- for( i = 0; i < n; i++ )
+ for( i = 0; i < n; i++ )
{
/* We cheat and steal the buffer without release+relock */
fr->ifields[i] = c->first->buffer;
c->first->buffer = 0;
c->first = c->first->next;
}
-
- if( n == 1 )
+
+ if( n == 1 )
{
fr->ofields[fr->parity] = fr->ifields[0];
fr->ofields[fr->parity^1] = 0;
- }
- else if( n == 2 )
+ }
+ else if( n == 2 )
{
fr->ofields[fr->parity] = fr->ifields[0];
fr->ofields[fr->parity^1] = fr->ifields[1];
- }
- else if( n == 3 )
+ }
+ else if( n == 3 )
{
if( aff == 0 )
{
}
pullup_lock_buffer( fr->ofields[0], 0 );
pullup_lock_buffer( fr->ofields[1], 1 );
-
- if( fr->ofields[0] == fr->ofields[1] )
+
+ if( fr->ofields[0] == fr->ofields[1] )
{
fr->buffer = fr->ofields[0];
pullup_lock_buffer(fr->buffer, 2);
*
*/
-void pullup_submit_field( struct pullup_context * c,
- struct pullup_buffer * b,
+void pullup_submit_field( struct pullup_context * c,
+ struct pullup_buffer * b,
int parity )
{
struct pullup_field * f;
-
+
/* Grow the circular list if needed */
pullup_check_field_queue( c );
-
+
/* Cannot have two fields of same parity in a row; drop the new one */
if( c->last && c->last->parity == parity ) return;
-
+
f = c->head;
f->parity = parity;
f->buffer = pullup_lock_buffer( b, parity );
f->flags = 0;
f->breaks = 0;
f->affinity = 0;
-
- pullup_compute_metric( c, f, parity, f->prev->prev,
+
+ pullup_compute_metric( c, f, parity, f->prev->prev,
parity, c->diff, f->diffs );
- pullup_compute_metric( c, parity?f->prev:f, 0,
+ pullup_compute_metric( c, parity?f->prev:f, 0,
parity?f:f->prev, 1, c->comb, f->comb );
- pullup_compute_metric( c, f, parity, f,
+ pullup_compute_metric( c, f, parity, f,
-1, c->var, f->var );
-
+
/* Advance the circular list */
if( !c->first ) c->first = c->head;
c->last = c->head;
void pullup_flush_fields( struct pullup_context * c )
{
struct pullup_field * f;
-
- for( f = c->first; f && f != c->head; f = f->next )
+
+ for( f = c->first; f && f != c->head; f = f->next )
{
pullup_release_buffer( f->buffer, f->parity );
f->buffer = 0;
*
*/
-hb_filter_private_t * hb_detelecine_init( int pix_fmt,
- int width,
+hb_filter_private_t * hb_detelecine_init( int pix_fmt,
+ int width,
int height,
char * settings )
{
{
return 0;
}
-
+
hb_filter_private_t * pv = malloc( sizeof(struct hb_filter_private_s) );
-
- pv->pix_fmt = pix_fmt;
+
+ pv->pix_fmt = pix_fmt;
pv->width[0] = width;
- pv->height[0] = height;
+ pv->height[0] = height;
pv->width[1] = pv->width[2] = width >> 1;
- pv->height[1] = pv->height[2] = height >> 1;
+ pv->height[1] = pv->height[2] = height >> 1;
- int buf_size = 3 * width * height / 2;
+ int buf_size = 3 * width * height / 2;
pv->buf_out = hb_buffer_init( buf_size );
-
+
struct pullup_context * ctx;
pv->pullup_ctx = ctx = pullup_alloc_context();
-
+
ctx->junk_left = ctx->junk_right = 1;
- ctx->junk_top = ctx->junk_bottom = 4;
+ ctx->junk_top = ctx->junk_bottom = 4;
ctx->strict_breaks = 0;
ctx->metric_plane = 0;
- if( settings )
+ if( settings )
{
- sscanf( settings, "%d:%d:%d:%d:%d:%d",
- &ctx->junk_left,
- &ctx->junk_right,
- &ctx->junk_top,
- &ctx->junk_bottom,
- &ctx->strict_breaks,
+ sscanf( settings, "%d:%d:%d:%d:%d:%d",
+ &ctx->junk_left,
+ &ctx->junk_right,
+ &ctx->junk_top,
+ &ctx->junk_bottom,
+ &ctx->strict_breaks,
&ctx->metric_plane );
}
-
+
ctx->format = PULLUP_FMT_Y;
ctx->nplanes = 4;
-
+
pullup_preinit_context( ctx );
-
+
ctx->bpp[0] = ctx->bpp[1] = ctx->bpp[2] = 8;
ctx->background[1] = ctx->background[2] = 128;
ctx->w[2] = pv->width[2];
ctx->h[2] = pv->height[2];
- ctx->stride[2] = pv->width[2];
-
+ ctx->stride[2] = pv->width[2];
+
ctx->w[3] = ((width+15)/16) * ((height+15)/16);
ctx->h[3] = 2;
- ctx->stride[3] = ctx->w[3];
-
+ ctx->stride[3] = ctx->w[3];
+
#if 0
ctx->verbose = 1;
#endif
-
+
pullup_init_context( ctx );
pv->pullup_fakecount = 1;
pv->pullup_skipflag = 0;
-
+
return pv;
}
{
return;
}
-
+
if( pv->buf_out )
{
hb_buffer_close( &pv->buf_out );
}
-
+
if( pv->pullup_ctx )
{
pullup_free_context( pv->pullup_ctx );
}
-
+
free( pv );
}
int hb_detelecine_work( const hb_buffer_t * buf_in,
hb_buffer_t ** buf_out,
int pix_fmt,
- int width,
+ int width,
int height,
hb_filter_private_t * pv )
{
- if( !pv ||
+ if( !pv ||
pix_fmt != pv->pix_fmt ||
width != pv->width[0] ||
height != pv->height[0] )
{
return FILTER_FAILED;
- }
-
+ }
+
struct pullup_context * ctx = pv->pullup_ctx;
struct pullup_buffer * buf;
struct pullup_frame * frame;
-
+
buf = pullup_get_buffer( ctx, 2 );
if( !buf )
{
frame = pullup_get_frame( ctx );
- pullup_release_frame( frame );
+ pullup_release_frame( frame );
hb_log( "Could not get buffer from pullup!" );
return FILTER_FAILED;
}
-
- /* Copy input buffer into pullup buffer */
- avpicture_fill( &pv->pic_in, buf_in->data,
+
+ /* Copy input buffer into pullup buffer */
+ avpicture_fill( &pv->pic_in, buf_in->data,
pix_fmt, width, height );
-
+
hb_buffer_copy_settings( pv->buf_out, buf_in );
-
- memcpy( buf->planes[0], pv->pic_in.data[0],
+
+ memcpy( buf->planes[0], pv->pic_in.data[0],
pv->width[0] * pv->height[0] * sizeof(uint8_t) );
- memcpy( buf->planes[1], pv->pic_in.data[1],
+ memcpy( buf->planes[1], pv->pic_in.data[1],
pv->width[1] * pv->height[1] * sizeof(uint8_t) );
- memcpy( buf->planes[2], pv->pic_in.data[2],
+ memcpy( buf->planes[2], pv->pic_in.data[2],
pv->width[2] * pv->height[2] * sizeof(uint8_t) );
-
+
/* Submit buffer fields based on buffer flags */
int parity = 1;
if( buf_in->flags & PIC_FLAG_TOP_FIELD_FIRST )
pullup_submit_field( ctx, buf, parity^1 );
if( buf_in->flags & PIC_FLAG_REPEAT_FIRST_FIELD )
{
- pullup_submit_field( ctx, buf, parity );
- }
- pullup_release_buffer( buf, 2 );
-
+ pullup_submit_field( ctx, buf, parity );
+ }
+ pullup_release_buffer( buf, 2 );
+
/* Get frame and check if pullup is ready */
- frame = pullup_get_frame( ctx );
+ frame = pullup_get_frame( ctx );
if( !frame )
{
if( pv->pullup_fakecount )
{
pv->pullup_fakecount--;
-
- memcpy( pv->buf_out->data, buf_in->data, buf_in->size );
+
+ memcpy( pv->buf_out->data, buf_in->data, buf_in->size );
goto output_frame;
}
goto discard_frame;
}
}
-
+
/* Check to see if frame should be dropped */
if( frame->length < 2 )
{
pullup_release_frame( frame );
frame = pullup_get_frame( ctx );
-
- if (!frame)
+
+ if (!frame)
{
goto discard_frame;
}
- if( frame->length < 2 )
+ if( frame->length < 2 )
{
pullup_release_frame( frame );
{
goto discard_frame;
}
-
+
frame = pullup_get_frame( ctx );
-
- if( !frame )
+
+ if( !frame )
{
goto discard_frame;
}
- if( frame->length < 2 )
+ if( frame->length < 2 )
{
pullup_release_frame( frame );
goto discard_frame;
}
}
}
-
+
/* Check to see if frame buffer is ready for export */
if( !frame->buffer )
{
pullup_pack_frame( ctx, frame );
}
-
+
/* Copy pullup frame buffer into output buffer */
- avpicture_fill( &pv->pic_out, pv->buf_out->data,
- pix_fmt, width, height );
-
+ avpicture_fill( &pv->pic_out, pv->buf_out->data,
+ pix_fmt, width, height );
+
memcpy( pv->pic_out.data[0], frame->buffer->planes[0],
pv->width[0] * pv->height[0] * sizeof(uint8_t) );
- memcpy( pv->pic_out.data[1], frame->buffer->planes[1],
+ memcpy( pv->pic_out.data[1], frame->buffer->planes[1],
pv->width[1] * pv->height[1] * sizeof(uint8_t) );
- memcpy( pv->pic_out.data[2], frame->buffer->planes[2],
+ memcpy( pv->pic_out.data[2], frame->buffer->planes[2],
pv->width[2] * pv->height[2] * sizeof(uint8_t) );
-
- pullup_release_frame( frame );
-output_frame:
- *buf_out = pv->buf_out;
+ pullup_release_frame( frame );
+
+output_frame:
+ *buf_out = pv->buf_out;
return FILTER_OK;
/* This and all discard_frame calls shown above are
the result of me restoring the functionality in
pullup that huevos_rancheros disabled because
HB couldn't handle it. */
-discard_frame:
+discard_frame:
*buf_out = pv->buf_out;
return FILTER_DROP;
hb_error("Invalid VTS (title set) number: %i", title->vts);
goto fail;
}
-
+
hb_log( "scan: opening IFO for VTS %d", title->vts );
if( !( vts = ifoOpen( d->reader, title->vts ) ) )
{
hb_error( "scan: pgc not valid, skipping" );
goto fail;
}
-
+
/* Start cell */
title->cell_start = d->pgc->program_map[pgn-1] - 1;
title->block_start = d->pgc->cell_playback[title->cell_start].first_sector;
int is_nav_pack( unsigned char *buf )
{
/*
- * The NAV Pack is comprised of the PCI Packet and DSI Packet, both
+ * The NAV Pack is comprised of the PCI Packet and DSI Packet, both
* of these start at known offsets and start with a special identifier.
*
* NAV = {
// should check and discover we're at eof.
if ( d->cell_cur > d->cell_end )
return 0;
-
+
for( ;; )
{
int block, pack_len, next_vobu, read_retry;
continue;
}
- if ( !is_nav_pack( b->data ) ) {
+ if ( !is_nav_pack( b->data ) ) {
(d->next_vobu)++;
if( d->in_sync == 1 ) {
- hb_log("dvd: Lost sync, searching for NAV pack at blk %d",
+ hb_log("dvd: Lost sync, searching for NAV pack at blk %d",
d->next_vobu);
d->in_sync = 0;
}
}
navRead_DSI( &dsi_pack, &b->data[DSI_START_BYTE] );
-
+
if ( d->in_sync == 0 && d->cur_cell_id &&
(d->cur_vob_id != dsi_pack.dsi_gi.vobu_vob_idn ||
d->cur_cell_id != dsi_pack.dsi_gi.vobu_c_idn ) )
if ( d->pgc->cell_playback[d->cell_cur].first_sector < dsi_pack.dsi_gi.nv_pck_lbn &&
d->pgc->cell_playback[d->cell_cur].last_sector >= dsi_pack.dsi_gi.nv_pck_lbn )
{
- hb_log( "dvd: null prev_vobu in cell %d at block %d", d->cell_cur,
+ hb_log( "dvd: null prev_vobu in cell %d at block %d", d->cell_cur,
d->block );
// treat like end-of-cell then go directly to start of next cell.
d->cell_cur = d->cell_next;
}
else
{
- hb_log( "dvd: Beginning of Cell (%d) at block %d", d->cell_cur,
+ hb_log( "dvd: Beginning of Cell (%d) at block %d", d->cell_cur,
d->block );
if( d->in_cell )
{
if( ( dsi_pack.vobu_sri.next_vobu & (1 << 31 ) ) == 0 ||
( dsi_pack.vobu_sri.next_vobu & 0x3fffffff ) == 0x3fffffff )
- {
- hb_log( "dvd: End of Cell (%d) at block %d", d->cell_cur,
+ {
+ hb_log( "dvd: End of Cell (%d) at block %d", d->cell_cur,
d->block );
d->cell_cur = d->cell_next;
d->in_cell = 0;
d->next_vobu = d->pgc->cell_playback[d->cell_cur].first_sector;
FindNextCell( d );
d->cell_overlap = 1;
-
+
}
}
else
int nr_of_ptts = d->ifo->vts_ptt_srpt->title[d->ttn-1].nr_of_ptts;
pgc_t * pgc;
int cell;
-
+
for( i = nr_of_ptts - 1;
i > 0;
i-- )
return 1;
}
}
-
+
return 0;
}
i++;
}
d->cell_next = d->cell_cur + i + 1;
- hb_log( "dvd: Skipping multi-angle cells %d-%d",
- d->cell_cur,
+ hb_log( "dvd: Skipping multi-angle cells %d-%d",
+ d->cell_cur,
d->cell_next - 1 );
}
else
void encavcodecClose( hb_work_object_t * );
hb_work_object_t hb_encavcodec =
-{
+{
WORK_ENCAVCODEC,
"MPEG-4 encoder (libavcodec)",
encavcodecInit,
encavcodecWork,
encavcodecClose
-};
+};
int encavcodecInit( hb_work_object_t * w, hb_job_t * job )
{
AVCodec * codec;
AVCodecContext * context;
-
+
hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
w->private_data = pv;
context->sample_aspect_ratio.num = job->pixel_aspect_width;
context->sample_aspect_ratio.den = job->pixel_aspect_height;
- hb_log( "encavcodec: encoding with stored aspect %d/%d",
+ hb_log( "encavcodec: encoding with stored aspect %d/%d",
job->pixel_aspect_width, job->pixel_aspect_height );
}
context->extradata_size );
#endif
}
-
+
return 0;
}
hb_list_t * list;
int64_t pts;
-
+
int out_discrete_channels;
};
pv->faac = faacEncOpen( job->arate, pv->out_discrete_channels, &pv->input_samples,
&pv->output_bytes );
pv->buf = malloc( pv->input_samples * sizeof( float ) );
-
+
cfg = faacEncGetCurrentConfiguration( pv->faac );
cfg->mpegVersion = MPEG4;
cfg->aacObjectType = LOW;
cfg->allowMidside = 1;
-
+
if (pv->out_discrete_channels == 6) {
/* we are preserving 5.1 audio into 6-channel AAC,
so indicate that we have an lfe channel */
cfg->bandWidth = 0;
cfg->outputFormat = 0;
cfg->inputFormat = FAAC_INPUT_FLOAT;
-
+
if (w->amixdown == HB_AMIXDOWN_6CH && w->source_acodec == HB_ACODEC_AC3)
{
/* we are preserving 5.1 AC-3 audio into 6-channel AAC, and need to
cfg->channel_map[4] = 5;
cfg->channel_map[5] = 0;
}
-
+
if( !faacEncSetConfiguration( pv->faac, cfg ) )
{
hb_log( "faacEncSetConfiguration failed" );
buf->next = Encode( w );
buf = buf->next;
}
-
+
return HB_WORK_OK;
}
lame_set_in_samplerate( pv->lame, job->arate );
lame_set_out_samplerate( pv->lame, job->arate );
lame_init_params( pv->lame );
-
+
pv->input_samples = 1152 * 2;
pv->output_bytes = LAME_MAXMP3BUFFER;
pv->buf = malloc( pv->input_samples * sizeof( float ) );
void enclameClose( hb_work_object_t * w )
{
hb_work_private_t * pv = w->private_data;
-
+
lame_close( pv->lame );
hb_list_empty( &pv->list );
free( pv->buf );
void encvorbisClose( hb_work_object_t * w )
{
hb_work_private_t * pv = w->private_data;
-
+
vorbis_block_clear( &pv->vb );
vorbis_dsp_clear( &pv->vd );
vorbis_comment_clear( &pv->vc );
if (pv->list)
hb_list_empty( &pv->list );
-
+
free( pv->buf );
free( pv );
w->private_data = NULL;
encx264Close
};
-#define DTS_BUFFER_SIZE 32
+#define DTS_BUFFER_SIZE 32
/*
* The frame info struct remembers information about each frame across calls
* chosen so that two successive frames will have different values in the
* bits over any plausible range of frame rates. (Starting with bit 9 allows
* any frame rate slower than 175fps.) The MSB determines the size of the array.
- * It is chosen so that two frames can't use the same slot during the
+ * It is chosen so that two frames can't use the same slot during the
* encoder's max frame delay (set by the standard as 16 frames) and so
* that, up to some minimum frame rate, frames are guaranteed to map to
* different slots. (An MSB of 16 which is 2^(16-9+1) = 256 slots guarantees
param.i_height = job->height;
param.i_fps_num = job->vrate;
param.i_fps_den = job->vrate_base;
-
+
if (job->vrate_base != 1080000)
{
/* If the fps isn't 25, adjust the key intervals. Add 1 because
we want 24, not 23 with a truncated remainder. */
param.i_keyint_min = (job->vrate / job->vrate_base) + 1;
param.i_keyint_max = (10 * job->vrate / job->vrate_base) + 1;
- hb_log("encx264: keyint-min: %i, keyint-max: %i", param.i_keyint_min, param.i_keyint_max);
+ hb_log("encx264: keyint-min: %i, keyint-max: %i", param.i_keyint_min, param.i_keyint_max);
}
-
+
param.i_log_level = X264_LOG_INFO;
if( job->h264_level )
{
param.analyse.i_subpel_refine = 4;
/*
- This section passes the string x264opts to libx264 for parsing into
+ This section passes the string x264opts to libx264 for parsing into
parameter names and values.
The string is set up like this:
option1=value1:option2=value 2
- So, you have to iterate through based on the colons, and then put
+ So, you have to iterate through based on the colons, and then put
the left side of the equals sign in "name" and the right side into
"value." Then you hand those strings off to x264 for interpretation.
/* Basic initDelay value is the clockrate divided by the FPS
-- the length of one frame in clockticks. */
pv->init_delay = (float)90000 / (float)((float)job->vrate / (float)job->vrate_base);
-
+
/* 23.976-length frames are 3753.75 ticks long. That means 25%
will come out as 3753, 75% will be 3754. The delay has to be
the longest possible frame duration, 3754. However, 3753.75
gets truncated to 3753, so if that's what it is, ++ it. */
if (pv->init_delay == 3753)
pv->init_delay++;
-
+
/* For VFR, libhb sees the FPS as 29.97, but the longest frames
will use the duration of frames running at 23.976fps instead.. */
if (job->vfr)
{
pv->init_delay = 7506;
}
-
+
/* The delay is 2 frames for regular b-frames, 3 for b-pyramid.
Since job->areBframes is 1 for b-frames and 2 for b-pyramid,
add one to it and use it as a multiplier. */
if( pv->dts_next == -1 )
{
- /* we don't have a start time yet so use the first frame's
+ /* we don't have a start time yet so use the first frame's
* start. All other frame times will be determined by the
* sum of the prior output frame durations in *DTS* order
* (not by the order they arrive here). This timing change is
}
pv->last_stop = in->stop;
- // Remember info about this frame that we need to pass across
+ // Remember info about this frame that we need to pass across
// the x264_encoder_encode call (since it reorders frames).
save_frame_info( pv, in );
pv->pic_in.i_pts = in->start;
x264_encoder_encode( pv->x264, &nal, &i_nal,
- &pv->pic_in, &pic_out );
+ &pv->pic_in, &pic_out );
}
else
{
themselves reference frames, figure it out on our own. */
if( (buf->frametype == HB_FRAME_B) && (nal[i].i_ref_idc != NAL_PRIORITY_DISPOSABLE) )
buf->frametype = HB_FRAME_BREF;
-
+
/* Store the output presentation time stamp
from x264 for use by muxmp4 in off-setting
b-frames with the CTTS atom. */
hb_log( "encxvid: closing libxvidcore" );
xvid_encore( pv->xvid, XVID_ENC_DESTROY, NULL, NULL);
}
-
+
free( pv );
w->private_data = NULL;
}
buffers.entries = 0;
buffers.lock = hb_lock_init();
buffers.allocated = 0;
-
+
while(size <= max_size) {
buffer_pool = buffers.pool[buffers.entries++] = hb_fifo_init(BUFFER_POOL_MAX_ELEMENTS);
buffer_pool->buffer_size = size;
hb_lock(buffers.lock);
- for( i = 0; i < buffers.entries; i++)
+ for( i = 0; i < buffers.entries; i++)
{
count = 0;
while( ( b = hb_fifo_get(buffers.pool[i]) ) )
hb_buffer_t * hb_buffer_init( int size )
-{
+{
hb_buffer_t * b;
int i;
hb_fifo_t *buffer_pool = NULL;
/*
* This pool is big enough, but are there any buffers in it?
*/
- if( hb_fifo_size( buffers.pool[i] ) )
+ if( hb_fifo_size( buffers.pool[i] ) )
{
/*
* We've found a matching buffer pool, with buffers.
resize = buffers.pool[i]->buffer_size;
} else {
/*
- * Buffer pool is empty,
+ * Buffer pool is empty,
*/
if( resize ) {
/*
*/
if( size != 0 && buffer_pool )
{
- b = hb_fifo_get( buffer_pool );
+ b = hb_fifo_get( buffer_pool );
if( b )
{
* Zero the contents of the buffer, would be nice if we
* didn't have to do this.
*
- hb_log("Reused buffer size %d for size %d from pool %d depth %d",
- b->alloc, size, smallest_pool->buffer_size,
+ hb_log("Reused buffer size %d for size %d from pool %d depth %d",
+ b->alloc, size, smallest_pool->buffer_size,
hb_fifo_size(smallest_pool));
*/
data = b->data;
b->size = size;
b->data = data;
return( b );
- }
+ }
}
/*
if( resize )
{
size = resize;
- }
- b->alloc = size;
+ }
+ b->alloc = size;
/*
- hb_log("Allocating new buffer of size %d for size %d",
- b->alloc,
+ hb_log("Allocating new buffer of size %d for size %d",
+ b->alloc,
b->size);
*/
for( i = 0; i < buffers.entries; i++ )
{
if( b->alloc == buffers.pool[i]->buffer_size )
- {
+ {
buffer_pool = buffers.pool[i];
break;
}
}
}
- if( buffer_pool )
+ if( buffer_pool )
{
- if( !hb_fifo_is_full( buffer_pool ) )
+ if( !hb_fifo_is_full( buffer_pool ) )
{
if(b->data)
{
/*
hb_log("Putting a buffer of size %d on pool %d, depth %d",
- b->alloc,
- buffer_pool->buffer_size,
+ b->alloc,
+ buffer_pool->buffer_size,
hb_fifo_size(buffer_pool));
*/
hb_fifo_push( buffer_pool, b );
b->next = NULL;
f->size -= 1;
hb_unlock( f->lock );
-
+
return b;
}
{
hb_fifo_t * f = *_f;
hb_buffer_t * b;
-
+
hb_log( "fifo_close: trashing %d buffer(s)", hb_fifo_size( f ) );
while( ( b = hb_fifo_get( f ) ) )
{
hb_lock_t * state_lock;
hb_state_t state;
-
+
int paused;
hb_lock_t * pause_lock;
/* For MacGui active queue
increments each time the scan thread completes*/
int scanCount;
-
+
};
hb_work_object_t * hb_objects = NULL;
avcodec_init();
avcodec_register_all();
av_register_codec_parser( &mpegaudio_parser);
-
+
/* Start library thread */
hb_log( "hb_init: starting libhb thread" );
h->die = 0;
HB_NORMAL_PRIORITY );
return h;
-
+
/* Set the scan count to start at 0 */
//scan_count = 0;
}
h->main_thread = hb_thread_init( "libhb", thread_func, h,
HB_NORMAL_PRIORITY );
- hb_register( &hb_sync );
- hb_register( &hb_decmpeg2 );
- hb_register( &hb_decsub );
- hb_register( &hb_render );
- hb_register( &hb_encavcodec );
- hb_register( &hb_encxvid );
- hb_register( &hb_encx264 );
- hb_register( &hb_deca52 );
- hb_register( &hb_decdca );
- hb_register( &hb_decavcodec );
- hb_register( &hb_declpcm );
- hb_register( &hb_encfaac );
- hb_register( &hb_enclame );
- hb_register( &hb_encvorbis );
-
+ hb_register( &hb_sync );
+ hb_register( &hb_decmpeg2 );
+ hb_register( &hb_decsub );
+ hb_register( &hb_render );
+ hb_register( &hb_encavcodec );
+ hb_register( &hb_encxvid );
+ hb_register( &hb_encx264 );
+ hb_register( &hb_deca52 );
+ hb_register( &hb_decdca );
+ hb_register( &hb_decavcodec );
+ hb_register( &hb_declpcm );
+ hb_register( &hb_encfaac );
+ hb_register( &hb_enclame );
+ hb_register( &hb_encvorbis );
+
return h;
}
hb_list_rem( h->list_title, title );
hb_title_close( &title );
}
-
+
hb_log( "hb_scan: path=%s, title_index=%d", path, title_index );
h->scan_thread = hb_scan_init( h, path, title_index, h->list_title );
}
- Allows users to set the width
- Handles ITU pixel aspects
*/
-
+
/* Set up some variables to make the math easier to follow. */
hb_title_t * title = job->title;
int cropped_width = title->width - job->crop[2] - job->crop[3] ;
int cropped_height = title->height - job->crop[0] - job->crop[1] ;
- int storage_aspect = cropped_width * 10000 / cropped_height;
+ int storage_aspect = cropped_width * 10000 / cropped_height;
int width = job->width;
int height; // Gets set later, ignore user value
int mod = job->modulus;
is bigger than the max. If so, set it to the max (this is sloppy).
If not, set job height to job width divided by storage aspect.
*/
-
+
if ( job->maxWidth && (job->maxWidth < job->width) )
width = job->maxWidth;
-
+
if ( job->maxHeight && (job->maxHeight < (width / storage_aspect * 10000)) )
{
height = job->maxHeight;
{
height = width * 10000 / storage_aspect;
}
-
-
+
+
/* Time to get picture dimensions that divide cleanly.
These variables will store temporary dimensions as we iterate. */
int i, w, h;
mod = job->modulus;
else
mod = 16;
-
+
/* Iterate through multiples of mod to find one close to job->width. */
for( i = 1;; i++ )
{
w = mod * i;
-
+
if (w < width)
{
if ( ( width - w ) <= ( mod / 2 ) )
}
}
width = mod * (i);
-
+
/* Now do the same for a mod-friendly value near job->height. */
for( i = 1;; i++)
{
h = i * mod;
-
+
if (h < height)
{
if ( ( height - h ) <= ( mod / 2 ))
if (h == height)
/* Mod 16 dimensions, how nice! */
break;
-
+
if ( h > height)
{
if ( ( h - height ) < ( mod / 2 ))
}
}
height = mod * (i);
-
+
int pixel_aspect_width = job->pixel_aspect_width;
int pixel_aspect_height = job->pixel_aspect_height;
-
+
if (cropped_width <= 706)
{
/* Handle ITU PARs */
/* Figure out what dimensions the source would display at. */
int source_display_width = cropped_width * ((float)pixel_aspect_width / (float)pixel_aspect_height) ;
-
+
/* The film AR is the source's display width / cropped source height.
The output display width is the output height * film AR.
The output PAR is the output display width / output storage width. */
pixel_aspect_width = height * source_display_width / cropped_height;
pixel_aspect_height = width;
-
+
/* While x264 is smart enough to reduce fractions on its own, libavcodec
needs some help with the math, so lose superfluous factors. */
hb_reduce( &pixel_aspect_width, &pixel_aspect_height,
pixel_aspect_width, pixel_aspect_height );
-
+
/* Pass the results back to the caller */
*output_width = width;
*output_height = height;
memset( audio_lang, 0, sizeof( audio_lang ) );
if ( job->indepth_scan || job->native_language ) {
-
+
/*
* Find the first audio language that is being encoded
*/
* In all cases switch the language if we need to to our native
* language.
*/
- if( job->native_language )
+ if( job->native_language )
{
- if( strncasecmp( job->native_language, audio_lang,
+ if( strncasecmp( job->native_language, audio_lang,
sizeof( audio_lang ) ) != 0 )
- {
-
+ {
+
if( job->pass != 2 )
{
hb_log( "Enabled subtitles in native language '%s', audio is in '%s'",
* If doing a subtitle scan then add all the matching subtitles for this
* language.
*/
- if ( job->indepth_scan )
+ if ( job->indepth_scan )
{
- for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
+ for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
{
subtitle = hb_list_item( title->list_subtitle, i );
- if( strcmp( subtitle->iso639_2, audio_lang ) == 0 )
+ if( strcmp( subtitle->iso639_2, audio_lang ) == 0 )
{
/*
* Matched subtitle language with audio language, so
/*
* Definitely not doing a subtitle scan.
*/
- if( job->pass != 1 && job->native_language )
+ if( job->pass != 1 && job->native_language )
{
/*
* We are not doing a subtitle scan but do want the
- * native langauge subtitle selected, so select it
+ * native language subtitle selected, so select it
* for pass 0 or pass 2 of a two pass.
*/
- for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
+ for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
{
subtitle = hb_list_item( title->list_subtitle, i );
- if( strcmp( subtitle->iso639_2, audio_lang ) == 0 )
+ if( strcmp( subtitle->iso639_2, audio_lang ) == 0 )
{
/*
* Matched subtitle language with audio language, so
* bother adding them for pass 0 or pass 2 of a two
* pass.
*/
- if( job->pass != 1 )
+ if( job->pass != 1 )
{
if( ( subtitle = hb_list_item( title->list_subtitle, job->subtitle ) ) )
{
{
int i;
int filter_count = hb_list_count( job->filters );
- job_copy->filters = hb_list_init();
+ job_copy->filters = hb_list_init();
for( i = 0; i < filter_count; i++ )
{
/*
if( filter->settings )
filter_copy->settings = strdup( filter->settings );
hb_list_add( job_copy->filters, filter_copy );
- }
+ }
}
-
+
/* Add the job to the list */
hb_list_add( h->jobs, job_copy );
h->job_count = hb_count(h);
void hb_rem( hb_handle_t * h, hb_job_t * job )
{
hb_list_rem( h->jobs, job );
-
+
h->job_count = hb_count(h);
if (h->job_count_permanent)
h->job_count_permanent--;
hb_lock( h->state_lock );
h->state.state = HB_STATE_WORKDONE;
h->state.param.workdone.error = h->work_error;
-
+
h->job_count = hb_count(h);
if (h->job_count < 1)
h->job_count_permanent = 0;
/* XXX Hack */
if (h->job_count < 1)
h->job_count_permanent = 1;
-
+
h->state.param.working.job_cur =
h->job_count_permanent - hb_list_count( h->jobs );
h->state.param.working.job_count = h->job_count_permanent;
-
+
// Set which job is being worked on
if (h->current_job)
h->state.param.working.sequence_id = h->current_job->sequence_id;
#define HB_DEBUG_ALL 1
void hb_register( hb_work_object_t * );
hb_handle_t * hb_init_real( int verbose, int update_check );
-hb_handle_t * hb_init_dl ( int verbose, int update_check ); // hb_init for use with dylib
+hb_handle_t * hb_init_dl ( int verbose, int update_check ); // hb_init for use with dylib
#define hb_init(v,u) \
hb_init_real( v, u ); \
Look at test/test.c to see how to use it. */
void hb_get_state( hb_handle_t *, hb_state_t * );
void hb_get_state2( hb_handle_t *, hb_state_t * );
-/* hb_get_scancount() is called by the MacGui in UpdateUI to
+/* hb_get_scancount() is called by the MacGui in UpdateUI to
check for a new scan during HB_STATE_WORKING phase */
int hb_get_scancount( hb_handle_t * );
hb_buffer_t * hb_buffer_init( int size );
void hb_buffer_realloc( hb_buffer_t *, int size );
void hb_buffer_close( hb_buffer_t ** );
-void hb_buffer_copy_settings( hb_buffer_t * dst,
+void hb_buffer_copy_settings( hb_buffer_t * dst,
const hb_buffer_t * src );
hb_fifo_t * hb_fifo_init();
uint8_t headers[3][HB_CONFIG_MAX_SIZE];
char *language;
} vorbis;
-
+
struct
{
/* ac3flags stores the flags from the AC3 source, as found in scan.c */
iso639_lang_t * lang_for_english( const char * english )
{
iso639_lang_t * lang;
-
+
for( lang = (iso639_lang_t*) languages; lang->eng_name; lang++ )
{
if( !strcmp( lang->eng_name, english ) )
static void WriteInt8( FILE * file, uint8_t val )
{
fputc( val, file );
-}
+}
static void WriteInt16( FILE * file, uint16_t val )
{
/* Video track */
mux_data = calloc( sizeof( hb_mux_data_t ), 1 );
job->mux_data = mux_data;
-
+
#define h mux_data->header
/* Video stream header */
h.FourCC = FOURCC( "strh" );
for( i = 0; i < audio_count; i++ )
{
char fourcc[4] = "00wb";
-
+
audio = hb_list_item( title->list_audio, i );
mux_data = audio->mux_data;
WriteInt8( m->file, 0 );
}
- /* Update headers */
+ /* Update headers */
m->size += 8 + EVEN( buf->size );
mux_data->header.Length++;
- /* RIFF size */
+ /* RIFF size */
fseek( m->file, 4, SEEK_SET );
WriteInt32( m->file, 2052 + m->size );
hb_error("muxmkv: Unknown audio codec: %x", job->acodec);
return 0;
}
-
+
if (default_track_flag)
{
track->flagDefault = 1;
/* Cumulated durations so far, in timescale units (see MP4Mux) */
uint64_t sum_dur;
-
+
/* Chapter state information for muxing */
MP4TrackId chapter_track;
int current_chapter;
struct hb_text_sample_s *sample = NULL;
int stringLength = strlen(textString);
int x;
-
+
if( stringLength < 1024 )
{
sample = malloc( sizeof( struct hb_text_sample_s ) );
- //textLength = (stringLength; // Account for BOM
+ //textLength = (stringLength; // Account for BOM
sample->length = stringLength + 2 + 12; // Account for text length code and other marker
sample->duration = (MP4Duration)duration;
-
+
// 2-byte length marker
sample->sample[0] = (stringLength >> 8) & 0xff;
sample->sample[1] = stringLength & 0xff;
-
+
strncpy( (char *)&(sample->sample[2]), textString, stringLength );
-
+
x = 2 + stringLength;
// Modifier Length Marker
sample->sample[x+1] = 0x00;
sample->sample[x+2] = 0x00;
sample->sample[x+3] = 0x0C;
-
+
// Modifier Type Code
sample->sample[x+4] = 'e';
sample->sample[x+5] = 'n';
sample->sample[x+6] = 'c';
sample->sample[x+7] = 'd';
-
+
// Modifier Value
sample->sample[x+8] = 0x00;
sample->sample[x+9] = 0x00;
sample->sample[x+10] = (256 >> 8) & 0xff;
sample->sample[x+11] = 256 & 0xff;
}
-
+
return sample;
}
-
+
/**********************************************************************
* MP4GenerateChapterSample
**********************************************************************
hb_chapter_t *chapter_data = hb_list_item( m->job->title->list_chapter, chapter - 1 );
char tmp_buffer[1024];
char *string = tmp_buffer;
-
+
tmp_buffer[0] = '\0';
-
+
if( chapter_data != NULL )
{
string = chapter_data->title;
}
-
+
if( strlen(string) == 0 || strlen(string) >= 1024 )
{
snprintf( tmp_buffer, 1023, "Chapter %03i", chapter );
string = tmp_buffer;
}
-
+
return MP4CreateTextSample( string, duration );
}
-
+
/**********************************************************************
* MP4Init
**********************************************************************
{
hb_job_t * job = m->job;
hb_title_t * title = job->title;
-
+
hb_audio_t * audio;
hb_mux_data_t * mux_data;
int i;
u_int16_t language_code;
-
+
/* Flags for enabling/disabling tracks in an MP4. */
typedef enum { TRACK_DISABLED = 0x0, TRACK_ENABLED = 0x1, TRACK_IN_MOVIE = 0x2, TRACK_IN_PREVIEW = 0x4, TRACK_IN_POSTER = 0x8} track_header_flags;
-
+
/* Create an empty mp4 file */
if (job->largeFileSize)
/* Use 64-bit MP4 file */
{
- m->file = MP4Create( job->file, MP4_DETAILS_ERROR, MP4_CREATE_64BIT_DATA );
+ m->file = MP4Create( job->file, MP4_DETAILS_ERROR, MP4_CREATE_64BIT_DATA );
hb_log("Using 64-bit MP4 formatting.");
}
else
{
m->file = MP4Create( job->file, MP4_DETAILS_ERROR, 0 );
}
-
+
if (m->file == MP4_INVALID_FILE_HANDLE)
{
hb_error("muxmp4.c: MP4Create failed!");
job->config.h264.sps[2], /* profile_compat */
job->config.h264.sps[3], /* AVCLevelIndication */
3 ); /* 4 bytes length before each NAL unit */
-
+
MP4AddH264SequenceParameterSet( m->file, mux_data->track,
job->config.h264.sps, job->config.h264.sps_length );
*job->die = 1;
return 0;
}
-
+
/* VOL from FFmpeg or XviD */
if (!(MP4SetTrackESConfiguration( m->file, mux_data->track,
width = job->pixel_aspect_width;
height = job->pixel_aspect_height;
-
+
MP4AddPixelAspectRatio(m->file, mux_data->track, (uint32_t)width, (uint32_t)height);
-
+
MP4SetTrackFloatProperty(m->file, mux_data->track, "tkhd.width", job->width * (width / height));
}
for( i = 0; i < hb_list_count( title->list_audio ); i++ )
{
static u_int8_t reserved2[16] = {
- 0x00, 0x00, 0x00, 0x00,
- 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
0x00, 0x02, 0x00, 0x10,
- 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00,
};
-
+
audio = hb_list_item( title->list_audio, i );
mux_data = malloc( sizeof( hb_mux_data_t ) );
audio->mux_data = mux_data;
if( job->acodec & HB_ACODEC_AC3 ||
job->audio_mixdowns[i] == HB_AMIXDOWN_AC3 )
{
- mux_data->track = MP4AddAC3AudioTrack(
+ mux_data->track = MP4AddAC3AudioTrack(
m->file,
- job->arate, 1536, MP4_MPEG4_AUDIO_TYPE );
- MP4SetTrackBytesProperty(
+ job->arate, 1536, MP4_MPEG4_AUDIO_TYPE );
+ MP4SetTrackBytesProperty(
m->file, mux_data->track,
- "udta.name.value",
+ "udta.name.value",
(const u_int8_t*)"Surround", strlen("Surround"));
} else {
- mux_data->track = MP4AddAudioTrack(
+ mux_data->track = MP4AddAudioTrack(
m->file,
job->arate, 1024, MP4_MPEG4_AUDIO_TYPE );
- MP4SetTrackBytesProperty(
+ MP4SetTrackBytesProperty(
m->file, mux_data->track,
- "udta.name.value",
+ "udta.name.value",
(const u_int8_t*)"Stereo", strlen("Stereo"));
-
+
MP4SetAudioProfileLevel( m->file, 0x0F );
- MP4SetTrackESConfiguration(
+ MP4SetTrackESConfiguration(
m->file, mux_data->track,
audio->config.aac.bytes, audio->config.aac.length );
language_code |= audio->iso639_2[1] - 0x60; language_code <<= 5;
language_code |= audio->iso639_2[2] - 0x60;
MP4SetTrackIntegerProperty(m->file, mux_data->track, "mdia.mdhd.language", language_code);
-
+
/* Set the audio track alternate group */
MP4SetTrackIntegerProperty(m->file, mux_data->track, "tkhd.alternate_group", 1);
-
+
/* If we ever upgrade mpeg4ip, the line above should be replaced with the line below.*/
// MP4SetTrackIntegerProperty(m->file, mux_data->track, "mdia.minf.stbl.stsd.mp4a.channels", (u_int16_t)HB_AMIXDOWN_GET_DISCRETE_CHANNEL_COUNT(audio->amixdown));
-
+
/* store a reference to the first audio track,
so we can use it to feed the chapter text track's sample rate */
if (i == 0) {
firstAudioTrack = mux_data->track;
-
+
/* Enable the first audio track */
MP4SetTrackIntegerProperty(m->file, mux_data->track, "tkhd.flags", (TRACK_ENABLED | TRACK_IN_MOVIE));
}
MP4SetTrackIntegerProperty(m->file, mux_data->track, "tkhd.flags", (TRACK_DISABLED | TRACK_IN_MOVIE));
hb_log("Disabled extra audio track %i", mux_data->track-1);
}
-
+
}
- if (job->chapter_markers)
+ if (job->chapter_markers)
{
/* add a text track for the chapters */
MP4TrackId textTrack;
textTrack = MP4AddChapterTextTrack(m->file, firstAudioTrack);
-
+
m->chapter_track = textTrack;
m->chapter_duration = 0;
m->current_chapter = job->chapter_start;
}
-
+
/* Add encoded-by metadata listing version and build date */
char *tool_string;
tool_string = (char *)malloc(80);
snprintf( tool_string, 80, "HandBrake %s %i", HB_VERSION, HB_BUILD);
MP4SetMetadataTool(m->file, tool_string);
free(tool_string);
-
+
return 0;
}
uint64_t duration;
if( mux_data == job->mux_data )
- {
+ {
/* Add the sample before the new frame.
It is important that this be calculated prior to the duration
of the new video sample, as we want to sync to right after it.
}
sample = MP4GenerateChapterSample( m, duration );
-
- if( !MP4WriteSample(m->file,
- m->chapter_track,
- sample->sample,
- sample->length,
- sample->duration,
+
+ if( !MP4WriteSample(m->file,
+ m->chapter_track,
+ sample->sample,
+ sample->length,
+ sample->duration,
0, true) )
{
hb_error("Failed to write to output file, disk full?");
m->current_chapter++;
m->chapter_duration += duration;
}
-
+
/* Video */
/* Because we use the audio samplerate as the timescale,
we have to use potentially variable durations so the video
duration = MP4_INVALID_DURATION;
}
- /* Here's where the sample actually gets muxed.
+ /* Here's where the sample actually gets muxed.
If it's an audio sample, don't offset the sample's playback.
If it's a video sample and there are no b-frames, ditto.
If there are b-frames, offset by the initDelay plus the
difference between the presentation time stamp x264 gives
and the decoding time stamp from the buffer data. */
- if( !MP4WriteSample( m->file,
- mux_data->track,
- buf->data,
+ if( !MP4WriteSample( m->file,
+ mux_data->track,
+ buf->data,
buf->size,
- duration,
- ((mux_data->track != 1) ||
- (job->areBframes==0) ||
+ duration,
+ ((mux_data->track != 1) ||
+ (job->areBframes==0) ||
(job->vcodec != HB_VCODEC_X264)) ? 0 : ( buf->renderOffset * job->arate / 90000),
((buf->frametype & HB_FRAME_KEY) != 0) ) )
{
- hb_error("Failed to write to output file, disk full?");
+ hb_error("Failed to write to output file, disk full?");
*job->die = 1;
}
-
+
return 0;
}
static int MP4End( hb_mux_object_t * m )
-{
+{
hb_job_t * job = m->job;
/* Write our final chapter marker */
if( m->job->chapter_markers )
{
struct hb_text_sample_s *sample = MP4GenerateChapterSample( m, (m->sum_dur - m->chapter_duration) );
-
- if( !MP4WriteSample(m->file,
- m->chapter_track,
- sample->sample,
- sample->length,
- sample->duration,
+
+ if( !MP4WriteSample(m->file,
+ m->chapter_track,
+ sample->sample,
+ sample->length,
+ sample->duration,
0, true) )
{
- hb_error("Failed to write to output file, disk full?");
+ hb_error("Failed to write to output file, disk full?");
*job->die = 1;
}
free(sample);
}
-
+
if (job->areBframes)
{
// Insert track edit to get A/V back in sync. The edit amount is
{
int codec;
ogg_stream_state os;
- int i_packet_no;
+ int i_packet_no;
};
typedef struct __attribute__((__packed__))
}
}
return 0;
-}
+}
/**********************************************************************
* OGMInit
{
hb_job_t * job = m->job;
hb_title_t * title = job->title;
-
+
hb_audio_t * audio;
hb_mux_data_t * mux_data;
int i;
SetDWLE( &h.i_buffer_size, 30 * 1024 );
SetWLE ( &h.i_bits_per_sample, 0 );
- SetDWLE( &h.header.audio.i_channels, 2 );
+ SetDWLE( &h.header.audio.i_channels, 2 );
SetDWLE( &h.header.audio.i_block_align, 0 );
SetDWLE( &h.header.audio.i_avgbytespersec,
job->abitrate / 8 );
}
free( op.packet );
}
- return 0;
+ return 0;
}
static int OGMEnd( hb_mux_object_t * m )
return -1;
}
ogg_stream_clear( &mux_data->os );
-
+
for( i = 0; i < hb_list_count( title->list_audio ); i++ )
{
audio = hb_list_item( title->list_audio, i );
fclose( m->file );
hb_log( "muxogm: `%s' closed", job->file );
-
+
return 0;
}
Homepage: <http://handbrake.m0k.org/>.
It may be used under the terms of the GNU General Public License. */
-#include <time.h>
+#include <time.h>
#include <sys/time.h>
#if defined( SYS_BEOS )
hb_get_tempory_directory( h, name );
strcat( name, "/" );
-
+
va_start( args, fmt );
vsnprintf( &name[strlen(name)], 1024 - strlen(name), fmt, args );
va_end( args );
#if defined( SYS_BEOS )
long exit_value;
wait_for_thread( t->thread, &exit_value );
-
+
#elif USE_PTHREAD
pthread_join( t->thread, NULL );
//#elif defined( SYS_CYGWIN )
// WaitForSingleObject( t->thread, INFINITE );
#endif
-
+
hb_log( "thread %x joined (\"%s\")",
t->thread, t->name );
hb_lock_close( &t->lock );
free( t->name );
free( t );
- *_t = NULL;
+ *_t = NULL;
}
/************************************************************************
free( n );
return NULL;
}
-
+
return n;
}
hb_dvd_t * dvd;
hb_buffer_t * ps;
hb_stream_t * stream;
-
+
uint sequence;
} hb_reader_t;
r->title = job->title;
r->die = job->die;
r->sequence = 0;
-
+
return hb_thread_init( "reader", ReaderFunc, r,
HB_NORMAL_PRIORITY );
}
return;
}
}
-
+
list = hb_list_init();
r->ps = hb_buffer_init( HB_DVD_READ_BUFFER_SIZE );
chapter = hb_dvd_chapter( r->dvd );
else if (r->stream)
chapter = 1;
-
+
if( chapter < 0 )
{
hb_log( "reader: end of the title reached" );
if( p.progress > 1.0 )
{
p.progress = 1.0;
- }
+ }
p.rate_avg = 0.0;
p.hours = -1;
p.minutes = -1;
hb_list_rem( list, buf );
fifos = GetFifoForId( r->job, buf->id );
if( fifos )
- {
+ {
buf->sequence = r->sequence++;
- for( n = 0; fifos[n] != NULL; n++)
+ for( n = 0; fifos[n] != NULL; n++)
{
if( n != 0 )
{
hb_fifo_is_full( fifos[n] ) )
{
/*
- * Loop until the incoming fifo is reaqdy to receive
+                     * Loop until the incoming fifo is ready to receive
* this buffer.
*/
hb_snooze( 50 );
{
hb_stream_close(&r->stream);
}
-
+
free( r );
_r = NULL;
if( id == 0xE0 )
{
- if( job->indepth_scan )
+ if( job->indepth_scan )
{
/*
* Ditch the video here during the indepth scan until
* we can improve the MPEG2 decode performance.
*/
return NULL;
- }
- else
+ }
+ else
{
fifos[0] = job->fifo_mpeg2;
return fifos;
subtitle->hits++;
if( job->subtitle_force )
{
-
+
fifos[0] = subtitle->fifo_in;
return fifos;
}
return fifos;
}
}
- if( !job->indepth_scan )
+ if( !job->indepth_scan )
{
n = 0;
for( i = 0; i < hb_list_count( title->list_audio ); i++ )
struct SwsContext * context;
AVPicture pic_tmp_in;
AVPicture pic_tmp_crop;
- AVPicture pic_tmp_out;
+ AVPicture pic_tmp_out;
hb_buffer_t * buf_scale;
hb_fifo_t * subtitle_queue;
hb_fifo_t * delay_queue;
void renderClose( hb_work_object_t * );
hb_work_object_t hb_render =
-{
+{
WORK_RENDER,
"Renderer",
renderInit,
static uint8_t *getV(uint8_t *data, int width, int height, int x, int y)
{
- return(&data[(((y/2) * (width/2)) + (x/2)) + (width*height) +
+ return(&data[(((y/2) * (width/2)) + (x/2)) + (width*height) +
(width*height)/4]);
}
{
return;
}
-
- /*
+
+ /*
* If necessary, move the subtitle so it is not in a cropped zone.
* When it won't fit, we center it so we lose as much on both ends.
- * Otherwise we try to leave a 20px or 2% margin around it.
+ * Otherwise we try to leave a 20px or 2% margin around it.
*/
- margin_top = ( ( title->height - job->crop[0] - job->crop[1] ) *
+ margin_top = ( ( title->height - job->crop[0] - job->crop[1] ) *
margin_percent ) / 100;
if( margin_top > 20 )
margin_top = 20;
}
- if( sub->height > title->height - job->crop[0] - job->crop[1] -
+ if( sub->height > title->height - job->crop[0] - job->crop[1] -
( margin_top * 2 ) )
{
/*
* Merge the luminance and alpha with the picture
*/
out[j] = ( (uint16_t) out[j] * ( 16 - (uint16_t) alpha[j] ) +
- (uint16_t) lum[j] * (uint16_t) alpha[j] ) >> 4;
+ (uint16_t) lum[j] * (uint16_t) alpha[j] ) >> 4;
/*
* Set the chroma (colour) based on whether there is
* any alpha at all. Don't try to blend with the picture.
*/
chromaU = getU(buf->data, title->width, title->height,
offset_left+j, offset_top+i);
-
+
chromaV = getV(buf->data, title->width, title->height,
offset_left+j, offset_top+i);
-
+
if( alpha[j] > 0 )
{
/*
- * Add the chroma from the sub-picture, as this is
+ * Add the chroma from the sub-picture, as this is
* not a transparent element.
*/
*chromaU = sub_chromaU[j];
*chromaV = sub_chromaV[j];
- }
+ }
}
}
}
hb_title_t * title = job->title;
hb_buffer_t * in = *buf_in, * buf_tmp_in = *buf_in;
hb_buffer_t * ivtc_buffer = NULL;
-
+
if(!in->data)
{
/* If the input buffer is end of stream, send out an empty one
* use the subtitles.
*/
if( job->indepth_scan )
- {
+ {
*buf_out = NULL;
return HB_WORK_OK;
}
-
+
/* Push subtitles onto queue just in case we need to delay a frame */
if( in->sub )
{
}
/* Setup render buffer */
- hb_buffer_t * buf_render = hb_buffer_init( 3 * job->width * job->height / 2 );
-
+ hb_buffer_t * buf_render = hb_buffer_init( 3 * job->width * job->height / 2 );
+
/* Apply filters */
if( job->filters )
{
int filter_count = hb_list_count( job->filters );
int i;
-
+
for( i = 0; i < filter_count; i++ )
{
hb_filter_object_t * filter = hb_list_item( job->filters, i );
-
+
if( !filter )
{
continue;
- }
-
+ }
+
hb_buffer_t * buf_tmp_out = NULL;
-
+
int result = filter->work( buf_tmp_in,
- &buf_tmp_out,
- PIX_FMT_YUV420P,
- title->width,
- title->height,
+ &buf_tmp_out,
+ PIX_FMT_YUV420P,
+ title->width,
+ title->height,
filter->private_data );
-
- /*
- * FILTER_OK: set temp buffer to filter buffer, continue
- * FILTER_DELAY: set temp buffer to NULL, abort
- * FILTER_DROP: set temp buffer to NULL, pop subtitle, abort
- * FILTER_FAILED: leave temp buffer alone, continue
+
+ /*
+ * FILTER_OK: set temp buffer to filter buffer, continue
+ * FILTER_DELAY: set temp buffer to NULL, abort
+ * FILTER_DROP: set temp buffer to NULL, pop subtitle, abort
+ * FILTER_FAILED: leave temp buffer alone, continue
*/
if( result == FILTER_OK )
{
{
buf_tmp_in = NULL;
break;
- }
+ }
else if( result == FILTER_DROP )
{
if( job->vfr )
pv->lost_time[1] += (temp_duration / 4);
pv->lost_time[2] += (temp_duration / 4);
pv->lost_time[3] += ( temp_duration - (temp_duration / 4) - (temp_duration / 4) - (temp_duration / 4) );
-
+
pv->total_lost_time += temp_duration;
pv->dropped_frames++;
-
- hb_fifo_get( pv->subtitle_queue );
+
+ hb_fifo_get( pv->subtitle_queue );
buf_tmp_in = NULL;
}
else
break;
}
}
- }
+ }
if( buf_tmp_in )
{
/* Cache frame start and stop times, so we can renumber
- time stamps if dropping frames for VFR. */
+ time stamps if dropping frames for VFR. */
int i;
for( i = 3; i >= 1; i-- )
{
pv->last_start[i] = pv->last_start[i-1];
pv->last_stop[i] = pv->last_stop[i-1];
}
-
+
/* In order to make sure we have continuous time stamps, store
the current frame's duration as starting when the last one stopped. */
pv->last_start[0] = pv->last_stop[1];
pv->last_stop[0] = pv->last_start[0] + (in->stop - in->start);
}
-
+
/* Apply subtitles */
if( buf_tmp_in )
{
- hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
+ hb_buffer_t * subtitles = hb_fifo_get( pv->subtitle_queue );
if( subtitles )
{
ApplySub( job, buf_tmp_in, &subtitles );
}
}
-
+
/* Apply crop/scale if specified */
if( buf_tmp_in && pv->context )
{
- avpicture_fill( &pv->pic_tmp_in, buf_tmp_in->data,
+ avpicture_fill( &pv->pic_tmp_in, buf_tmp_in->data,
PIX_FMT_YUV420P,
title->width, title->height );
-
- avpicture_fill( &pv->pic_tmp_out, buf_render->data,
+
+ avpicture_fill( &pv->pic_tmp_out, buf_render->data,
PIX_FMT_YUV420P,
job->width, job->height );
pv->pic_tmp_crop.data, pv->pic_tmp_crop.linesize,
0, title->height - (job->crop[0] + job->crop[1]),
pv->pic_tmp_out.data, pv->pic_tmp_out.linesize);
-
+
hb_buffer_copy_settings( buf_render, buf_tmp_in );
-
+
buf_tmp_in = buf_render;
- }
+ }
/* Set output to render buffer */
(*buf_out) = buf_render;
{
hb_buffer_close( buf_in );
*buf_in = NULL;
- }
+ }
if( buf_out && *buf_out )
{
- hb_buffer_close( buf_out );
+ hb_buffer_close( buf_out );
*buf_out = NULL;
}
}
else if( buf_tmp_in != buf_render )
- {
+ {
/* Copy temporary results and settings into render buffer */
memcpy( buf_render->data, buf_tmp_in->data, buf_render->size );
hb_buffer_copy_settings( buf_render, buf_tmp_in );
}
-
+
if (*buf_out && job->vfr)
{
hb_fifo_push( pv->delay_queue, *buf_out );
- *buf_out = NULL;
+ *buf_out = NULL;
}
/*
* Keep the last three frames in our queue, this ensures that we have the last
* two always in there should we need to rewrite the durations on them.
*/
-
+
if( job->vfr )
{
if( hb_fifo_size( pv->delay_queue ) >= 3 )
* ones you need a 2 frame delay between
* reading input and writing output.
*/
-
+
/* We want to extend the outputted frame's duration by the value
stored in the 4th slot of the lost_time array. Because we need
to adjust all the values in the array so they're contiguous,
extend the duration inside the array first, before applying
it to the current frame buffer. */
pv->last_stop[3] += pv->lost_time[3];
-
+
/* Log how much time has been added back in to the video. */
pv->total_gained_time += pv->lost_time[3];
-
+
/* We've pulled the 4th value from the lost_time array
and added it to the last_stop array's 4th slot. Now, rotate the
lost_time array so the 4th slot now holds the 3rd's value, and
pv->lost_time[i+1] = pv->lost_time[i];
}
pv->lost_time[0] = 0;
-
+
/* Log how many frames have had their durations extended. */
pv->extended_frames++;
}
-
+
/* We can't use the given time stamps. Previous frames
might already have been extended, throwing off the
raw values fed to render.c. Instead, their
lost time, it will have happened above. */
ivtc_buffer->start = pv->last_start[3];
ivtc_buffer->stop = pv->last_stop[3];
-
+
/* Set the 3rd cached frame to start when this one stops,
and so on down the line. If any of them need to be
extended as well to make up lost time, it'll be handled
on the next loop through the renderer. */
- int i;
+ int i;
for (i = 2; i >= 0; i--)
{
int temp_duration = pv->last_stop[i] - pv->last_start[i];
void renderClose( hb_work_object_t * w )
{
- hb_work_private_t * pv = w->private_data;
+ hb_work_private_t * pv = w->private_data;
hb_log("render: lost time: %lld (%i frames)", pv->total_lost_time, pv->dropped_frames);
hb_log("render: gained time: %lld (%i frames) (%lld not accounted for)", pv->total_gained_time, pv->extended_frames, pv->total_lost_time - pv->total_gained_time);
if (pv->dropped_frames)
hb_log("render: average dropped frame duration: %lld", (pv->total_lost_time / pv->dropped_frames) );
-
+
/* Cleanup subtitle queue */
if( pv->subtitle_queue )
{
hb_fifo_close( &pv->subtitle_queue );
}
-
+
if( pv->delay_queue )
{
hb_fifo_close( &pv->delay_queue );
}
-
+
/* Cleanup render work structure */
free( pv );
- w->private_data = NULL;
+ w->private_data = NULL;
}
int renderInit( hb_work_object_t * w, hb_job_t * job )
-{
+{
/* Allocate new private work object */
hb_work_private_t * pv = calloc( 1, sizeof( hb_work_private_t ) );
pv->job = job;
PIX_FMT_YUV420P,
job->width, job->height, PIX_FMT_YUV420P,
(uint16_t)(SWS_LANCZOS|SWS_ACCURATE_RND), NULL, NULL, NULL);
- }
-
+ }
+
/* Setup FIFO queue for subtitle cache */
- pv->subtitle_queue = hb_fifo_init( 8 );
+ pv->subtitle_queue = hb_fifo_init( 8 );
pv->delay_queue = hb_fifo_init( 8 );
/* VFR IVTC needs a bunch of time-keeping variables to track
{
int filter_count = hb_list_count( job->filters );
int i;
-
+
for( i = 0; i < filter_count; i++ )
{
hb_filter_object_t * filter = hb_list_item( job->filters, i );
if( !filter ) continue;
-
+
filter->private_data = filter->init( PIX_FMT_YUV420P,
title->width,
title->height,
filter->settings );
}
}
-
+
return 0;
}
typedef struct
{
hb_handle_t * h;
-
+
char * path;
int title_index;
hb_list_t * list_title;
-
+
hb_dvd_t * dvd;
hb_stream_t * stream;
-
+
} hb_scan_t;
static void ScanFunc( void * );
data->dvd = NULL;
data->stream = NULL;
-
+
/* Try to open the path as a DVD. If it fails, try as a file */
hb_log( "scan: trying to open with libdvdread" );
if( ( data->dvd = hb_dvd_init( data->path ) ) )
hb_list_rem( data->list_title, title );
continue;
}
-
+
if (data->stream)
{
// Stream based processing uses PID's to handle the different audio options for a given title
j++;
}
}
-
+
/* Make sure we found AC3 / DCA rates and bitrates */
for( j = 0; j < hb_list_count( title->list_audio ); )
{
audio->input_channel_layout = HB_INPUT_CH_LAYOUT_STEREO;
}
}
-
+
i++;
}
hb_list_t * list_es, * list_raw;
hb_libmpeg2_t * mpeg2;
int progressive_count = 0;
-
+
buf_ps = hb_buffer_init( HB_DVD_READ_BUFFER_SIZE );
list_es = hb_list_init();
list_raw = hb_list_init();
if (data->dvd)
hb_dvd_start( data->dvd, title->index, 1 );
-
+
for( i = 0; i < 10; i++ )
{
int j, k;
char filename[1024];
//hb_log("Seeking to: %f", (float) ( i + 1 ) / 11.0 );
-
+
if (data->dvd)
{
if( !hb_dvd_seek( data->dvd, (float) ( i + 1 ) / 11.0 ) )
goto error;
}
}
-
+
hb_log( "scan: preview %d", i + 1 );
mpeg2 = hb_libmpeg2_init();
int ar;
hb_libmpeg2_info( mpeg2, &title->width, &title->height,
&title->rate_base, &ar );
-
+
if( title->rate_base == 1126125 )
{
/* Frame FPS is 23.976 (meaning it's progressive), so
which means we should be conservative and use
29.97 as the title's FPS for now.
*/
- title->rate_base = 900900;
+ title->rate_base = 900900;
}
else
{
{
hb_log("Title's mostly progressive NTSC, setting fps to 23.976");
}
- title->rate_base = 1126125;
+ title->rate_base = 1126125;
}
- }
+ }
else if( title->rate_base == 900900 && progressive_count >= 6 )
{
/*
*/
title->rate_base = 1126125;
}
-
+
if( i == 2) // Use the third frame's info, so as to skip opening logos
{
// The aspect ratio may have already been set by parsing the VOB/IFO details on a DVD, however
#define Y buf_raw->data
#define DARK 64
-
+
/* Detect black borders */
-
+
for( j = 0; j < title->width; j++ )
{
for( k = 0; k < title->crop[0]; k++ )
}
for( j = 0; j < title->height; j++ )
{
- for( k = 0; k < title->crop[2]; k++ )
+ for( k = 0; k < title->crop[2]; k++ )
if( Y[ j * title->width + k ] > DARK )
{
title->crop[2] = k;
return ret;
}
-static void LookForAC3AndDCA( hb_title_t * title, hb_buffer_t * b )
+static void LookForAC3AndDCA( hb_title_t * title, hb_buffer_t * b )
{
int i;
int flags;
default:
audio->input_channel_layout = HB_INPUT_CH_LAYOUT_STEREO;
}
-
+
/* add in our own LFE flag if the source has LFE */
if (flags & A52_LFE)
{
}
break;
-
+
}
}
#define min(a, b) a < b ? a : b
typedef enum { hb_stream_type_unknown = 0, hb_stream_type_transport, hb_stream_type_program } hb_stream_type_t;
-
+
#define kMaxNumberDecodeStreams 8
#define kMaxNumberVideoPIDS 16
#define kMaxNumberAudioPIDS 16
char * path;
FILE * file_handle;
hb_stream_type_t stream_type;
-
+
int ps_current_write_buffer_index;
int ps_current_read_buffer_index;
int write_pos;
unsigned char * data;
} ps_decode_buffer[kNumDecodeBuffers];
-
+
struct {
int lang_code;
int flags;
int rate;
int bitrate;
} a52_info[kMaxNumberAudioPIDS];
-
+
int ts_video_pids[kMaxNumberVideoPIDS];
int ts_audio_pids[kMaxNumberAudioPIDS];
-
+
int ts_number_video_pids;
int ts_number_audio_pids;
-
+
unsigned char* ts_packetbuf[kMaxNumberDecodeStreams];
int ts_packetpos[kMaxNumberDecodeStreams];
// int ts_bufpackets[kMaxNumberDecodeStreams];
int ts_streamcont[kMaxNumberDecodeStreams];
int ts_streamid[kMaxNumberDecodeStreams];
int ts_audio_stream_type[kMaxNumberAudioPIDS];
-
- struct
+
+ struct
{
unsigned short program_number;
unsigned short program_map_PID;
} pat_info[kMaxNumberPMTStreams];
int ts_number_pat_entries;
-
+
struct
{
int reading;
unsigned char *tablebuf;
unsigned int tablepos;
unsigned char current_continuity_counter;
-
+
int section_length;
int program_number;
unsigned int PCR_PID;
d->ps_decode_buffer[i].data = NULL;
}
}
-
+
for (i = 0; i < kMaxNumberDecodeStreams; i++)
{
if (d->ts_packetbuf[i])
char *dot_term = strrchr(aTitle->name, '.');
if (dot_term)
*dot_term = '\0';
-
+
// Height, width, rate and aspect ratio information is filled in when the previews are built
hb_stream_duration(stream, aTitle);
-
+
// One Chapter
hb_chapter_t * chapter;
chapter = calloc( sizeof( hb_chapter_t ), 1 );
chapter->minutes = aTitle->minutes;
chapter->seconds = aTitle->seconds;
hb_list_add( aTitle->list_chapter, chapter );
-
+
// Figure out how many audio streams we really have:
// - For transport streams, for each PID listed in the PMT (whether
// or not it was an audio stream type) read the bitstream until we
* of the video. This says that we want to compute the rate over relatively
* long segments to get a representative average but long segments increase
* the likelihood that we'll cross a piece boundary.
- *
+ *
* What we do is take time stamp samples at several places in the file
* (currently 16) then compute the average rate (i.e., ticks of video per
* byte of the file) for all pairs of samples (N^2 rates computed for N
}
else
{
- // Not quite enough data in the buffer - transfer what is present, fill the buffer and then
+ // Not quite enough data in the buffer - transfer what is present, fill the buffer and then
// transfer what's still needed.
int transfer_size = HB_DVD_READ_BUFFER_SIZE;
int amt_avail_to_transfer = src_stream->ps_decode_buffer[read_buffer_index].len - src_stream->ps_decode_buffer[read_buffer_index].read_pos;
src_stream->ps_decode_buffer[read_buffer_index].read_pos = 0;
src_stream->ps_decode_buffer[read_buffer_index].write_pos = 0;
src_stream->ps_decode_buffer[read_buffer_index].len = 0;
-
+
// Fill the buffer
hb_ts_stream_decode(src_stream);
-
+
// Decoding will almost certainly have changed the current read buffer index
read_buffer_index = src_stream->ps_current_read_buffer_index;
-
+
if (src_stream->ps_decode_buffer[read_buffer_index].len == 0)
{
hb_log("hb_stream_read - buffer after decode has zero length data");
return 0;
}
-
+
// Read the bit we still need
memcpy(b->data+amt_avail_to_transfer, src_stream->ps_decode_buffer[read_buffer_index].data + src_stream->ps_decode_buffer[read_buffer_index].read_pos,transfer_size);
src_stream->ps_decode_buffer[read_buffer_index].read_pos += transfer_size;
-
+
return 1;
- }
+ }
}
else
return 0;
new_pos = (off_t) ((double) (stream_size) * pos_ratio);
new_pos &=~ (HB_DVD_READ_BUFFER_SIZE - 1);
int r = fseeko(src_stream->file_handle, new_pos, SEEK_SET);
-
+
if (r == -1)
{
fseeko(src_stream->file_handle, cur_pos, SEEK_SET);
return 0;
}
-
+
if (src_stream->stream_type == hb_stream_type_transport)
{
// We need to drop the current decoder output and move
// forwards to the next transport stream packet.
hb_ts_stream_reset(src_stream);
}
-
+
// Now we must scan forwards for a valid start code (0x000001BA)
int done = 0;
hb_buffer_t *buf = hb_buffer_init(HB_DVD_READ_BUFFER_SIZE);
// start looking 20% into the file since there's occasionally no
// audio at the beginning (particularly for vobs).
hb_stream_seek(stream, 0.2f);
-
+
while (--blksleft >= 0 && hb_stream_read(stream, buf) == 1)
{
hb_buffer_t *es;
void hb_stream_update_audio(hb_stream_t *stream, hb_audio_t *audio)
{
iso639_lang_t *lang;
-
+
if (stream->stream_type == hb_stream_type_transport)
{
// Find the audio stream info for this PID. The stream index is
stream->a52_info[i].rate = 48000 /*Hz*/;
stream->a52_info[i].bitrate = 384000 /*Bps*/;
}
-
+
lang = lang_for_code(stream->a52_info[i].lang_code);
if (!audio->rate)
audio->rate = stream->a52_info[i].rate;
// XXX should try to get language code from the AC3 bitstream
lang = lang_for_code(0x0000);
}
-
+
if (!audio->input_channel_layout)
{
switch( audio->ac3flags & A52_CHANNEL_MASK )
audio->input_channel_layout = audio->input_channel_layout | HB_INPUT_CH_LAYOUT_HAS_LFE;
}
}
-
+
snprintf( audio->lang, sizeof( audio->lang ), "%s (%s)", strlen(lang->native_name) ? lang->native_name : lang->eng_name,
audio->codec == HB_ACODEC_AC3 ? "AC3" : ( audio->codec == HB_ACODEC_MPGA ? "MPEG" : ( audio->codec == HB_ACODEC_DCA ? "DTS" : "LPCM" ) ) );
snprintf( audio->lang_simple, sizeof( audio->lang_simple ), "%s", strlen(lang->native_name) ? lang->native_name : lang->eng_name );
else if (stream->stream_type == hb_stream_type_transport)
{
int read_buffer_index = stream->ps_current_read_buffer_index;
-
- // Transport streams are a little more tricky - so long as the
+
+ // Transport streams are a little more tricky - so long as the
// amount to back up is still within the current decode buffer
// we can just adjust the read pos.
if (stream->ps_decode_buffer[read_buffer_index].read_pos - i > 0)
*
**********************************************************************/
#define PS_DECODE_BUFFER_SIZE ( 1024 * 1024 * 4)
-
+
static void hb_ts_stream_init(hb_stream_t *stream)
{
// Output Program Stream
stream->ps_decode_buffer[i].len = 0;
stream->ps_decode_buffer[i].write_pos = 0;
}
-
+
for (i=0; i < kMaxNumberDecodeStreams; i++)
{
stream->ts_streamcont[i] = -1;
}
-
+
stream->ps_current_write_buffer_index = 0;
stream->ps_current_read_buffer_index = 1;
-
+
// Find the audio and video pids in the stream
hb_ts_stream_find_pids(stream);
-
+
for (i=0; i < stream->ts_number_video_pids; i++)
{
// In progress audio/video data during the transport stream -> program stream processing
stream->ts_packetbuf[i] = (unsigned char *) malloc(1024 * 1024);
stream->ts_streamid[i] = 0xE0; // Stream is Video
}
-
+
for (i = stream->ts_number_video_pids; i < stream->ts_number_video_pids + stream->ts_number_audio_pids; i++)
{
stream->ts_packetbuf[i] = (unsigned char *) malloc(1024 * 1024);
int pos = bitpos >> 3;
bitval = (bitbuf[pos] << 24) | (bitbuf[pos + 1] << 16) | (bitbuf[pos + 2] << 8) | bitbuf[pos + 3];
-
+
if (bits > 0)
{
val |= (bitval >> (32 - bits)) & bitmask[bits];
for (i = 0; i < program_info_length; i++)
{
descriptor_buf[i] = get_bits(8);
- }
-
+ }
+
int cur_pos = 9 /* data after the section length field*/ + program_info_length;
int done_reading_stream_types = 0;
while (!done_reading_stream_types)
unsigned int elementary_PID = get_bits(13);
get_bits(4);
unsigned int ES_info_length = get_bits(12);
-
+
int i=0;
unsigned char *ES_info_buf = (unsigned char *) malloc(ES_info_length);
for (i=0; i < ES_info_length; i++)
{
ES_info_buf[i] = get_bits(8);
}
-
+
if (stream_type == 0x02)
{
if (stream->ts_number_video_pids <= kMaxNumberVideoPIDS)
stream->ts_number_audio_pids++;
stream->ts_audio_pids[i] = elementary_PID;
stream->ts_audio_stream_type[i] = stream_type;
-
+
if (ES_info_length > 0)
{
decode_element_descriptors(stream, i, ES_info_buf, ES_info_length);
}
cur_pos += 5 /* stream header */ + ES_info_length;
-
+
free(ES_info_buf);
-
+
if (cur_pos >= section_length - 4 /* stop before the CRC */)
done_reading_stream_types = 1;
}
-
+
free(descriptor_buf);
return 1;
}
{
pointer_len = buf[4 + adapt_len] + 1;
stream->pmt_info.tablepos = 0;
- }
+ }
// Get Continuity Counter
int continuity_counter = buf[3] & 0x0f;
if (!start && (stream->pmt_info.current_continuity_counter + 1 != continuity_counter))
if (stream->pmt_info.reading && (amount_to_copy > 0))
{
stream->pmt_info.tablebuf = realloc(stream->pmt_info.tablebuf, stream->pmt_info.tablepos + amount_to_copy);
-
+
memcpy(stream->pmt_info.tablebuf + stream->pmt_info.tablepos, buf + 4 + adapt_len + pointer_len, amount_to_copy);
stream->pmt_info.tablepos += amount_to_copy;
}
{
unsigned char tablebuf[1024];
unsigned int tablepos = 0;
-
+
int reading = 0;
{
memcpy(tablebuf + tablepos, buf + 4 + adapt_len + 1, pointer_len - 1);
-
+
unsigned int pos = 0;
//while (pos < tablepos)
{
{
unsigned int pkt_program_num = get_bits(16);
stream->pat_info[stream->ts_number_pat_entries].program_number = pkt_program_num;
-
+
get_bits(3); // Reserved
if (pkt_program_num == 0)
{
stream->ps_current_write_buffer_index++;
if (stream->ps_current_write_buffer_index > kNumDecodeBuffers-1)
stream->ps_current_write_buffer_index = 0;
-
+
if ( (stream->ps_decode_buffer[stream->ps_current_write_buffer_index].len != 0) || (stream->ps_decode_buffer[stream->ps_current_write_buffer_index].write_pos != 0) )
{
hb_log("flushbuf - new buffer (index %d) has non zero length and write position !", stream->ps_current_write_buffer_index);
return 0;
}
-
+
stream->ps_current_read_buffer_index = old_write_index;
stream->ps_decode_buffer[stream->ps_current_read_buffer_index].read_pos = 0;
-
+
return 1;
}
int size = elsize;
if (elnum > 1)
size *= elnum;
-
+
int written = 0;
int current_write_index = stream->ps_current_write_buffer_index;
-
+
if (size <= stream->ps_decode_buffer[current_write_index].size - stream->ps_decode_buffer[current_write_index].write_pos)
{
memcpy(stream->ps_decode_buffer[current_write_index].data + stream->ps_decode_buffer[current_write_index].write_pos, buf, size);
{
			// Flushing the buffer will have changed the current write buffer
current_write_index = stream->ps_current_write_buffer_index;
-
+
memcpy(stream->ps_decode_buffer[current_write_index].data, (unsigned char*)buf + written, size - written);
stream->ps_decode_buffer[current_write_index].write_pos += size - written;
stream->ps_decode_buffer[current_write_index].len = stream->ps_decode_buffer[current_write_index].write_pos;
set_bits(0, 1); // PES_CRC_flag 1
set_bits(0, 1); // PES_extension_flag 1
set_bits(hdrlen, 8); // PES_header_data_length 8
-
+
if (PTS_DTS_flags == 2)
{
set_bits(2, 4); // '0010' 4
{
unsigned char ac3_substream_id[4];
int ac3len = 0;
-
+
if (write_ac3)
{
// Make a four byte DVD ac3 stream header
ac3_substream_id[3] = 0x02;
ac3len = 4;
}
-
+
int written = 0; // Bytes we've written to output file
int pos = 0; // Position in PES packet buffer
-
+
for (;;)
{
if ((stream->ps_decode_buffer[stream->ps_current_write_buffer_index].len % HB_DVD_READ_BUFFER_SIZE) != 0)
hb_log("write_output_stream - Failed to write output file!");
return 1;
}
-
+
// Write stuffing
int i=0;
for (i = 0; i < stuffing; i++) // Write any stuffing bytes
static void hb_ts_handle_mpeg_audio(hb_stream_t *stream, int curstream, unsigned char* buf, int adapt_len )
{
// Although we don't have AC3/A52 audio here we can still use the same structure to record this useful information.
-
+
stream->a52_info[curstream - stream->ts_number_video_pids].flags = A52_STEREO;
stream->a52_info[curstream - stream->ts_number_video_pids].rate = 48000 /*Hz*/;
stream->a52_info[curstream - stream->ts_number_video_pids].bitrate = 384000 /*Bps*/;
// Read the Transport Stream Packets (188 bytes each) looking at first for PID 0 (the PAT PID), then decode that
// to find the program map PID and then decode that to get the list of audio and video PIDs
-
+
int bytesReadInPacket = 0;
for (;;)
{
bytesReadInPacket += bytesRead;
hb_log("hb_ts_stream_find_pids - end of file");
- break;
+ break;
}
else
{
// Get pid
int pid = (((buf[1] & 0x1F) << 8) | buf[2]) & 0x1FFF;
-
+
if ((pid == 0x0000) && (stream->ts_number_pat_entries == 0))
{
decode_PAT(buf, stream);
continue;
}
-
+
int pat_index = 0;
for (pat_index = 0; pat_index < stream->ts_number_pat_entries; pat_index++)
{
// multiple programs in the same transport stream, and yet there's actually only one
// program really in the stream. This seems to be true for transport streams that
// originate in the HDHomeRun but have been output by EyeTV's export utility. What I think
- // is happening is that the HDHomeRun is sending the entire transport stream as broadcast,
- // but the EyeTV is only recording a single (selected) program number and not rewriting the
+ // is happening is that the HDHomeRun is sending the entire transport stream as broadcast,
+ // but the EyeTV is only recording a single (selected) program number and not rewriting the
// PAT info on export to match what's actually on the stream.
// Until we have a way of handling multiple programs per transport stream elegantly we'll match
// on the first pat entry for which we find a matching program map PID. The ideal solution would
if ((stream->ts_number_video_pids > 0) && (stream->ts_number_audio_pids > 0))
break;
}
-
+
hb_log("hb_ts_stream_find_pids - found the following PIDS");
hb_log(" Video PIDS : ");
int i=0;
int index_of_video_pid(int pid, hb_stream_t *stream)
{
int found_pid = -1, i = 0;
-
+
for (i = 0; (i < stream->ts_number_video_pids) && (found_pid < 0); i++)
{
if (pid == stream->ts_video_pids[i])
int index_of_pid(int pid, hb_stream_t *stream)
{
int found_pid = -1;
-
+
if ((found_pid = index_of_video_pid(pid, stream)) >= 0)
return found_pid;
-
+
if ((found_pid = index_of_audio_pid(pid, stream)) >= 0)
return found_pid;
-
+
return found_pid;
}
unsigned char buf[188];
int curstream;
int doing_iframe;
-
+
int i = 0;
for (i=0; i < stream->ts_number_video_pids + stream->ts_number_audio_pids; i++)
{
stream->ts_skipbad[i] = 0;
}
-
+
doing_iframe = 0;
-
+
if ((stream->ts_number_video_pids == 0) || (stream->ts_number_audio_pids == 0))
{
hb_log("hb_ts_stream_decode - no Video or Audio PID selected, cannot decode transport stream");
return;
}
-
+
int curr_write_buffer_index = stream->ps_current_write_buffer_index;
-
+
// Write output data until a buffer switch occurs.
while (curr_write_buffer_index == stream->ps_current_write_buffer_index)
{
// Check sync byte
if ((buf[0] != 0x47) && (buf[0] != 0x72) && (buf[0] != 0x29))
{
- // lost sync - back up to where we started then try to
+ // lost sync - back up to where we started then try to
// re-establish sync.
off_t pos = ftello(stream->file_handle) - 188;
off_t pos2 = align_to_next_packet(stream->file_handle);
}
else
curstream = index_of_selected_pid;
-
+
// Get start code
int start;
start = (buf[1] & 0x40) != 0;
-
+
if (!start && stream->ts_skipbad[curstream])
continue;
}
stream->ts_streamcont[curstream] = continuity;
}
-
+
// Get adaption header size
if (adaption == 0)
{
// Couldn't find an AC3 sync start in this packet.. don't make a PES packet!
if (!sync_found)
{
- adapt_len = 184;
+ adapt_len = 184;
start = 0;
}
}
stream->ts_foundfirst[curstream] |= 1;
}
}
-
+
// If we were skipping a bad packet, start fresh on this new PES packet..
if (stream->ts_skipbad[curstream] == 1)
{
write_ac3 = hb_ts_handle_ac3_audio(stream, curstream, buf, adapt_len);
}
}
-
+
if (generate_output_data(stream, write_ac3, curstream, pid) != 0)
return ;
}
{
hb_audio_t * audio;
int64_t count_frames;
-
+
/* Raw */
SRC_STATE * state;
SRC_DATA data;
{
chapter = hb_list_item( title->list_chapter, i - 1 );
duration += chapter->duration;
- }
+ }
duration += 90000;
/* 1 second safety so we're sure we won't miss anything */
pv->count_frames_max = duration * job->vrate / job->vrate_base / 90000;
hb_work_private_t * pv = w->private_data;
hb_job_t * job = pv->job;
hb_title_t * title = job->title;
-
+
int i;
if( pv->cur ) hb_buffer_close( &pv->cur );
src_delete( pv->sync_audio[i].state );
}
}
-
+
free( pv );
w->private_data = NULL;
}
{
hb_log( "sync: avcodec_encode_audio failed" );
}
-
+
free( zeros );
avcodec_close( c );
av_free( c );
/***********************************************************************
* SyncVideo
***********************************************************************
- *
+ *
**********************************************************************/
static int SyncVideo( hb_work_object_t * w )
{
hb_log( "sync: got %lld frames, %lld expected",
pv->count_frames, pv->count_frames_max );
pv->done = 1;
-
+
hb_buffer_t * buf_tmp;
// Drop an empty buffer into our output to ensure that things
// get flushed all the way out.
buf_tmp = hb_buffer_init(0); // Empty end buffer
hb_fifo_push( job->fifo_sync, buf_tmp );
-
+
return HB_WORK_DONE;
}
{
hb_log( "Sync: Video PTS discontinuity %s (current buffer start=%lld, next buffer start=%lld)",
pv->discontinuity ? "second" : "first", cur->start, next->start );
-
+
/*
* Do we need to trash the subtitle, is it from the next->start period
* or is it from our old position. If the latter then trash it.
if( sub2 && sub->stop > sub2->start )
sub->stop = sub2->start;
- // hb_log("0x%x: video seq: %lld subtitle sequence: %lld",
+ // hb_log("0x%x: video seq: %lld subtitle sequence: %lld",
// sub, cur->sequence, sub->sequence);
if( sub->sequence > cur->sequence )
* code.
*/
break;
- }
- else
+ }
+ else
{
/*
* The stop time is in the past. But is it due to
}
}
- /*
- * The subtitle is older than this picture, trash it
+ /*
+ * The subtitle is older than this picture, trash it
*/
sub = hb_fifo_get( pv->subtitle->fifo_raw );
hb_buffer_close( &sub );
if( sub->stop > sub->start)
{
/*
- * Normal subtitle which ends after it starts, check to
+ * Normal subtitle which ends after it starts, check to
* see that the current video is between the start and end.
*/
if( cur->start > sub->start &&
*
* fall through to display
*/
- }
+ }
else
{
/*
*
* fall through to display.
*/
- }
- else
+ }
+ else
{
/*
* Defer until the play point is within the subtitle
pts_expected = pv->pts_offset +
pv->count_frames * pv->job->vrate_base / 300;
- //hb_log("Video expecting PTS %lld, current frame: %lld, next frame: %lld, cf: %lld",
+ //hb_log("Video expecting PTS %lld, current frame: %lld, next frame: %lld, cf: %lld",
// pts_expected, cur->start, next->start, pv->count_frames * pv->job->vrate_base / 300 );
if( cur->start < pts_expected - pv->job->vrate_base / 300 / 2 &&
hb_buffer_close( &cur );
pv->cur = cur = hb_fifo_get( job->fifo_raw );
cur->new_chap |= chap_break; // Make sure we don't stomp the existing one.
-
+
continue;
}
/* We'll need the current frame more than one time. Make a
copy of it and keep it */
buf_tmp = hb_buffer_init( cur->size );
- memcpy( buf_tmp->data, cur->data, cur->size );
+ memcpy( buf_tmp->data, cur->data, cur->size );
buf_tmp->sequence = cur->sequence;
}
else
buf_tmp = cur;
pv->cur = cur = hb_fifo_get( job->fifo_raw );
}
-
+
/* Replace those MPEG-2 dates with our dates */
buf_tmp->start = (uint64_t) pv->count_frames *
pv->job->vrate_base / 300;
{
hb_log( "sync: got too many frames (%lld), exiting early", pv->count_frames );
pv->done = 1;
-
+
// Drop an empty buffer into our output to ensure that things
// get flushed all the way out.
buf_tmp = hb_buffer_init(0); // Empty end buffer
hb_fifo_push( job->fifo_sync, buf_tmp );
-
+
break;
}
}
/***********************************************************************
* SyncAudio
***********************************************************************
- *
+ *
**********************************************************************/
static void SyncAudio( hb_work_object_t * w, int i )
{
if( pv->discontinuity )
{
/*
- * There is an outstanding discontinuity, so use the offset from
+ * There is an outstanding discontinuity, so use the offset from
* that discontinuity.
*/
pts_expected = pv->pts_offset_old + sync->count_frames *
* Try and reconverge regardless. so continue on to
* our convergence code below which will kick in as
* it will be more than 100ms out.
- *
+ *
* Note that trashing the Audio could make things
* worse if the Audio is in front because we will end
* up diverging even more. We need to hold on to the
hb_log("Sync: Audio is way out of sync, attempt to reconverge from current video PTS");
pv->way_out_of_sync = 1;
}
-
+
/*
* It wasn't from the old place, so we must be from
* the new, but just too far out. So attempt to
}
InsertSilence( w, i );
continue;
- }
- else
+ }
+ else
{
if( pv->trashing_audio || pv->inserting_silence )
{
}
if( hb_fifo_is_full( fifo ) &&
- pv->way_out_of_sync )
+ pv->way_out_of_sync )
{
/*
* Trash the top audio packet to avoid dead lock as we reconverge.
3 * sizeof( uint64_t ) );
pv->st_dates[3] = hb_get_date();
pv->st_counts[3] = pv->count_frames;
- }
+ }
#define p state.param.working
state.state = HB_STATE_WORKING;
p.progress = (float) pv->count_frames / (float) pv->count_frames_max;
if( p.progress > 1.0 )
{
- p.progress = 1.0;
+ p.progress = 1.0;
}
p.rate_cur = 1000.0 *
(float) ( pv->st_counts[3] - pv->st_counts[0] ) /
hb_title_t * title;
int i, j;
hb_work_object_t * w;
-
+
/* FIXME: This feels really hackish, anything better? */
hb_work_object_t * audio_w = NULL;
hb_work_object_t * sub_w = NULL;
if (job->maxWidth && (job->width > job->maxWidth) && (job->pixel_ratio != 2))
{
job->width = job->maxWidth;
- hb_fix_aspect( job, HB_KEEP_WIDTH );
+ hb_fix_aspect( job, HB_KEEP_WIDTH );
hb_log("Width out of bounds, scaling down to %i", job->maxWidth);
hb_log("New dimensions %i * %i", job->width, job->height);
}
if ( job->grayscale )
hb_log( " + grayscale mode" );
-
+
if ( job->vfr )
{
- int detelecine_present = 0;
+ int detelecine_present = 0;
if ( job->filters )
{
for( i = 0; i < hb_list_count( job->filters ); i++ )
hb_filter_object_t * filter = hb_list_item( job->filters, i );
if (filter->id == FILTER_DETELECINE)
detelecine_present = 1;
- }
+ }
}
-
+
if (!detelecine_present)
{
/* Allocate the filter. */
hb_filter_object_t * filter = malloc( sizeof( hb_filter_object_t ) );
-
+
/* Copy in the contents of the detelecine struct. */
memcpy( filter, &hb_filter_detelecine, sizeof( hb_filter_object_t ) );
/* Set the name to a copy of the template name so render.c has something to free. */
filter->name = strdup(hb_filter_detelecine.name);
-
+
/* Add it to the list. */
hb_list_add( job->filters, filter );
-
+
hb_log("work: VFR mode -- adding detelecine filter");
}
}
-
+
if( hb_list_count( job->filters ) )
{
hb_log(" + filters");
hb_log(" + %s (default settings)", filter->name);
}
}
-
+
if( job->vfr)
{
hb_log( " + video frame rate: variable (detected %.3f fps)", (float) job->vrate /
{
hb_log( " + video frame rate: %.3f fps", (float) job->vrate / (float) job->vrate_base);
}
-
+
if( job->vquality >= 0.0 && job->vquality <= 1.0 )
{
hb_log( " + video quality %.2f", job->vquality );
w->fifo_in = job->fifo_render;
w->fifo_out = job->fifo_mpeg4;
w->config = &job->config;
-
+
hb_list_add( job->list_work, w );
- if( job->select_subtitle && !job->indepth_scan )
+ if( job->select_subtitle && !job->indepth_scan )
{
/*
* Must be second pass of a two pass with subtitle scan enabled, so
}
}
- for( i=0; i < hb_list_count(title->list_subtitle); i++ )
+ for( i=0; i < hb_list_count(title->list_subtitle); i++ )
{
subtitle = hb_list_item( title->list_subtitle, i );
if( subtitle )
{
hb_log( " + subtitle %x, %s", subtitle->id, subtitle->lang );
-
+
subtitle->fifo_in = hb_fifo_init( FIFO_CPU_MULT * cpu_count );
subtitle->fifo_raw = hb_fifo_init( FIFO_CPU_MULT * cpu_count );
-
+
/*
* Disable forced subtitles if we didn't find any in the scan
* so that we display normal subtitles instead.
*
* select_subtitle implies that we did a scan.
*/
- if( !job->indepth_scan && job->subtitle_force &&
- job->select_subtitle )
+ if( !job->indepth_scan && job->subtitle_force &&
+ job->select_subtitle )
{
if( subtitle->forced_hits == 0 )
{
* looking for forced subtitles.
*/
if( sub_w != NULL )
- {
+ {
/*
* Need to copy the prior subtitle structure so that we
* don't overwrite the fifos.
if( job->acodec & HB_ACODEC_AC3 )
{
hb_log( " + audio AC3 passthrough" );
-
+
/* Hard set correct sample rate for AC3 */
job->arate = 48000;
}
"faac" : ( ( job->acodec & HB_ACODEC_LAME ) ? "lame" :
"vorbis" ) );
}
-
+
if ( job->dynamic_range_compression > 1 )
hb_log(" + dynamic range compression: %f", job->dynamic_range_compression);
- /* if we are doing AC3 passthru (at the codec level, not pass-through),
+ /* if we are doing AC3 passthru (at the codec level, not pass-through),
* then remove any non-AC3 audios from the job */
/* otherwise, Bad Things will happen */
for( i = 0; i < hb_list_count( title->list_audio ); )
{
audio = hb_list_item( title->list_audio, i );
hb_log( " + %x, %s", audio->id, audio->lang );
-
+
/* sense-check the current mixdown options */
/* log the requested mixdown */
{
/* find out what the format of our source audio is */
switch (audio->input_channel_layout & HB_INPUT_CH_LAYOUT_DISCRETE_NO_LFE_MASK) {
-
+
/* mono sources */
case HB_INPUT_CH_LAYOUT_MONO:
/* regardless of what stereo mixdown we've requested, a mono source always get mixed down
w->config = &audio->config;
w->amixdown = audio->amixdown;
w->source_acodec = audio->codec;
-
+
/* FIXME: This feels really hackish, anything better? */
audio_w = calloc( sizeof( hb_work_object_t ), 1 );
audio_w = memcpy( audio_w, w, sizeof( hb_work_object_t ));
-
+
hb_list_add( job->list_work, audio_w );
/*
w->config = &audio->config;
w->amixdown = audio->amixdown;
w->source_acodec = audio->codec;
-
+
/* FIXME: This feels really hackish, anything better? */
audio_w = calloc( sizeof( hb_work_object_t ), 1 );
audio_w = memcpy( audio_w, w, sizeof( hb_work_object_t ));
-
+
hb_list_add( job->list_work, audio_w );
}
hb_list_rem( job->list_work, w );
hb_thread_close( &w->thread );
w->close( w );
-
+
/* FIXME: This feels really hackish, anything better? */
if ( w->id == WORK_DECA52 ||
w->id == WORK_DECDCA ||
w = NULL;
}
}
-
+
hb_list_close( &job->list_work );
/* Stop read & write threads */
* Before closing the title print out our subtitle stats if we need to
* Find the highest and lowest.
*/
- for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
+ for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
{
subtitle = hb_list_item( title->list_subtitle, i );
hb_log( "Subtitle stream 0x%x '%s': %d hits (%d forced)",
subtitle->id, subtitle->lang, subtitle->hits,
subtitle->forced_hits );
- if( subtitle->hits > subtitle_highest )
+ if( subtitle->hits > subtitle_highest )
{
subtitle_highest = subtitle->hits;
subtitle_highest_id = subtitle->id;
- }
-
- if( subtitle->hits < subtitle_lowest )
+ }
+
+ if( subtitle->hits < subtitle_lowest )
{
subtitle_lowest = subtitle->hits;
subtitle_lowest_id = subtitle->id;
}
}
}
-
+
if( job->native_language ) {
/*
* We still have a native_language, so the audio and subtitles are
subtitle_hit = subtitle_forced_id;
hb_log("Found a subtitle candidate id 0x%x (contains forced subs)",
subtitle_hit);
- } else if( subtitle_lowest < subtitle_highest )
+ } else if( subtitle_lowest < subtitle_highest )
{
/*
* OK we have more than one, and the lowest is lower,
*
* Let's say 10% as a default.
*/
- if( subtitle_lowest < ( subtitle_highest * 0.1 ) )
+ if( subtitle_lowest < ( subtitle_highest * 0.1 ) )
{
subtitle_hit = subtitle_lowest_id;
hb_log( "Found a subtitle candidate id 0x%x",
}
}
- if( job->select_subtitle )
+ if( job->select_subtitle )
{
- if( job->indepth_scan )
+ if( job->indepth_scan )
{
- for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
+ for( i=0; i < hb_list_count( title->list_subtitle ); i++ )
{
subtitle = hb_list_item( title->list_subtitle, i );
- if( subtitle->id == subtitle_hit )
+ if( subtitle->id == subtitle_hit )
{
hb_list_rem( title->list_subtitle, subtitle );
*( job->select_subtitle ) = subtitle;
hb_filter_close( &filter );
}
hb_list_close( &job->filters );
- }
+ }
hb_buffer_pool_free();
hb_log("work %s: Copying Chapter Break @ %lld", w->name, buf_in->start);
buf_out->new_chap = 1;
}
-
+
if( buf_in )
{
hb_buffer_close( &buf_in );
#define is_newline(_x) ( (_x) == 13 || \
(_x) == 11 || \
(_x) == 10 )
-
+
#define is_white(_x) ( (_x) == '\t' || \
(_x) == ' ' || \
- is_newline(_x) )
-
+ is_newline(_x) )
+
#define is_sep(_x) ( (_x) == ',' )
#define is_esc(_x) ( (_x) == '\\' )
{
hb_csv_file_t *file = NULL;
FILE * fileref;
-
+
if( filepath == NULL )
{
return file;
}
-
+
fileref = fopen( filepath, "r" );
if( fileref == NULL )
{
return file;
}
-
+
file = malloc( sizeof( hb_csv_file_t ) );
file->fileref = fileref;
file->eof = 0;
return file;
}
-void hb_close_csv_file( hb_csv_file_t *file )
+void hb_close_csv_file( hb_csv_file_t *file )
{
if( file == NULL )
{
return;
}
-
+
fclose( file->fileref );
free( file );
}
{
return cell;
}
-
+
if( file->eof )
{
return cell;
}
-
+
cell = malloc( sizeof( hb_csv_cell_t ) );
cell->cell_row = file->curr_row;
cell->cell_col = file->curr_col;
}
}
}
-
+
if( c == CSV_CHAR_EOF )
{
file->eof = 1;
}
-
+
/* Terminate the cell text */
cell->cell_text[index] = '\0';
hb_trim_end( cell->cell_text );
{
return;
}
-
+
free( cell );
}
int byte;
uint16_t c;
int need_char = 1;
-
+
if( file == NULL )
{
return CSV_CHAR_ERROR;
}
-
+
while( need_char )
{
byte = fgetc( file->fileref );
{
return CSV_CHAR_ERROR;
}
-
+
if( file->parse_state == CSV_PARSE_SEEK && is_white(byte) )
{
continue;
c = (uint16_t)byte;
}
}
-
+
return c;
}
}
int i = strlen(text) - 1;
-
+
for( i = strlen(text) - 1; i >= 0 && is_white(text[i]) ; i-- )
{
text[i] = '\0';
/****************************************************************************
* hb_error_handler
- *
+ *
* When using the CLI just display using hb_log as we always did in the past
* make sure that we prefix with a nice ERROR message to catch peoples eyes.
****************************************************************************/
if( x264opts ) free (x264opts );
if( x264opts2 ) free (x264opts2 );
if (preset_name) free (preset_name);
-
+
fprintf( stderr, "HandBrake has exited.\n" );
return 0;
int longest_title_pos=-1;
int longest_title_time=0;
int title_time;
-
+
fprintf( stderr, "Searching for longest title...\n" );
for( i = 0; i < hb_list_count( list ); i++ )
{
title = hb_list_item( list, i );
title_time = (title->hours*60*60 ) + (title->minutes *60) + (title->seconds);
- fprintf( stderr, " + Title (%d) index %d has length %dsec\n",
+ fprintf( stderr, " + Title (%d) index %d has length %dsec\n",
i, title->index, title_time );
if( longest_title_time < title_time )
{
longest_title_time = title_time;
longest_title_pos = i;
longest_title_idx = title->index;
- }
+ }
}
- if( longest_title_pos == -1 )
+ if( longest_title_pos == -1 )
{
fprintf( stderr, "No longest title found.\n" );
die = 1;
break;
}
titleindex = longest_title_idx;
- fprintf( stderr, "Found longest title, setting title to %d\n",
+ fprintf( stderr, "Found longest title, setting title to %d\n",
longest_title_idx);
title = hb_list_item( list, longest_title_pos);
if (preset)
{
fprintf( stderr, "+ Using preset: %s", preset_name);
-
+
if (!strcmp(preset_name, "Animation"))
{
mux = HB_MUX_MKV;
pixelratio = 1;
}
}
-
+
if ( chapter_markers )
{
job->chapter_markers = chapter_markers;
hb_csv_cell_t * cell;
int row = 0;
int chapter = 0;
-
+
fprintf( stderr, "Reading chapter markers from file %s\n", marker_file );
-
+
if( file == NULL )
{
fprintf( stderr, "Cannot open chapter marker file, using defaults\n" );
{
/* Parse the cells */
while( NULL != ( cell = hb_read_next_cell( file ) ) )
- {
+ {
/* We have a chapter number */
if( cell->cell_col == 0 )
{
row = cell->cell_row;
chapter = atoi( cell->cell_text );
}
-
+
/* We have a chapter name */
if( cell->cell_col == 1 && row == cell->cell_row )
{
if( chapter >= job->chapter_start && chapter <= job->chapter_end )
{
hb_chapter_t * chapter_s;
-
+
chapter_s = hb_list_item( job->title->list_chapter, chapter - 1);
strncpy(chapter_s->title, cell->cell_text, 1023);
chapter_s->title[1023] = '\0';
}
- }
-
-
+ }
+
+
hb_dispose_cell( cell );
}
-
+
hb_close_csv_file( file );
}
}
hb_filter_denoise.settings = denoise_opt;
hb_list_add( job->filters, &hb_filter_denoise );
}
-
+
if( width && height )
{
job->width = width;
/* The height will be thrown away in hb.c but calculate it anyway */
hb_fix_aspect( job, HB_KEEP_WIDTH );
}
-
+
if( vquality >= 0.0 && vquality <= 1.0 )
{
job->vquality = vquality;
{
job->vcodec = vcodec;
}
- if( h264_13 )
- {
- job->h264_level = 13;
+ if( h264_13 )
+ {
+ job->h264_level = 13;
}
if( h264_30 )
{
job->audios[0] = 0;
job->audio_mixdowns[0] = audio_mixdown;
}
-
+
if( audio_mixdown == HB_AMIXDOWN_DOLBYPLII_AC3)
{
int i;
for( i = 3 ; i > 0; i--)
- {
+ {
job->audios[i*2+1] = job->audios[i];
job->audios[i*2] = job->audios[i];
if(job->audios[i] != -1 )
job->audio_mixdowns[i*2] = HB_AMIXDOWN_DOLBYPLII;
}
}
-
+
job->audios[1] = job->audios[0];
job->audio_mixdowns[1] = HB_AMIXDOWN_AC3;
job->audio_mixdowns[0] = HB_AMIXDOWN_DOLBYPLII;
}
-
+
if( abitrate )
{
job->abitrate = abitrate;
{
job->dynamic_range_compression = dynamic_range_compression;
}
-
+
if( size )
{
job->vbitrate = hb_calc_bitrate( job, size );
{
job->mux = mux;
}
-
+
if ( largeFileSize )
{
job->largeFileSize = 1;
{
job->ipod_atom = 1;
}
-
+
job->file = strdup( output );
if( crf )
job->maxWidth = maxWidth;
if (maxHeight)
job->maxHeight = maxHeight;
-
+
if (vfr)
job->vfr = 1;
-
+
if( subtitle_force )
{
job->subtitle_force = subtitle_force;
* which will determine which subtitles to enable, if any.
*/
job->pass = -1;
-
+
x264opts_tmp = job->x264opts;
job->x264opts = NULL;
- job->indepth_scan = subtitle_scan;
+ job->indepth_scan = subtitle_scan;
fprintf( stderr, "Subtitle Scan Enabled - enabling "
"subtitles if found for foreign language segments\n");
job->select_subtitle = malloc(sizeof(hb_subtitle_t*));
*(job->select_subtitle) = NULL;
-
+
/*
* Add the pre-scan job
*/
/*
* If subtitle_scan is enabled then only turn it on
* for the first pass and then off again for the
- * second.
+ * second.
*/
hb_subtitle_t **subtitle_tmp = job->select_subtitle;
job->pass = 1;
job->indepth_scan = 0;
-
+
if (x264opts)
{
x264opts2 = strdup(x264opts);
* If turbo options have been selected then append them
* to the x264opts now (size includes one ':' and the '\0')
*/
- if( turbo_opts_enabled )
+ if( turbo_opts_enabled )
{
int size = (x264opts ? strlen(x264opts) : 0) + strlen(turbo_opts) + 2;
char *tmp_x264opts;
-
+
tmp_x264opts = malloc(size * sizeof(char));
- if( x264opts )
+ if( x264opts )
{
- snprintf( tmp_x264opts, size, "%s:%s",
- x264opts, turbo_opts );
+ snprintf( tmp_x264opts, size, "%s:%s",
+ x264opts, turbo_opts );
free( x264opts );
} else {
/*
* No x264opts to modify, but apply the turbo options
* anyway as they may be modifying defaults
*/
- snprintf( tmp_x264opts, size, "%s",
+ snprintf( tmp_x264opts, size, "%s",
turbo_opts );
}
x264opts = tmp_x264opts;
x264opts );
job->x264opts = x264opts;
- }
+ }
hb_add( h, job );
job->select_subtitle = subtitle_tmp;
job->indepth_scan = 0;
job->x264opts = x264opts2;
-
+
hb_add( h, job );
}
else
/*
* Turn on subtitle scan if requested, note that this option
* precludes encoding of any actual subtitles.
- */
+ */
job->indepth_scan = 0;
job->pass = 0;
static void ShowHelp()
{
int i;
-
+
fprintf( stderr,
"Syntax: HandBrakeCLI [options] -i <device> -o <file>\n"
"\n"
" double quotation marks\n"
" -z, --preset-list See a list of available built-in presets\n"
"\n"
-
+
"### Source Options-----------------------------------------------------------\n\n"
" -i, --input <string> Set input device\n"
" -t, --title <number> Select a title to encode (0 to scan only,\n"
" 1 to 3, or \"3\" for chapter 3 only,\n"
" default: all chapters)\n"
"\n"
-
+
"### Destination Options------------------------------------------------------\n\n"
" -o, --output <string> Set output file name\n"
" -f, --format <string> Set output format (avi/mp4/ogm/mkv, default:\n"
" -O, --optimize Optimize mp4 files for HTTP streaming\n"
" -I, --ipod-atom Mark mp4 files so iPods will accept them\n"
"\n"
-
+
"### Picture Settings---------------------------------------------------------\n\n"
" -w, --width <number> Set picture width\n"
" -l, --height <number> Set picture height\n"
" iso639-2 code (fre, eng, spa, dut, et cetera)\n"
" -m, --markers Add chapter markers (mp4 output format only)\n"
"\n"
-
+
"### Video Options------------------------------------------------------------\n\n"
" -e, --encoder <string> Set video library encoder (ffmpeg,xvid,\n"
" x264,x264b13,x264b30 default: ffmpeg)\n"
" -d, --deinterlace Deinterlace video with yadif/mcdeint filter\n"
" <YM:FD:MM:QP> (default 0:-1:-1:1)\n"
" or\n"
- " <fast/slow/slower>\n"
+ " <fast/slow/slower>\n"
" -7, --deblock Deblock video with pp7 filter\n"
" <QP:M> (default 0:2)\n"
" -8, --denoise Denoise video with hqdn3d filter\n"
" -P, --loosePixelratio Store pixel aspect ratio with specified width\n"
" <modulus> Takes as optional argument what number you want\n"
" the dimensions to divide cleanly by (default 16)\n"
-
-
+
+
"\n"
-
-
+
+
"### Audio Options-----------------------------------------------------------\n\n"
" -E, --aencoder <string> Audio encoder (faac/lame/vorbis/ac3/aac+ac3) \n"
" ac3 meaning passthrough, aac+ac3 meaning an\n"
" -D, --drc <float> Apply extra dynamic range compression to the audio,\n"
" making soft sounds louder. Range is 1.0 to 4.0\n"
" (too loud), with 1.5 - 2.5 being a useful range.\n"
-
-
+
+
"\n"
-
-
+
+
"### Advanced Options---------------------------------------------------------\n\n"
" -x, --x264opts <string> Specify advanced x264 options in the\n"
" same style as mencoder:\n"
{ "large-file", no_argument, NULL, '4' },
{ "optimize", no_argument, NULL, 'O' },
{ "ipod-atom", no_argument, NULL, 'I' },
-
+
{ "title", required_argument, NULL, 't' },
{ "longest", no_argument, NULL, 'L' },
{ "chapters", required_argument, NULL, 'c' },
{ "preset", required_argument, NULL, 'Z' },
{ "preset-list", no_argument, NULL, 'z' },
{ "vfr", no_argument, NULL, 'V' },
-
+
{ 0, 0, 0, 0 }
};
case 'C':
cpu = atoi( optarg );
break;
-
+
case 'Z':
preset = 1;
preset_name = strdup(optarg);
case 'z':
ShowPresets();
exit ( 0 );
-
+
case 'f':
format = strdup( optarg );
break;
case 'I':
ipod_atom = 1;
break;
-
+
case 't':
titleindex = atoi( optarg );
break;
}
}
denoise = 1;
- break;
+ break;
case '9':
if( optarg != NULL )
{
detelecine_opt = strdup( optarg );
}
detelecine = 1;
- break;
+ break;
case 'g':
grayscale = 1;
break;
case 'V':
vfr = 1;
break;
-
+
default:
fprintf( stderr, "unknown option (%s)\n", argv[optind] );
return -1;