}
+ /**
+ * Clip MDCT coefficients to the allowable range.
+ *
+ * Clips in place: every element of coef is clamped to [COEF_MIN, COEF_MAX]
+ * via the (possibly SIMD-optimized) DSPContext callback.
+ *
+ * @param dsp  DSPContext providing the vector_clipf() implementation
+ * @param coef coefficient array; modified in place
+ * @param len  number of coefficients in coef
+ */
+ static void clip_coefficients(DSPContext *dsp, float *coef, unsigned int len)
+ {
+ dsp->vector_clipf(coef, coef, COEF_MIN, COEF_MAX, len);
+ }
+
+
#if CONFIG_AC3_ENCODER
-AVCodec ff_ac3_encoder = {
- "ac3",
+AVCodec ff_ac3_float_encoder = {
+ "ac3_float",
AVMEDIA_TYPE_AUDIO,
CODEC_ID_AC3,
sizeof(AC3EncodeContext),
int n, d[8];
unsigned int size;
int64_t i, sync;
- for(i=sync=avio_tell(pb); !pb->eof_reached; i++) {
+
+ start_sync:
+ memset(d, -1, sizeof(int)*8);
+ for(i=sync=avio_tell(pb); !url_feof(pb); i++) {
+ int j;
+
+ for(j=0; j<7; j++)
+ d[j]= d[j+1];
+ d[7]= avio_r8(pb);
+
+ size= d[4] + (d[5]<<8) + (d[6]<<16) + (d[7]<<24);
+
+ n= get_stream_idx(d+2);
+ //av_log(s, AV_LOG_DEBUG, "%X %X %X %X %X %X %X %X %"PRId64" %d %d\n", d[0], d[1], d[2], d[3], d[4], d[5], d[6], d[7], i, size, n);
+ if(i + (uint64_t)size > avi->fsize || d[0]<0)
+ continue;
+
+ //parse ix##
+ if( (d[0] == 'i' && d[1] == 'x' && n < s->nb_streams)
+ //parse JUNK
+ ||(d[0] == 'J' && d[1] == 'U' && d[2] == 'N' && d[3] == 'K')
+ ||(d[0] == 'i' && d[1] == 'd' && d[2] == 'x' && d[3] == '1')){
+ avio_skip(pb, size);
+ //av_log(s, AV_LOG_DEBUG, "SKIP\n");
+ goto start_sync;
+ }
+
+ //parse stray LIST
+ if(d[0] == 'L' && d[1] == 'I' && d[2] == 'S' && d[3] == 'T'){
+ avio_skip(pb, 4);
+ goto start_sync;
+ }
+
+ n= get_stream_idx(d);
+
+ if(!((i-avi->last_pkt_pos)&1) && get_stream_idx(d+1) < s->nb_streams)
+ continue;
+
+ //detect ##ix chunk and skip
+ if(d[2] == 'i' && d[3] == 'x' && n < s->nb_streams){
+ avio_skip(pb, size);
+ goto start_sync;
+ }
+
+ //parse ##dc/##wb
+ if(n < s->nb_streams){
+ AVStream *st;
+ AVIStream *ast;
+ st = s->streams[n];
+ ast = st->priv_data;
+
+ if(s->nb_streams>=2){
+ AVStream *st1 = s->streams[1];
+ AVIStream *ast1= st1->priv_data;
+ //workaround for broken small-file-bug402.avi
+ if( d[2] == 'w' && d[3] == 'b'
+ && n==0
+ && st ->codec->codec_type == AVMEDIA_TYPE_VIDEO
+ && st1->codec->codec_type == AVMEDIA_TYPE_AUDIO
+ && ast->prefix == 'd'*256+'c'
+ && (d[2]*256+d[3] == ast1->prefix || !ast1->prefix_count)
+ ){
+ n=1;
+ st = st1;
+ ast = ast1;
+ av_log(s, AV_LOG_WARNING, "Invalid stream + prefix combination, assuming audio.\n");
+ }
+ }
+
+
+ if( (st->discard >= AVDISCARD_DEFAULT && size==0)
+ /*|| (st->discard >= AVDISCARD_NONKEY && !(pkt->flags & AV_PKT_FLAG_KEY))*/ //FIXME needs a little reordering
+ || st->discard >= AVDISCARD_ALL){
+ ast->frame_offset += get_duration(ast, size);
+ avio_skip(pb, size);
+ goto start_sync;
+ }
+
+ if (d[2] == 'p' && d[3] == 'c' && size<=4*256+4) {
+ int k = avio_r8(pb);
+ int last = (k + avio_r8(pb) - 1) & 0xFF;
+
+ avio_rl16(pb); //flags
+
+ for (; k <= last; k++)
+ ast->pal[k] = avio_rb32(pb)>>8;// b + (g << 8) + (r << 16);
+ ast->has_pal= 1;
+ goto start_sync;
+ } else if( ((ast->prefix_count<5 || sync+9 > i) && d[2]<128 && d[3]<128) ||
+ d[2]*256+d[3] == ast->prefix /*||
+ (d[2] == 'd' && d[3] == 'c') ||
+ (d[2] == 'w' && d[3] == 'b')*/) {
+
+ //av_log(s, AV_LOG_DEBUG, "OK\n");
+ if(d[2]*256+d[3] == ast->prefix)
+ ast->prefix_count++;
+ else{
+ ast->prefix= d[2]*256+d[3];
+ ast->prefix_count= 0;
+ }
+
+ avi->stream_index= n;
+ ast->packet_size= size + 8;
+ ast->remaining= size;
+
+ if(size || !ast->sample_size){
+ uint64_t pos= avio_tell(pb) - 8;
+ if(!st->index_entries || !st->nb_index_entries || st->index_entries[st->nb_index_entries - 1].pos < pos){
+ av_add_index_entry(st, pos, ast->frame_offset, size, 0, AVINDEX_KEYFRAME);
+ }
+ }
+ return 0;
+ }
+ }
+ }
+
+ return AVERROR_EOF;
+ }
+
+ static int avi_read_packet(AVFormatContext *s, AVPacket *pkt)
+ {
+ AVIContext *avi = s->priv_data;
+ AVIOContext *pb = s->pb;
+ int err;
void* dstr;
if (CONFIG_DV_DEMUXER && avi->dv_demux) {
*/
/**
- * @file Public dictionary API.
+ * @file
+ * Public dictionary API.
+ * @deprecated
+ * AVDictionary is provided for compatibility with libav. It is inefficient
+ * in both its implementation and its API. It does not scale and is
+ * extremely slow with large dictionaries.
+ * It is recommended that new code uses our tree container from tree.c/h
+ * where applicable, which uses AVL trees to achieve O(log n) performance.
*/
#ifndef AVUTIL_DICT_H
#define input_pixel(pos) (isBE(origin) ? AV_RB16(pos) : AV_RL16(pos))
- // FIXME Maybe dither instead.
- static av_always_inline void
- yuv9_OR_10ToUV_c_template(uint8_t *dstU, uint8_t *dstV,
- const uint8_t *_srcU, const uint8_t *_srcV,
- int width, enum PixelFormat origin, int depth)
- {
- int i;
- const uint16_t *srcU = (const uint16_t *) _srcU;
- const uint16_t *srcV = (const uint16_t *) _srcV;
-
- for (i = 0; i < width; i++) {
- dstU[i] = input_pixel(&srcU[i]) >> (depth - 8);
- dstV[i] = input_pixel(&srcV[i]) >> (depth - 8);
- }
- }
-
- static av_always_inline void
- yuv9_or_10ToY_c_template(uint8_t *dstY, const uint8_t *_srcY,
- int width, enum PixelFormat origin, int depth)
- {
- int i;
- const uint16_t *srcY = (const uint16_t*)_srcY;
-
- for (i = 0; i < width; i++)
- dstY[i] = input_pixel(&srcY[i]) >> (depth - 8);
- }
-
- #undef input_pixel
-
- #define YUV_NBPS(depth, BE_LE, origin) \
- static void BE_LE ## depth ## ToUV_c(uint8_t *dstU, uint8_t *dstV, \
- const uint8_t *srcU, const uint8_t *srcV, \
- int width, uint32_t *unused) \
- { \
- yuv9_OR_10ToUV_c_template(dstU, dstV, srcU, srcV, width, origin, depth); \
- } \
- static void BE_LE ## depth ## ToY_c(uint8_t *dstY, const uint8_t *srcY, \
- int width, uint32_t *unused) \
- { \
- yuv9_or_10ToY_c_template(dstY, srcY, width, origin, depth); \
- }
-
- YUV_NBPS( 9, LE, PIX_FMT_YUV420P9LE);
- YUV_NBPS( 9, BE, PIX_FMT_YUV420P9BE);
- YUV_NBPS(10, LE, PIX_FMT_YUV420P10LE);
- YUV_NBPS(10, BE, PIX_FMT_YUV420P10BE);
-
-static void bgr24ToY_c(uint8_t *dst, const uint8_t *src,
+static void bgr24ToY_c(int16_t *dst, const uint8_t *src,
int width, uint32_t *unused)
{
int i;