2 * various utility functions for use within FFmpeg
3 * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23 #include "avio_internal.h"
25 #include "libavcodec/internal.h"
26 #include "libavcodec/raw.h"
27 #include "libavcodec/bytestream.h"
28 #include "libavutil/opt.h"
29 #include "libavutil/dict.h"
30 #include "libavutil/internal.h"
31 #include "libavutil/pixdesc.h"
34 #include "libavutil/avassert.h"
35 #include "libavutil/avstring.h"
36 #include "libavutil/mathematics.h"
37 #include "libavutil/parseutils.h"
38 #include "libavutil/time.h"
39 #include "libavutil/timestamp.h"
41 #include "audiointerleave.h"
53 * various utility functions for use within FFmpeg
/* Return the libavformat version this build was compiled against.
 * The assert distinguishes an FFmpeg build (micro >= 100) from a Libav one. */
56 unsigned avformat_version(void)
58 av_assert0(LIBAVFORMAT_VERSION_MICRO >= 100);
59 return LIBAVFORMAT_VERSION_INT;
/* Return the build-time ./configure command line as a string constant. */
62 const char *avformat_configuration(void)
64 return FFMPEG_CONFIGURATION;
/* Return the license string. The prefix is part of the concatenated literal
 * so the pointer arithmetic skips "libavformat license: " and returns only
 * the license name itself. */
67 const char *avformat_license(void)
69 #define LICENSE_PREFIX "libavformat license: "
70 return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
/* Base value for internally generated "relative" timestamps, placed near
 * INT64_MAX so real stream timestamps cannot collide with it. */
73 #define RELATIVE_TS_BASE (INT64_MAX - (1LL<<48))
/* Test whether ts lies in the relative-timestamp window, i.e. within 2^48
 * below RELATIVE_TS_BASE. (Closing brace not visible in this excerpt.) */
75 static int is_relative(int64_t ts) {
76 return ts > (RELATIVE_TS_BASE - (1LL<<48));
80 * Wrap a given time stamp, if there is an indication for an overflow
83 * @param timestamp the time stamp to wrap
84 * @return resulting time stamp
/* Applies +/- 2^pts_wrap_bits depending on st->pts_wrap_behavior relative to
 * st->pts_wrap_reference. Only acts when wrapping is enabled and both the
 * reference and the timestamp are valid. (The fallthrough "return timestamp"
 * path is not visible in this excerpt.) */
86 static int64_t wrap_timestamp(AVStream *st, int64_t timestamp)
88 if (st->pts_wrap_behavior != AV_PTS_WRAP_IGNORE &&
89 st->pts_wrap_reference != AV_NOPTS_VALUE && timestamp != AV_NOPTS_VALUE) {
90 if (st->pts_wrap_behavior == AV_PTS_WRAP_ADD_OFFSET &&
91 timestamp < st->pts_wrap_reference)
92 return timestamp + (1ULL<<st->pts_wrap_bits);
93 else if (st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET &&
94 timestamp >= st->pts_wrap_reference)
95 return timestamp - (1ULL<<st->pts_wrap_bits);
/* Expand getter/setter pairs (av_stream_get/set_r_frame_rate and the
 * AVFormatContext forced-codec accessors) via the MAKE_ACCESSORS macro. */
100 MAKE_ACCESSORS(AVStream, stream, AVRational, r_frame_rate)
101 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, video_codec)
102 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, audio_codec)
103 MAKE_ACCESSORS(AVFormatContext, format, AVCodec *, subtitle_codec)
/* Pick a decoder for a stream: prefer an already-attached codec, then a
 * user-forced per-media-type decoder on the context, finally a registry
 * lookup by codec_id. NOTE(review): the per-case break statements are not
 * visible in this excerpt — presumably each case breaks out of the switch. */
105 static AVCodec *find_decoder(AVFormatContext *s, AVStream *st, enum AVCodecID codec_id)
107 if (st->codec->codec)
108 return st->codec->codec;
110 switch(st->codec->codec_type){
111 case AVMEDIA_TYPE_VIDEO:
112 if(s->video_codec) return s->video_codec;
114 case AVMEDIA_TYPE_AUDIO:
115 if(s->audio_codec) return s->audio_codec;
117 case AVMEDIA_TYPE_SUBTITLE:
118 if(s->subtitle_codec) return s->subtitle_codec;
122 return avcodec_find_decoder(codec_id);
/* Accessor: return the score the input format was detected with. */
125 int av_format_get_probe_score(const AVFormatContext *s)
127 return s->probe_score;
130 /* an arbitrarily chosen "sane" max packet size -- 50M */
131 #define SANE_CHUNK_SIZE (50000000)
/* Clamp a requested read size to what remains before s->maxsize.
 * If maxsize is unset/stale, refresh it from avio_size(); the "- !newsize"
 * maps a 0 file size to -1 (unknown). Logs when truncation occurs.
 * (Closing return path not visible in this excerpt.) */
133 int ffio_limit(AVIOContext *s, int size)
136 int64_t remaining= s->maxsize - avio_tell(s);
137 if(remaining < size){
138 int64_t newsize= avio_size(s);
139 if(!s->maxsize || s->maxsize<newsize)
140 s->maxsize= newsize - !newsize;
141 remaining= s->maxsize - avio_tell(s);
142 remaining= FFMAX(remaining, 0);
145 if(s->maxsize>=0 && remaining+1 < size){
146 av_log(NULL, remaining ? AV_LOG_ERROR : AV_LOG_DEBUG, "Truncating packet of size %d to %"PRId64"\n", size, remaining+1);
154 * Read the data in sane-sized chunks and append to pkt.
155 * Return the number of bytes read or an error.
/* Grows pkt in SANE_CHUNK_SIZE-bounded steps so a huge (possibly bogus)
 * requested size cannot allocate 50M+ up front; a short avio_read shrinks
 * the packet back to the bytes actually obtained. On partial failure the
 * packet is flagged corrupt. (Loop header and some lines are not visible
 * in this excerpt.) */
157 static int append_packet_chunked(AVIOContext *s, AVPacket *pkt, int size)
159 int64_t orig_pos = pkt->pos; // av_grow_packet might reset pos
160 int orig_size = pkt->size;
164 int prev_size = pkt->size;
168 * When the caller requests a lot of data, limit it to the amount left
169 * in file or SANE_CHUNK_SIZE when it is not known
172 if (read_size > SANE_CHUNK_SIZE/10) {
173 read_size = ffio_limit(s, read_size);
174 // If filesize/maxsize is unknown, limit to SANE_CHUNK_SIZE
176 read_size = FFMIN(read_size, SANE_CHUNK_SIZE);
179 ret = av_grow_packet(pkt, read_size);
183 ret = avio_read(s, pkt->data + prev_size, read_size);
184 if (ret != read_size) {
185 av_shrink_packet(pkt, prev_size + FFMAX(ret, 0));
192 pkt->flags |= AV_PKT_FLAG_CORRUPT;
197 return pkt->size > orig_size ? pkt->size - orig_size : ret;
/* Read up to size bytes into a fresh packet, recording the stream position
 * before delegating the chunked read. */
200 int av_get_packet(AVIOContext *s, AVPacket *pkt, int size)
205 pkt->pos = avio_tell(s);
207 return append_packet_chunked(s, pkt, size);
/* Append size bytes to an existing packet; an empty packet degenerates to a
 * plain av_get_packet. (Guard condition line not visible in this excerpt.) */
210 int av_append_packet(AVIOContext *s, AVPacket *pkt, int size)
213 return av_get_packet(s, pkt, size);
214 return append_packet_chunked(s, pkt, size);
/* Return nonzero if filename contains a valid frame-number pattern
 * (e.g. "%d"), by attempting to expand it with index 1. */
218 int av_filename_number_test(const char *filename)
221 return filename && (av_get_frame_filename(buf, sizeof(buf), filename, 1)>=0);
/* Probe all registered input formats against pd and return the best match,
 * storing its score in *score_ret. An ID3v2 header is skipped for probing
 * (most formats would otherwise mis-score), and extension matches are used
 * as a weak signal. (Several lines, including the id3/nodat handling and
 * the winner assignment, are not visible in this excerpt.) */
224 AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret)
226 AVProbeData lpd = *pd;
227 AVInputFormat *fmt1 = NULL, *fmt;
228 int score, nodat = 0, score_max=0;
229 const static uint8_t zerobuffer[AVPROBE_PADDING_SIZE];
232 lpd.buf = zerobuffer;
234 if (lpd.buf_size > 10 && ff_id3v2_match(lpd.buf, ID3v2_DEFAULT_MAGIC)) {
235 int id3len = ff_id3v2_tag_len(lpd.buf);
236 if (lpd.buf_size > id3len + 16) {
238 lpd.buf_size -= id3len;
/* Iterate every registered demuxer; NOFILE formats are only considered when
 * the probe data matches their open/closed expectation. */
244 while ((fmt1 = av_iformat_next(fmt1))) {
245 if (!is_opened == !(fmt1->flags & AVFMT_NOFILE))
248 if (fmt1->read_probe) {
249 score = fmt1->read_probe(&lpd);
250 if(fmt1->extensions && av_match_ext(lpd.filename, fmt1->extensions))
251 score = FFMAX(score, nodat ? AVPROBE_SCORE_EXTENSION / 2 - 1 : 1);
252 } else if (fmt1->extensions) {
253 if (av_match_ext(lpd.filename, fmt1->extensions)) {
254 score = AVPROBE_SCORE_EXTENSION;
257 if (score > score_max) {
260 }else if (score == score_max)
264 score_max = FFMIN(AVPROBE_SCORE_EXTENSION / 2 - 1, score_max);
265 *score_ret= score_max;
/* Wrapper around av_probe_input_format3 that only reports a format when its
 * score beats the caller-supplied threshold in *score_max, updating it. */
270 AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max)
273 AVInputFormat *fmt= av_probe_input_format3(pd, is_opened, &score_ret);
274 if(score_ret > *score_max){
275 *score_max= score_ret;
/* Convenience probe with a throwaway score variable. */
281 AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened){
283 return av_probe_input_format2(pd, is_opened, &score);
/* Probe raw stream data and, when the detected demuxer name matches one of
 * the known elementary-stream formats below, force the corresponding codec
 * id/type onto the stream. Only applies when the probe score reaches the
 * stream's request_probe threshold. */
286 static int set_codec_from_probe_data(AVFormatContext *s, AVStream *st, AVProbeData *pd)
288 static const struct {
289 const char *name; enum AVCodecID id; enum AVMediaType type;
291 { "aac" , AV_CODEC_ID_AAC , AVMEDIA_TYPE_AUDIO },
292 { "ac3" , AV_CODEC_ID_AC3 , AVMEDIA_TYPE_AUDIO },
293 { "dts" , AV_CODEC_ID_DTS , AVMEDIA_TYPE_AUDIO },
294 { "eac3" , AV_CODEC_ID_EAC3 , AVMEDIA_TYPE_AUDIO },
295 { "h264" , AV_CODEC_ID_H264 , AVMEDIA_TYPE_VIDEO },
296 { "loas" , AV_CODEC_ID_AAC_LATM , AVMEDIA_TYPE_AUDIO },
297 { "m4v" , AV_CODEC_ID_MPEG4 , AVMEDIA_TYPE_VIDEO },
298 { "mp3" , AV_CODEC_ID_MP3 , AVMEDIA_TYPE_AUDIO },
299 { "mpegvideo", AV_CODEC_ID_MPEG2VIDEO, AVMEDIA_TYPE_VIDEO },
303 AVInputFormat *fmt = av_probe_input_format3(pd, 1, &score);
305 if (fmt && st->request_probe <= score) {
307 av_log(s, AV_LOG_DEBUG, "Probe with size=%d, packets=%d detected %s with score=%d\n",
308 pd->buf_size, MAX_PROBE_PACKETS - st->probe_packets, fmt->name, score);
309 for (i = 0; fmt_id_type[i].name; i++) {
310 if (!strcmp(fmt->name, fmt_id_type[i].name)) {
311 st->codec->codec_id = fmt_id_type[i].id;
312 st->codec->codec_type = fmt_id_type[i].type;
320 /************************************************************/
321 /* input media file */
/* Invoke the demuxer's read_header callback (if any) and record where the
 * actual data starts so seeks can return to it. */
323 int av_demuxer_open(AVFormatContext *ic){
326 if (ic->iformat->read_header) {
327 err = ic->iformat->read_header(ic);
332 if (ic->pb && !ic->data_offset)
333 ic->data_offset = avio_tell(ic->pb);
/* Probe the input format by reading progressively larger chunks from pb
 * (doubling from PROBE_BUF_MIN up to max_probe_size) until a format is
 * detected, then rewind pb by reusing the probe buffer so no real seek is
 * required. Returns the probe score on success, a negative error otherwise.
 * A "mime_type" hint from the IO layer can short-circuit detection (aacp).
 * (Several lines, including some cleanup paths, are not visible in this
 * excerpt.) */
339 int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
340 const char *filename, void *logctx,
341 unsigned int offset, unsigned int max_probe_size)
343 AVProbeData pd = { filename ? filename : "", NULL, -offset };
344 unsigned char *buf = NULL;
346 int ret = 0, probe_size, buf_offset = 0;
349 if (!max_probe_size) {
350 max_probe_size = PROBE_BUF_MAX;
351 } else if (max_probe_size > PROBE_BUF_MAX) {
352 max_probe_size = PROBE_BUF_MAX;
353 } else if (max_probe_size < PROBE_BUF_MIN) {
354 av_log(logctx, AV_LOG_ERROR,
355 "Specified probe size value %u cannot be < %u\n", max_probe_size, PROBE_BUF_MIN);
356 return AVERROR(EINVAL);
359 if (offset >= max_probe_size) {
360 return AVERROR(EINVAL);
363 if (!*fmt && pb->av_class && av_opt_get(pb, "mime_type", AV_OPT_SEARCH_CHILDREN, &mime_type) >= 0 && mime_type) {
364 if (!av_strcasecmp(mime_type, "audio/aacp")) {
365 *fmt = av_find_input_format("aac");
367 av_freep(&mime_type);
/* Grow the probe window geometrically; the last iteration accepts any
 * score (threshold 0), earlier ones demand at least AVPROBE_SCORE_RETRY. */
370 for(probe_size= PROBE_BUF_MIN; probe_size<=max_probe_size && !*fmt;
371 probe_size = FFMIN(probe_size<<1, FFMAX(max_probe_size, probe_size+1))) {
373 score = probe_size < max_probe_size ? AVPROBE_SCORE_RETRY : 0;
375 /* read probe data */
376 if ((ret = av_reallocp(&buf, probe_size + AVPROBE_PADDING_SIZE)) < 0)
378 if ((ret = avio_read(pb, buf + buf_offset, probe_size - buf_offset)) < 0) {
379 /* fail if error was not end of file, otherwise, lower score */
380 if (ret != AVERROR_EOF) {
385 ret = 0; /* error was end of file, nothing read */
388 if (buf_offset < offset)
390 pd.buf_size = buf_offset - offset;
391 pd.buf = &buf[offset];
393 memset(pd.buf + pd.buf_size, 0, AVPROBE_PADDING_SIZE);
395 /* guess file format */
396 *fmt = av_probe_input_format2(&pd, 1, &score);
398 if(score <= AVPROBE_SCORE_RETRY){ //this can only be true in the last iteration
399 av_log(logctx, AV_LOG_WARNING, "Format %s detected only with low score of %d, misdetection possible!\n", (*fmt)->name, score);
401 av_log(logctx, AV_LOG_DEBUG, "Format %s probed with size=%d and score=%d\n", (*fmt)->name, probe_size, score);
407 return AVERROR_INVALIDDATA;
410 /* rewind. reuse probe buffer to avoid seeking */
411 ret = ffio_rewind_with_probe_data(pb, &buf, pd.buf_size);
413 return ret < 0 ? ret : score;
/* Compatibility wrapper: same as av_probe_input_buffer2 but collapses any
 * positive score to 0 so callers only see success/failure. */
416 int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
417 const char *filename, void *logctx,
418 unsigned int offset, unsigned int max_probe_size)
420 int ret = av_probe_input_buffer2(pb, fmt, filename, logctx, offset, max_probe_size);
421 return ret < 0 ? ret : 0;
425 /* open input file and probe the format if necessary */
/* Three entry conditions: a pre-supplied custom pb (probe through it and
 * mark CUSTOM_IO), a known NOFILE format (no IO needed), or a filename-only
 * probe followed by avio_open2 and a buffer probe. Returns the probe score
 * or a negative error. (Some guard lines are not visible in this excerpt.) */
426 static int init_input(AVFormatContext *s, const char *filename, AVDictionary **options)
429 AVProbeData pd = {filename, NULL, 0};
430 int score = AVPROBE_SCORE_RETRY;
433 s->flags |= AVFMT_FLAG_CUSTOM_IO;
435 return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
436 else if (s->iformat->flags & AVFMT_NOFILE)
437 av_log(s, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
438 "will be ignored with AVFMT_NOFILE format.\n");
442 if ( (s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
443 (!s->iformat && (s->iformat = av_probe_input_format2(&pd, 0, &score))))
446 if ((ret = avio_open2(&s->pb, filename, AVIO_FLAG_READ | s->avio_flags,
447 &s->interrupt_callback, options)) < 0)
451 return av_probe_input_buffer2(s->pb, &s->iformat, filename, s, 0, s->probesize);
/* Append pkt to a singly linked AVPacketList, maintaining the tail pointer
 * in *plast_pktl for O(1) insertion. (Allocation-failure and return lines
 * are not visible in this excerpt.) */
454 static AVPacket *add_to_pktbuf(AVPacketList **packet_buffer, AVPacket *pkt,
455 AVPacketList **plast_pktl){
456 AVPacketList *pktl = av_mallocz(sizeof(AVPacketList));
461 (*plast_pktl)->next = pktl;
463 *packet_buffer = pktl;
465 /* add the packet in the buffered packet list */
/* Queue each stream's attached picture (e.g. cover art) into the raw packet
 * buffer so it is delivered like a normal packet, unless discarded.
 * The copy gets its own buffer reference so the original stays valid. */
471 int avformat_queue_attached_pictures(AVFormatContext *s)
474 for (i = 0; i < s->nb_streams; i++)
475 if (s->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC &&
476 s->streams[i]->discard < AVDISCARD_ALL) {
477 AVPacket copy = s->streams[i]->attached_pic;
478 copy.buf = av_buffer_ref(copy.buf);
480 return AVERROR(ENOMEM);
/* NOTE(review): "©" below looks like mojibake for "&copy" — confirm against
 * the upstream source before relying on this line. */
482 add_to_pktbuf(&s->raw_packet_buffer, ©, &s->raw_packet_buffer_end);
/* Open an input stream and read its header. Allocates the context when the
 * caller passed NULL, applies user options, probes/open the input, allocates
 * demuxer private data, calls read_header, attaches ID3v2 pictures for the
 * formats that want them, and queues attached pictures. On failure the
 * context is freed and *ps is presumably reset. (Several lines, including
 * the fail label and *ps assignment, are not visible in this excerpt.) */
487 int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options)
489 AVFormatContext *s = *ps;
491 AVDictionary *tmp = NULL;
492 ID3v2ExtraMeta *id3v2_extra_meta = NULL;
494 if (!s && !(s = avformat_alloc_context()))
495 return AVERROR(ENOMEM);
497 av_log(NULL, AV_LOG_ERROR, "Input context has not been properly allocated by avformat_alloc_context() and is not NULL either\n");
498 return AVERROR(EINVAL);
/* Work on a copy of the options so consumed entries can be reported back. */
504 av_dict_copy(&tmp, *options, 0);
506 if ((ret = av_opt_set_dict(s, &tmp)) < 0)
509 if ((ret = init_input(s, filename, &tmp)) < 0)
511 s->probe_score = ret;
512 avio_skip(s->pb, s->skip_initial_bytes);
514 /* check filename in case an image number is expected */
515 if (s->iformat->flags & AVFMT_NEEDNUMBER) {
516 if (!av_filename_number_test(filename)) {
517 ret = AVERROR(EINVAL);
522 s->duration = s->start_time = AV_NOPTS_VALUE;
523 av_strlcpy(s->filename, filename ? filename : "", sizeof(s->filename));
525 /* allocate private data */
526 if (s->iformat->priv_data_size > 0) {
527 if (!(s->priv_data = av_mallocz(s->iformat->priv_data_size))) {
528 ret = AVERROR(ENOMEM);
531 if (s->iformat->priv_class) {
532 *(const AVClass**)s->priv_data = s->iformat->priv_class;
533 av_opt_set_defaults(s->priv_data);
534 if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
539 /* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
541 ff_id3v2_read(s, ID3v2_DEFAULT_MAGIC, &id3v2_extra_meta);
543 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->iformat->read_header)
544 if ((ret = s->iformat->read_header(s)) < 0)
547 if (id3v2_extra_meta) {
548 if (!strcmp(s->iformat->name, "mp3") || !strcmp(s->iformat->name, "aac") ||
549 !strcmp(s->iformat->name, "tta")) {
550 if((ret = ff_id3v2_parse_apic(s, &id3v2_extra_meta)) < 0)
553 av_log(s, AV_LOG_DEBUG, "demuxer does not support additional id3 data, skipping\n");
555 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
557 if ((ret = avformat_queue_attached_pictures(s)) < 0)
560 if (!(s->flags&AVFMT_FLAG_PRIV_OPT) && s->pb && !s->data_offset)
561 s->data_offset = avio_tell(s->pb);
563 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
566 av_dict_free(options);
/* Error path: release id3 metadata, close non-custom IO and free the
 * context. */
573 ff_id3v2_free_extra_meta(&id3v2_extra_meta);
575 if (s->pb && !(s->flags & AVFMT_FLAG_CUSTOM_IO))
577 avformat_free_context(s);
582 /*******************************************************/
/* Override the stream's codec_id with any user-forced per-media-type id set
 * on the context. NOTE(review): the per-case break statements are not
 * visible in this excerpt. */
584 static void force_codec_ids(AVFormatContext *s, AVStream *st)
586 switch(st->codec->codec_type){
587 case AVMEDIA_TYPE_VIDEO:
588 if(s->video_codec_id) st->codec->codec_id= s->video_codec_id;
590 case AVMEDIA_TYPE_AUDIO:
591 if(s->audio_codec_id) st->codec->codec_id= s->audio_codec_id;
593 case AVMEDIA_TYPE_SUBTITLE:
594 if(s->subtitle_codec_id)st->codec->codec_id= s->subtitle_codec_id;
/* Accumulate packet payloads into the stream's probe buffer and, once enough
 * data is gathered (or the buffer size crosses a power-of-two boundary, or
 * probing ends), run set_codec_from_probe_data to identify the codec.
 * A NULL pkt signals end-of-probing. On success request_probe is set to -1
 * and forced codec ids are applied. (Several lines are not visible in this
 * excerpt.) */
599 static int probe_codec(AVFormatContext *s, AVStream *st, const AVPacket *pkt)
601 if(st->request_probe>0){
602 AVProbeData *pd = &st->probe_data;
604 av_log(s, AV_LOG_DEBUG, "probing stream %d pp:%d\n", st->index, st->probe_packets);
608 uint8_t *new_buf = av_realloc(pd->buf, pd->buf_size+pkt->size+AVPROBE_PADDING_SIZE);
610 av_log(s, AV_LOG_WARNING,
611 "Failed to reallocate probe buffer for stream %d\n",
616 memcpy(pd->buf+pd->buf_size, pkt->data, pkt->size);
617 pd->buf_size += pkt->size;
618 memset(pd->buf+pd->buf_size, 0, AVPROBE_PADDING_SIZE);
621 st->probe_packets = 0;
623 av_log(s, AV_LOG_WARNING, "nothing to probe for stream %d\n",
628 end= s->raw_packet_buffer_remaining_size <= 0
629 || st->probe_packets<=0;
/* Re-probe when finished, or whenever buf_size crosses a power of two
 * (cheap way to probe on a logarithmic schedule). */
631 if(end || av_log2(pd->buf_size) != av_log2(pd->buf_size - pkt->size)){
632 int score= set_codec_from_probe_data(s, st, pd);
633 if( (st->codec->codec_id != AV_CODEC_ID_NONE && score > AVPROBE_SCORE_RETRY)
637 st->request_probe= -1;
638 if(st->codec->codec_id != AV_CODEC_ID_NONE){
639 av_log(s, AV_LOG_DEBUG, "probed stream %d\n", st->index);
641 av_log(s, AV_LOG_WARNING, "probed stream %d failed\n", st->index);
643 force_codec_ids(s, st);
/* Core raw packet reader: drain the raw_packet_buffer first (packets held
 * back while codec probing was in progress), otherwise pull a packet from
 * the demuxer, apply corruption/side-data/wrap-timestamp handling, and
 * either return it directly or park it in the raw buffer while its stream
 * is still being probed. (Several lines are not visible in this excerpt.) */
649 int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
655 AVPacketList *pktl = s->raw_packet_buffer;
659 st = s->streams[pkt->stream_index];
660 if (s->raw_packet_buffer_remaining_size <= 0) {
661 if ((err = probe_codec(s, st, NULL)) < 0)
664 if(st->request_probe <= 0){
665 s->raw_packet_buffer = pktl->next;
666 s->raw_packet_buffer_remaining_size += pkt->size;
675 ret= s->iformat->read_packet(s, pkt);
677 if (!pktl || ret == AVERROR(EAGAIN))
/* On demuxer EOF/error, flush probing for every stream still pending. */
679 for (i = 0; i < s->nb_streams; i++) {
681 if (st->probe_packets) {
682 if ((err = probe_codec(s, st, NULL)) < 0)
685 av_assert0(st->request_probe <= 0);
690 if ((s->flags & AVFMT_FLAG_DISCARD_CORRUPT) &&
691 (pkt->flags & AV_PKT_FLAG_CORRUPT)) {
692 av_log(s, AV_LOG_WARNING,
693 "Dropped corrupted packet (stream = %d)\n",
699 if(!(s->flags & AVFMT_FLAG_KEEP_SIDE_DATA))
700 av_packet_merge_side_data(pkt);
702 if(pkt->stream_index >= (unsigned)s->nb_streams){
703 av_log(s, AV_LOG_ERROR, "Invalid stream index %d\n", pkt->stream_index);
707 st= s->streams[pkt->stream_index];
708 pkt->dts = wrap_timestamp(st, pkt->dts);
709 pkt->pts = wrap_timestamp(st, pkt->pts);
711 force_codec_ids(s, st);
713 /* TODO: audio: time filter; video: frame reordering (pts != dts) */
714 if (s->use_wallclock_as_timestamps)
715 pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);
717 if(!pktl && st->request_probe <= 0)
720 add_to_pktbuf(&s->raw_packet_buffer, pkt, &s->raw_packet_buffer_end);
721 s->raw_packet_buffer_remaining_size -= pkt->size;
723 if ((err = probe_codec(s, st, pkt)) < 0)
728 #if FF_API_READ_PACKET
/* Deprecated public alias for ff_read_packet (compiled only while the old
 * API is kept, per FF_API_READ_PACKET). */
729 int av_read_packet(AVFormatContext *s, AVPacket *pkt)
731 return ff_read_packet(s, pkt);
736 /**********************************************************/
/* Return nonzero for audio codecs whose frame size can be derived from the
 * bitstream (MP1/MP2/MP3); AAC and CELT are deliberately commented out. */
738 static int determinable_frame_size(AVCodecContext *avctx)
740 if (/*avctx->codec_id == AV_CODEC_ID_AAC ||*/
741 avctx->codec_id == AV_CODEC_ID_MP1 ||
742 avctx->codec_id == AV_CODEC_ID_MP2 ||
743 avctx->codec_id == AV_CODEC_ID_MP3/* ||
744 avctx->codec_id == AV_CODEC_ID_CELT*/)
750 * Get the number of samples of an audio frame. Return -1 on error.
/* Resolution order: demuxer-side frame_size, codec-derived duration,
 * muxer-side frame_size, and finally a CBR estimate for WMA where no other
 * duration source exists. (Some return lines are not visible in this
 * excerpt.) */
752 int ff_get_audio_frame_size(AVCodecContext *enc, int size, int mux)
756 /* give frame_size priority if demuxing */
757 if (!mux && enc->frame_size > 1)
758 return enc->frame_size;
760 if ((frame_size = av_get_audio_frame_duration(enc, size)) > 0)
763 /* Fall back on using frame_size if muxing. */
764 if (enc->frame_size > 1)
765 return enc->frame_size;
767 //For WMA we currently have no other means to calculate duration thus we
768 //do it here by assuming CBR, which is true for all known cases.
769 if(!mux && enc->bit_rate>0 && size>0 && enc->sample_rate>0 && enc->block_align>1) {
770 if (enc->codec_id == AV_CODEC_ID_WMAV1 || enc->codec_id == AV_CODEC_ID_WMAV2)
771 return ((int64_t)size * 8 * enc->sample_rate) / enc->bit_rate;
779 * Return the frame duration in seconds. Return 0 if not available.
/* Writes the duration as a rational *pnum / *pden. Video prefers
 * r_frame_rate (when no parser), then stream/codec time_base with
 * repeat_pict adjustment; audio derives it from the frame size and sample
 * rate. (Some lines are not visible in this excerpt.) */
781 void ff_compute_frame_duration(int *pnum, int *pden, AVStream *st,
782 AVCodecParserContext *pc, AVPacket *pkt)
788 switch(st->codec->codec_type) {
789 case AVMEDIA_TYPE_VIDEO:
790 if (st->r_frame_rate.num && !pc) {
791 *pnum = st->r_frame_rate.den;
792 *pden = st->r_frame_rate.num;
793 } else if(st->time_base.num*1000LL > st->time_base.den) {
794 *pnum = st->time_base.num;
795 *pden = st->time_base.den;
796 }else if(st->codec->time_base.num*1000LL > st->codec->time_base.den){
797 *pnum = st->codec->time_base.num;
798 *pden = st->codec->time_base.den;
799 if (pc && pc->repeat_pict) {
800 if (*pnum > INT_MAX / (1 + pc->repeat_pict))
801 *pden /= 1 + pc->repeat_pict;
803 *pnum *= 1 + pc->repeat_pict;
805 //If this codec can be interlaced or progressive then we need a parser to compute duration of a packet
806 //Thus if we have no parser in such case leave duration undefined.
807 if(st->codec->ticks_per_frame>1 && !pc){
812 case AVMEDIA_TYPE_AUDIO:
813 frame_size = ff_get_audio_frame_size(st->codec, pkt->size, 0);
814 if (frame_size <= 0 || st->codec->sample_rate <= 0)
817 *pden = st->codec->sample_rate;
/* Return 1 for non-video streams; for video, look up (and cache) the codec
 * descriptor and report its AV_CODEC_PROP_INTRA_ONLY property. */
824 static int is_intra_only(AVCodecContext *enc){
825 const AVCodecDescriptor *desc;
827 if(enc->codec_type != AVMEDIA_TYPE_VIDEO)
830 desc = av_codec_get_codec_descriptor(enc);
832 desc = avcodec_descriptor_get(enc->codec_id);
833 av_codec_set_codec_descriptor(enc, desc);
836 return !!(desc->props & AV_CODEC_PROP_INTRA_ONLY);
/* Heuristic for H.264 only: decide whether enough frames were decoded for
 * has_b_frames (decode delay) to be trusted. Other codecs always return 1.
 * With the H264 decoder built in, an exact reorder-frame match short-
 * circuits the frame-count thresholds. */
840 static int has_decode_delay_been_guessed(AVStream *st)
842 if(st->codec->codec_id != AV_CODEC_ID_H264) return 1;
843 if(!st->info) // if we have left find_stream_info then nb_decoded_frames won't increase anymore for stream copy
845 #if CONFIG_H264_DECODER
846 if(st->codec->has_b_frames &&
847 avpriv_h264_has_num_reorder_frames(st->codec) == st->codec->has_b_frames)
850 if(st->codec->has_b_frames<3)
851 return st->nb_decoded_frames >= 7;
852 else if(st->codec->has_b_frames<4)
853 return st->nb_decoded_frames >= 18;
855 return st->nb_decoded_frames >= 20;
/* Advance through the packet queues: when pktl is the tail of the parse
 * queue, continue into packet_buffer so both queues are walked as one list. */
858 static AVPacketList *get_next_pkt(AVFormatContext *s, AVStream *st, AVPacketList *pktl)
862 if (pktl == s->parse_queue_end)
863 return s->packet_buffer;
/* Establish the per-stream pts wrap reference/behavior once first_dts is
 * known. The reference is set 60 s before the first timestamp; whether to
 * add or subtract the wrap offset depends on how close first_dts is to the
 * wrap point. Streams in the same program (or all streams, when there is no
 * program) are kept consistent. (Several lines are not visible in this
 * excerpt.) */
867 static int update_wrap_reference(AVFormatContext *s, AVStream *st, int stream_index)
869 if (s->correct_ts_overflow && st->pts_wrap_bits < 63 &&
870 st->pts_wrap_reference == AV_NOPTS_VALUE && st->first_dts != AV_NOPTS_VALUE) {
873 // reference time stamp should be 60 s before first time stamp
874 int64_t pts_wrap_reference = st->first_dts - av_rescale(60, st->time_base.den, st->time_base.num);
875 // if first time stamp is not more than 1/8 and 60s before the wrap point, subtract rather than add wrap offset
876 int pts_wrap_behavior = (st->first_dts < (1LL<<st->pts_wrap_bits) - (1LL<<st->pts_wrap_bits-3)) ||
877 (st->first_dts < (1LL<<st->pts_wrap_bits) - av_rescale(60, st->time_base.den, st->time_base.num)) ?
878 AV_PTS_WRAP_ADD_OFFSET : AV_PTS_WRAP_SUB_OFFSET;
880 AVProgram *first_program = av_find_program_from_stream(s, NULL, stream_index);
882 if (!first_program) {
883 int default_stream_index = av_find_default_stream_index(s);
884 if (s->streams[default_stream_index]->pts_wrap_reference == AV_NOPTS_VALUE) {
885 for (i=0; i<s->nb_streams; i++) {
886 s->streams[i]->pts_wrap_reference = pts_wrap_reference;
887 s->streams[i]->pts_wrap_behavior = pts_wrap_behavior;
891 st->pts_wrap_reference = s->streams[default_stream_index]->pts_wrap_reference;
892 st->pts_wrap_behavior = s->streams[default_stream_index]->pts_wrap_behavior;
/* If any program containing this stream already has a reference, adopt it. */
896 AVProgram *program = first_program;
898 if (program->pts_wrap_reference != AV_NOPTS_VALUE) {
899 pts_wrap_reference = program->pts_wrap_reference;
900 pts_wrap_behavior = program->pts_wrap_behavior;
903 program = av_find_program_from_stream(s, program, stream_index);
906 // update every program with differing pts_wrap_reference
907 program = first_program;
909 if (program->pts_wrap_reference != pts_wrap_reference) {
910 for (i=0; i<program->nb_stream_indexes; i++) {
911 s->streams[program->stream_index[i]]->pts_wrap_reference = pts_wrap_reference;
912 s->streams[program->stream_index[i]]->pts_wrap_behavior = pts_wrap_behavior;
915 program->pts_wrap_reference = pts_wrap_reference;
916 program->pts_wrap_behavior = pts_wrap_behavior;
918 program = av_find_program_from_stream(s, program, stream_index);
/* Once a real dts is known for the stream, convert all previously queued
 * "relative" timestamps (based at RELATIVE_TS_BASE) into absolute ones by
 * applying the computed shift, back-fill missing dts from a sorted pts
 * reorder buffer, and apply wrap correction where needed. (Several lines
 * are not visible in this excerpt.) */
926 static void update_initial_timestamps(AVFormatContext *s, int stream_index,
927 int64_t dts, int64_t pts, AVPacket *pkt)
929 AVStream *st= s->streams[stream_index];
930 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
931 int64_t pts_buffer[MAX_REORDER_DELAY+1];
/* Bail out when first_dts is already set, dts is unusable, or cur_dts is
 * not in a state we can shift from. */
935 if(st->first_dts != AV_NOPTS_VALUE || dts == AV_NOPTS_VALUE || st->cur_dts == AV_NOPTS_VALUE || is_relative(dts))
938 delay = st->codec->has_b_frames;
939 st->first_dts= dts - (st->cur_dts - RELATIVE_TS_BASE);
941 shift = st->first_dts - RELATIVE_TS_BASE;
943 for (i=0; i<MAX_REORDER_DELAY+1; i++)
944 pts_buffer[i] = AV_NOPTS_VALUE;
946 if (is_relative(pts))
949 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
950 if(pktl->pkt.stream_index != stream_index)
952 if(is_relative(pktl->pkt.pts))
953 pktl->pkt.pts += shift;
955 if(is_relative(pktl->pkt.dts))
956 pktl->pkt.dts += shift;
958 if(st->start_time == AV_NOPTS_VALUE && pktl->pkt.pts != AV_NOPTS_VALUE)
959 st->start_time= pktl->pkt.pts;
/* Insertion-sort each pts into the reorder buffer; the smallest becomes the
 * dts for packets that lack one. */
961 if(pktl->pkt.pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
962 pts_buffer[0]= pktl->pkt.pts;
963 for(i=0; i<delay && pts_buffer[i] > pts_buffer[i+1]; i++)
964 FFSWAP(int64_t, pts_buffer[i], pts_buffer[i+1]);
965 if(pktl->pkt.dts == AV_NOPTS_VALUE)
966 pktl->pkt.dts= pts_buffer[0];
970 if (update_wrap_reference(s, st, stream_index) && st->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
971 // correct first time stamps to negative values
972 st->first_dts = wrap_timestamp(st, st->first_dts);
973 st->cur_dts = wrap_timestamp(st, st->cur_dts);
974 pkt->dts = wrap_timestamp(st, pkt->dts);
975 pkt->pts = wrap_timestamp(st, pkt->pts);
976 pts = wrap_timestamp(st, pts);
979 if (st->start_time == AV_NOPTS_VALUE)
980 st->start_time = pts;
/* Once a packet duration is known, back-fill dts/pts/duration for queued
 * packets of the stream that still have none, spacing them by `duration`
 * starting from first_dts (or RELATIVE_TS_BASE). Sanity-checks that the
 * first queued dts matches first_dts before doing so. (Several lines are
 * not visible in this excerpt.) */
983 static void update_initial_durations(AVFormatContext *s, AVStream *st,
984 int stream_index, int duration)
986 AVPacketList *pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
987 int64_t cur_dts= RELATIVE_TS_BASE;
989 if(st->first_dts != AV_NOPTS_VALUE){
990 cur_dts= st->first_dts;
991 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
992 if(pktl->pkt.stream_index == stream_index){
993 if(pktl->pkt.pts != pktl->pkt.dts || pktl->pkt.dts != AV_NOPTS_VALUE || pktl->pkt.duration)
998 if(pktl && pktl->pkt.dts != st->first_dts) {
999 av_log(s, AV_LOG_DEBUG, "first_dts %s not matching first dts %s (pts %s, duration %d) in the queue\n",
1000 av_ts2str(st->first_dts), av_ts2str(pktl->pkt.dts), av_ts2str(pktl->pkt.pts), pktl->pkt.duration);
1004 av_log(s, AV_LOG_DEBUG, "first_dts %s but no packet with dts in the queue\n", av_ts2str(st->first_dts));
1007 pktl= s->parse_queue ? s->parse_queue : s->packet_buffer;
1008 st->first_dts = cur_dts;
1009 }else if(st->cur_dts != RELATIVE_TS_BASE)
/* Fill each timestamp-less queued packet with the running cur_dts; pts is
 * only mirrored when the codec has no B-frame reordering. */
1012 for(; pktl; pktl= get_next_pkt(s, st, pktl)){
1013 if(pktl->pkt.stream_index != stream_index)
1015 if(pktl->pkt.pts == pktl->pkt.dts && (pktl->pkt.dts == AV_NOPTS_VALUE || pktl->pkt.dts == st->first_dts)
1016 && !pktl->pkt.duration){
1017 pktl->pkt.dts= cur_dts;
1018 if(!st->codec->has_b_frames)
1019 pktl->pkt.pts= cur_dts;
1020 // if (st->codec->codec_type != AVMEDIA_TYPE_AUDIO)
1021 pktl->pkt.duration = duration;
1024 cur_dts = pktl->pkt.dts + pktl->pkt.duration;
1027 st->cur_dts= cur_dts;
/* Central timestamp fix-up for demuxed packets: estimate missing durations,
 * detect B-frame presentation delay, correct wrap-around between pts/dts,
 * interpolate missing pts/dts from cur_dts and the pts reorder buffer, and
 * mark keyframes for intra-only codecs. Honors AVFMT_FLAG_NOFILLIN and
 * AVFMT_FLAG_IGNDTS. (Several lines are not visible in this excerpt.) */
1030 static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
1031 AVCodecParserContext *pc, AVPacket *pkt)
1033 int num, den, presentation_delayed, delay, i;
1036 if (s->flags & AVFMT_FLAG_NOFILLIN)
1039 if((s->flags & AVFMT_FLAG_IGNDTS) && pkt->pts != AV_NOPTS_VALUE)
1040 pkt->dts= AV_NOPTS_VALUE;
1042 if (pc && pc->pict_type == AV_PICTURE_TYPE_B
1043 && !st->codec->has_b_frames)
1044 //FIXME Set low_delay = 0 when has_b_frames = 1
1045 st->codec->has_b_frames = 1;
1047 /* do we have a video B-frame ? */
1048 delay= st->codec->has_b_frames;
1049 presentation_delayed = 0;
1051 /* XXX: need has_b_frame, but cannot get it if the codec is
1054 pc && pc->pict_type != AV_PICTURE_TYPE_B)
1055 presentation_delayed = 1;
/* Detect a dts that wrapped while pts did not (or vice versa) and undo it. */
1057 if (pkt->pts != AV_NOPTS_VALUE && pkt->dts != AV_NOPTS_VALUE &&
1058 st->pts_wrap_bits < 63 &&
1059 pkt->dts - (1LL << (st->pts_wrap_bits - 1)) > pkt->pts) {
1060 if(is_relative(st->cur_dts) || pkt->dts - (1LL<<(st->pts_wrap_bits-1)) > st->cur_dts) {
1061 pkt->dts -= 1LL<<st->pts_wrap_bits;
1063 pkt->pts += 1LL<<st->pts_wrap_bits;
1066 // some mpeg2 in mpeg-ps lack dts (issue171 / input_file.mpg)
1067 // we take the conservative approach and discard both
1068 // Note, if this is misbehaving for a H.264 file then possibly presentation_delayed is not set correctly.
1069 if(delay==1 && pkt->dts == pkt->pts && pkt->dts != AV_NOPTS_VALUE && presentation_delayed){
1070 av_log(s, AV_LOG_DEBUG, "invalid dts/pts combination %"PRIi64"\n", pkt->dts);
1071 if(strcmp(s->iformat->name, "mov,mp4,m4a,3gp,3g2,mj2")) // otherwise we discard correct timestamps for vc1-wmapro.ism
1072 pkt->dts= AV_NOPTS_VALUE;
1075 if (pkt->duration == 0) {
1076 ff_compute_frame_duration(&num, &den, st, pc, pkt);
1078 pkt->duration = av_rescale_rnd(1, num * (int64_t)st->time_base.den, den * (int64_t)st->time_base.num, AV_ROUND_DOWN);
1081 if(pkt->duration != 0 && (s->packet_buffer || s->parse_queue))
1082 update_initial_durations(s, st, pkt->stream_index, pkt->duration);
1084 /* correct timestamps with byte offset if demuxers only have timestamps
1085 on packet boundaries */
1086 if(pc && st->need_parsing == AVSTREAM_PARSE_TIMESTAMPS && pkt->size){
1087 /* this will estimate bitrate based on this frame's duration and size */
1088 offset = av_rescale(pc->offset, pkt->duration, pkt->size);
1089 if(pkt->pts != AV_NOPTS_VALUE)
1091 if(pkt->dts != AV_NOPTS_VALUE)
1095 if (pc && pc->dts_sync_point >= 0) {
1096 // we have synchronization info from the parser
1097 int64_t den = st->codec->time_base.den * (int64_t) st->time_base.num;
1099 int64_t num = st->codec->time_base.num * (int64_t) st->time_base.den;
1100 if (pkt->dts != AV_NOPTS_VALUE) {
1101 // got DTS from the stream, update reference timestamp
1102 st->reference_dts = pkt->dts - pc->dts_ref_dts_delta * num / den;
1103 } else if (st->reference_dts != AV_NOPTS_VALUE) {
1104 // compute DTS based on reference timestamp
1105 pkt->dts = st->reference_dts + pc->dts_ref_dts_delta * num / den;
1108 if (st->reference_dts != AV_NOPTS_VALUE && pkt->pts == AV_NOPTS_VALUE)
1109 pkt->pts = pkt->dts + pc->pts_dts_delta * num / den;
1111 if (pc->dts_sync_point > 0)
1112 st->reference_dts = pkt->dts; // new reference
1116 /* This may be redundant, but it should not hurt. */
1117 if(pkt->dts != AV_NOPTS_VALUE && pkt->pts != AV_NOPTS_VALUE && pkt->pts > pkt->dts)
1118 presentation_delayed = 1;
1120 av_dlog(NULL, "IN delayed:%d pts:%s, dts:%s cur_dts:%s st:%d pc:%p duration:%d\n",
1121 presentation_delayed, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts), pkt->stream_index, pc, pkt->duration);
1122 /* interpolate PTS and DTS if they are not present */
1123 //We skip H264 currently because delay and has_b_frames are not reliably set
1124 if((delay==0 || (delay==1 && pc)) && st->codec->codec_id != AV_CODEC_ID_H264){
1125 if (presentation_delayed) {
1126 /* DTS = decompression timestamp */
1127 /* PTS = presentation timestamp */
1128 if (pkt->dts == AV_NOPTS_VALUE)
1129 pkt->dts = st->last_IP_pts;
1130 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt);
1131 if (pkt->dts == AV_NOPTS_VALUE)
1132 pkt->dts = st->cur_dts;
1134 /* this is tricky: the dts must be incremented by the duration
1135 of the frame we are displaying, i.e. the last I- or P-frame */
1136 if (st->last_IP_duration == 0)
1137 st->last_IP_duration = pkt->duration;
1138 if(pkt->dts != AV_NOPTS_VALUE)
1139 st->cur_dts = pkt->dts + st->last_IP_duration;
1140 st->last_IP_duration = pkt->duration;
1141 st->last_IP_pts= pkt->pts;
1142 /* cannot compute PTS if not present (we can compute it only
1143 by knowing the future */
1144 } else if (pkt->pts != AV_NOPTS_VALUE ||
1145 pkt->dts != AV_NOPTS_VALUE ||
1147 int duration = pkt->duration;
1149 /* presentation is not delayed : PTS and DTS are the same */
1150 if (pkt->pts == AV_NOPTS_VALUE)
1151 pkt->pts = pkt->dts;
1152 update_initial_timestamps(s, pkt->stream_index, pkt->pts,
1154 if (pkt->pts == AV_NOPTS_VALUE)
1155 pkt->pts = st->cur_dts;
1156 pkt->dts = pkt->pts;
1157 if (pkt->pts != AV_NOPTS_VALUE)
1158 st->cur_dts = pkt->pts + duration;
/* Maintain the per-stream pts reorder buffer and derive dts from it. */
1162 if(pkt->pts != AV_NOPTS_VALUE && delay <= MAX_REORDER_DELAY && has_decode_delay_been_guessed(st)){
1163 st->pts_buffer[0]= pkt->pts;
1164 for(i=0; i<delay && st->pts_buffer[i] > st->pts_buffer[i+1]; i++)
1165 FFSWAP(int64_t, st->pts_buffer[i], st->pts_buffer[i+1]);
1166 if(pkt->dts == AV_NOPTS_VALUE)
1167 pkt->dts= st->pts_buffer[0];
1169 if(st->codec->codec_id == AV_CODEC_ID_H264){ // we skipped it above so we try here
1170 update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts, pkt); // this should happen on the first packet
1172 if(pkt->dts > st->cur_dts)
1173 st->cur_dts = pkt->dts;
1175 av_dlog(NULL, "OUTdelayed:%d/%d pts:%s, dts:%s cur_dts:%s\n",
1176 presentation_delayed, delay, av_ts2str(pkt->pts), av_ts2str(pkt->dts), av_ts2str(st->cur_dts));
1179 if (is_intra_only(st->codec))
1180 pkt->flags |= AV_PKT_FLAG_KEY;
1182 pkt->convergence_duration = pc->convergence_duration;
/* Drain a packet list: unreference each queued packet and detach its node,
 * then clear the tail pointer so the queue is left empty and consistent.
 * NOTE(review): the surrounding loop/free of the list node is elided in this
 * extract — confirm against the full source. */
1185 static void free_packet_buffer(AVPacketList **pkt_buf, AVPacketList **pkt_buf_end)
1188 AVPacketList *pktl = *pkt_buf;
1189 *pkt_buf = pktl->next;
1190 av_free_packet(&pktl->pkt);
1193 *pkt_buf_end = NULL;
1197 * Parse a packet, add all split parts to parse_queue
1199 * @param pkt packet to parse, NULL when flushing the parser at end of stream
/* Feeds pkt through the stream's AVCodecParser; each complete frame produced
 * becomes its own AVPacket appended to s->parse_queue.  A zero-initialized
 * flush_pkt stands in for pkt when flushing at EOF. */
1201 static int parse_packet(AVFormatContext *s, AVPacket *pkt, int stream_index)
1203 AVPacket out_pkt = { 0 }, flush_pkt = { 0 };
1204 AVStream *st = s->streams[stream_index];
1205 uint8_t *data = pkt ? pkt->data : NULL;
1206 int size = pkt ? pkt->size : 0;
1207 int ret = 0, got_output = 0;
1210 av_init_packet(&flush_pkt);
/* Parsers that only emit complete frames pass 0-size sync packets through. */
1213 } else if (!size && st->parser->flags & PARSER_FLAG_COMPLETE_FRAMES) {
1214 // preserve 0-size sync packets
1215 compute_pkt_fields(s, st, st->parser, pkt);
/* Loop until the input is consumed; when flushing, keep going while the
 * parser still has buffered frames to emit. */
1218 while (size > 0 || (pkt == &flush_pkt && got_output)) {
1221 av_init_packet(&out_pkt);
1222 len = av_parser_parse2(st->parser, st->codec,
1223 &out_pkt.data, &out_pkt.size, data, size,
1224 pkt->pts, pkt->dts, pkt->pos);
/* Timestamps were handed to the parser; invalidate them on the input so
 * they are not applied to a second output frame. */
1226 pkt->pts = pkt->dts = AV_NOPTS_VALUE;
1228 /* increment read pointer */
1232 got_output = !!out_pkt.size;
/* Move (not copy) side data onto the first output packet. */
1237 if (pkt->side_data) {
1238 out_pkt.side_data = pkt->side_data;
1239 out_pkt.side_data_elems = pkt->side_data_elems;
1240 pkt->side_data = NULL;
1241 pkt->side_data_elems = 0;
1244 /* set the duration */
1245 out_pkt.duration = 0;
1246 if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
1247 if (st->codec->sample_rate > 0) {
/* Audio: parser duration is in samples; rescale to the stream time base. */
1248 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1249 (AVRational){ 1, st->codec->sample_rate },
1253 } else if (st->codec->time_base.num != 0 &&
1254 st->codec->time_base.den != 0) {
1255 out_pkt.duration = av_rescale_q_rnd(st->parser->duration,
1256 st->codec->time_base,
1261 out_pkt.stream_index = st->index;
1262 out_pkt.pts = st->parser->pts;
1263 out_pkt.dts = st->parser->dts;
1264 out_pkt.pos = st->parser->pos;
1266 if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
1267 out_pkt.pos = st->parser->frame_offset;
/* Key-frame flag: trust the parser when it decided (1), otherwise fall
 * back to picture type, and finally to the input packet's own flag. */
1269 if (st->parser->key_frame == 1 ||
1270 (st->parser->key_frame == -1 &&
1271 st->parser->pict_type == AV_PICTURE_TYPE_I))
1272 out_pkt.flags |= AV_PKT_FLAG_KEY;
1274 if(st->parser->key_frame == -1 && st->parser->pict_type==AV_PICTURE_TYPE_NONE && (pkt->flags&AV_PKT_FLAG_KEY))
1275 out_pkt.flags |= AV_PKT_FLAG_KEY;
1277 compute_pkt_fields(s, st, st->parser, &out_pkt);
/* If the parser returned the input buffer unchanged, transfer ownership
 * instead of duplicating the data. */
1279 if (out_pkt.data == pkt->data && out_pkt.size == pkt->size) {
1280 out_pkt.buf = pkt->buf;
1282 #if FF_API_DESTRUCT_PACKET
1283 FF_DISABLE_DEPRECATION_WARNINGS
1284 out_pkt.destruct = pkt->destruct;
1285 pkt->destruct = NULL;
1286 FF_ENABLE_DEPRECATION_WARNINGS
1289 if ((ret = av_dup_packet(&out_pkt)) < 0)
1292 if (!add_to_pktbuf(&s->parse_queue, &out_pkt, &s->parse_queue_end)) {
1293 av_free_packet(&out_pkt);
1294 ret = AVERROR(ENOMEM);
1300 /* end of the stream => close and free the parser */
1301 if (pkt == &flush_pkt) {
1302 av_parser_close(st->parser);
1307 av_free_packet(pkt);
/* Pop the head packet of a packet list into the caller's pkt (parameter
 * elided in this extract); clears the tail pointer when the list empties.
 * Asserts the list is non-empty — callers must check before calling. */
1311 static int read_from_packet_buffer(AVPacketList **pkt_buffer,
1312 AVPacketList **pkt_buffer_end,
1316 av_assert0(*pkt_buffer);
1319 *pkt_buffer = pktl->next;
1321 *pkt_buffer_end = NULL;
/* Core demux step: pull raw packets via ff_read_packet(), lazily create a
 * parser per stream when needed, route packets through parse_packet() or
 * emit them directly, and finally return one packet from the parse queue.
 * Returns 0 on success or a negative AVERROR (EAGAIN is propagated). */
1326 static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
1328 int ret = 0, i, got_packet = 0;
1330 av_init_packet(pkt);
1332 while (!got_packet && !s->parse_queue) {
1336 /* read next packet */
1337 ret = ff_read_packet(s, &cur_pkt);
1339 if (ret == AVERROR(EAGAIN))
/* On EOF (error path elided here): flush every active parser so buffered
 * frames land in parse_queue before parsing terminates. */
1341 /* flush the parsers */
1342 for(i = 0; i < s->nb_streams; i++) {
1344 if (st->parser && st->need_parsing)
1345 parse_packet(s, NULL, st->index);
1347 /* all remaining packets are now in parse_queue =>
1348 * really terminate parsing */
1352 st = s->streams[cur_pkt.stream_index];
/* Sanity check: pts must never be earlier than dts for a valid packet. */
1354 if (cur_pkt.pts != AV_NOPTS_VALUE &&
1355 cur_pkt.dts != AV_NOPTS_VALUE &&
1356 cur_pkt.pts < cur_pkt.dts) {
1357 av_log(s, AV_LOG_WARNING, "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
1358 cur_pkt.stream_index,
1359 av_ts2str(cur_pkt.pts),
1360 av_ts2str(cur_pkt.dts),
1363 if (s->debug & FF_FDEBUG_TS)
1364 av_log(s, AV_LOG_DEBUG, "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1365 cur_pkt.stream_index,
1366 av_ts2str(cur_pkt.pts),
1367 av_ts2str(cur_pkt.dts),
/* Lazily create the parser; configure its flags from need_parsing mode. */
1372 if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
1373 st->parser = av_parser_init(st->codec->codec_id);
1375 av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
1376 "%s, packets or times may be invalid.\n",
1377 avcodec_get_name(st->codec->codec_id));
1378 /* no parser available: just output the raw packets */
1379 st->need_parsing = AVSTREAM_PARSE_NONE;
1380 } else if(st->need_parsing == AVSTREAM_PARSE_HEADERS) {
1381 st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
1382 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_ONCE) {
1383 st->parser->flags |= PARSER_FLAG_ONCE;
1384 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
1385 st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
1389 if (!st->need_parsing || !st->parser) {
1390 /* no parsing needed: we just output the packet as is */
1392 compute_pkt_fields(s, st, NULL, pkt);
/* Generic-index formats record every keyframe position for seeking. */
1393 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
1394 (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
1395 ff_reduce_index(s, st->index);
1396 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
1399 } else if (st->discard < AVDISCARD_ALL) {
1400 if ((ret = parse_packet(s, &cur_pkt, cur_pkt.stream_index)) < 0)
1404 av_free_packet(&cur_pkt);
/* skip_to_keyframe: drop packets until the first keyframe is seen. */
1406 if (pkt->flags & AV_PKT_FLAG_KEY)
1407 st->skip_to_keyframe = 0;
1408 if (st->skip_to_keyframe) {
1409 av_free_packet(&cur_pkt);
1417 if (!got_packet && s->parse_queue)
1418 ret = read_from_packet_buffer(&s->parse_queue, &s->parse_queue_end, pkt);
1420 if(s->debug & FF_FDEBUG_TS)
1421 av_log(s, AV_LOG_DEBUG, "read_frame_internal stream=%d, pts=%s, dts=%s, size=%d, duration=%d, flags=%d\n",
1423 av_ts2str(pkt->pts),
1424 av_ts2str(pkt->dts),
/* Public API: return the next frame of a stream.  Without AVFMT_FLAG_GENPTS
 * this is a thin wrapper over the packet buffer / read_frame_internal(); with
 * GENPTS it buffers packets and scans ahead to synthesize missing pts values
 * from later dts on the same stream before releasing a packet. */
1432 int av_read_frame(AVFormatContext *s, AVPacket *pkt)
1434 const int genpts = s->flags & AVFMT_FLAG_GENPTS;
1440 ret = s->packet_buffer ?
1441 read_from_packet_buffer(&s->packet_buffer, &s->packet_buffer_end, pkt) :
1442 read_frame_internal(s, pkt);
1449 AVPacketList *pktl = s->packet_buffer;
1452 AVPacket *next_pkt = &pktl->pkt;
1454 if (next_pkt->dts != AV_NOPTS_VALUE) {
1455 int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
1456 // last dts seen for this stream. if any of packets following
1457 // current one had no dts, we will set this to AV_NOPTS_VALUE.
1458 int64_t last_dts = next_pkt->dts;
/* Scan buffered packets of the same stream; a later packet's dts becomes
 * this packet's pts (wrap-aware comparison via av_compare_mod). */
1459 while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
1460 if (pktl->pkt.stream_index == next_pkt->stream_index &&
1461 (av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2LL << (wrap_bits - 1)) < 0)) {
1462 if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2LL << (wrap_bits - 1))) { //not b frame
1463 next_pkt->pts = pktl->pkt.dts;
1465 if (last_dts != AV_NOPTS_VALUE) {
1466 // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
1467 last_dts = pktl->pkt.dts;
1472 if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
1473 // Fixing the last reference frame had none pts issue (For MXF etc).
1474 // We only do this when
1476 // 2. we are not able to resolve a pts value for current packet.
1477 // 3. the packets for this stream at the end of the files had valid dts.
1478 next_pkt->pts = last_dts + next_pkt->duration;
1480 pktl = s->packet_buffer;
1483 /* read packet from packet buffer, if there is data */
1484 if (!(next_pkt->pts == AV_NOPTS_VALUE &&
1485 next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
1486 ret = read_from_packet_buffer(&s->packet_buffer,
1487 &s->packet_buffer_end, pkt);
/* Nothing resolvable yet: fetch more packets and queue them. */
1492 ret = read_frame_internal(s, pkt);
1494 if (pktl && ret != AVERROR(EAGAIN)) {
1501 if (av_dup_packet(add_to_pktbuf(&s->packet_buffer, pkt,
1502 &s->packet_buffer_end)) < 0)
1503 return AVERROR(ENOMEM);
1508 st = s->streams[pkt->stream_index];
/* Inject pending skip_samples as packet side data for the decoder. */
1509 if (st->skip_samples) {
1510 uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
1512 AV_WL32(p, st->skip_samples);
1513 av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d\n", st->skip_samples);
1515 st->skip_samples = 0;
1518 if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
1519 ff_reduce_index(s, st->index);
1520 av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
/* Strip the internal RELATIVE_TS_BASE offset before handing the packet
 * to the caller — timestamps must be in the stream's real origin. */
1523 if (is_relative(pkt->dts))
1524 pkt->dts -= RELATIVE_TS_BASE;
1525 if (is_relative(pkt->pts))
1526 pkt->pts -= RELATIVE_TS_BASE;
1531 /* XXX: suppress the packet queue */
/* Drop every queued packet (parse, generic and raw queues) and reset the
 * raw-buffer budget; used when flushing the demuxer (e.g. around seeks). */
1532 static void flush_packet_queue(AVFormatContext *s)
1534 free_packet_buffer(&s->parse_queue, &s->parse_queue_end);
1535 free_packet_buffer(&s->packet_buffer, &s->packet_buffer_end);
1536 free_packet_buffer(&s->raw_packet_buffer, &s->raw_packet_buffer_end);
1538 s->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;
1541 /*******************************************************/
/* Pick the default stream for seeking/playback: prefer the first real video
 * stream (attached pictures excluded — the early return for that case is
 * elided here), else the first audio stream, else stream 0. */
1544 int av_find_default_stream_index(AVFormatContext *s)
1546 int first_audio_index = -1;
1550 if (s->nb_streams <= 0)
1552 for(i = 0; i < s->nb_streams; i++) {
1554 if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO &&
1555 !(st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
1558 if (first_audio_index < 0 && st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
1559 first_audio_index = i;
1561 return first_audio_index >= 0 ? first_audio_index : 0;
1565 * Flush the frame reader.
/* Drop all queued packets and reset per-stream demux state (parser, last
 * pts/dts bookkeeping, reorder buffer) so reading can restart cleanly. */
1567 void ff_read_frame_flush(AVFormatContext *s)
1572 flush_packet_queue(s);
1574 /* for each stream, reset read state */
1575 for(i = 0; i < s->nb_streams; i++) {
1579 av_parser_close(st->parser);
1582 st->last_IP_pts = AV_NOPTS_VALUE;
/* Streams whose first dts is still unknown keep using the relative-ts
 * origin; others fall back to "unspecified". */
1583 if(st->first_dts == AV_NOPTS_VALUE) st->cur_dts = RELATIVE_TS_BASE;
1584 else st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
1585 st->reference_dts = AV_NOPTS_VALUE;
1587 st->probe_packets = MAX_PROBE_PACKETS;
1589 for(j=0; j<MAX_REORDER_DELAY+1; j++)
1590 st->pts_buffer[j]= AV_NOPTS_VALUE;
/* After a seek on ref_st, propagate the new timestamp to every stream's
 * cur_dts, rescaled from ref_st's time base into each stream's own. */
1594 void ff_update_cur_dts(AVFormatContext *s, AVStream *ref_st, int64_t timestamp)
1598 for(i = 0; i < s->nb_streams; i++) {
1599 AVStream *st = s->streams[i];
1601 st->cur_dts = av_rescale(timestamp,
1602 st->time_base.den * (int64_t)ref_st->time_base.num,
1603 st->time_base.num * (int64_t)ref_st->time_base.den);
/* Keep the seek index within max_index_size bytes: when full, halve it by
 * keeping every second entry (coarser but bounded-memory index). */
1607 void ff_reduce_index(AVFormatContext *s, int stream_index)
1609 AVStream *st= s->streams[stream_index];
1610 unsigned int max_entries= s->max_index_size / sizeof(AVIndexEntry);
1612 if((unsigned)st->nb_index_entries >= max_entries){
1614 for(i=0; 2*i<st->nb_index_entries; i++)
1615 st->index_entries[i]= st->index_entries[2*i];
1616 st->nb_index_entries= i;
/* Insert (or update) an index entry keeping the array sorted by timestamp.
 * Validates inputs, grows the array via av_fast_realloc, finds the insert
 * position with ff_index_search_timestamp, and shifts entries when a new
 * timestamp lands in the middle.  Returns the entry index (error returns
 * elided in this extract). */
1620 int ff_add_index_entry(AVIndexEntry **index_entries,
1621 int *nb_index_entries,
1622 unsigned int *index_entries_allocated_size,
1623 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1625 AVIndexEntry *entries, *ie;
/* Guard against overflow of (nb+1)*sizeof(AVIndexEntry). */
1628 if((unsigned)*nb_index_entries + 1 >= UINT_MAX / sizeof(AVIndexEntry))
1631 if(timestamp == AV_NOPTS_VALUE)
1632 return AVERROR(EINVAL);
1634 if (size < 0 || size > 0x3FFFFFFF)
1635 return AVERROR(EINVAL);
1637 if (is_relative(timestamp)) //FIXME this maintains previous behavior but we should shift by the correct offset once known
1638 timestamp -= RELATIVE_TS_BASE;
1640 entries = av_fast_realloc(*index_entries,
1641 index_entries_allocated_size,
1642 (*nb_index_entries + 1) *
1643 sizeof(AVIndexEntry));
1647 *index_entries= entries;
1649 index= ff_index_search_timestamp(*index_entries, *nb_index_entries, timestamp, AVSEEK_FLAG_ANY);
/* Not found past the end: append (must keep ordering invariant). */
1652 index= (*nb_index_entries)++;
1653 ie= &entries[index];
1654 av_assert0(index==0 || ie[-1].timestamp < timestamp);
1656 ie= &entries[index];
1657 if(ie->timestamp != timestamp){
1658 if(ie->timestamp <= timestamp)
1660 memmove(entries + index + 1, entries + index, sizeof(AVIndexEntry)*(*nb_index_entries - index));
1661 (*nb_index_entries)++;
1662 }else if(ie->pos == pos && distance < ie->min_distance) //do not reduce the distance
1663 distance= ie->min_distance;
1667 ie->timestamp = timestamp;
1668 ie->min_distance= distance;
/* Public wrapper: wrap the timestamp for pts_wrap handling, then delegate to
 * ff_add_index_entry() on the stream's own index arrays. */
1675 int av_add_index_entry(AVStream *st,
1676 int64_t pos, int64_t timestamp, int size, int distance, int flags)
1678 timestamp = wrap_timestamp(st, timestamp);
1679 return ff_add_index_entry(&st->index_entries, &st->nb_index_entries,
1680 &st->index_entries_allocated_size, pos,
1681 timestamp, size, distance, flags);
/* Binary search over a timestamp-sorted index.  Honors AVSEEK_FLAG_BACKWARD
 * (pick entry <= wanted vs >= wanted) and, unless AVSEEK_FLAG_ANY, walks to
 * the nearest keyframe entry in the chosen direction. */
1684 int ff_index_search_timestamp(const AVIndexEntry *entries, int nb_entries,
1685 int64_t wanted_timestamp, int flags)
1693 //optimize appending index entries at the end
1694 if(b && entries[b-1].timestamp < wanted_timestamp)
1699 timestamp = entries[m].timestamp;
1700 if(timestamp >= wanted_timestamp)
1702 if(timestamp <= wanted_timestamp)
1705 m= (flags & AVSEEK_FLAG_BACKWARD) ? a : b;
1707 if(!(flags & AVSEEK_FLAG_ANY)){
1708 while(m>=0 && m<nb_entries && !(entries[m].flags & AVINDEX_KEYFRAME)){
1709 m += (flags & AVSEEK_FLAG_BACKWARD) ? -1 : 1;
/* Public wrapper over ff_index_search_timestamp() on the stream's index. */
1718 int av_index_search_timestamp(AVStream *st, int64_t wanted_timestamp,
1721 return ff_index_search_timestamp(st->index_entries, st->nb_index_entries,
1722 wanted_timestamp, flags);
/* Call the demuxer's read_timestamp callback and apply per-stream timestamp
 * wrapping when a valid stream index was given. */
1725 static int64_t ff_read_timestamp(AVFormatContext *s, int stream_index, int64_t *ppos, int64_t pos_limit,
1726 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1728 int64_t ts = read_timestamp(s, stream_index, ppos, pos_limit);
1729 if (stream_index >= 0)
1730 ts = wrap_timestamp(s->streams[stream_index], ts);
/* Binary-search seek for formats with a read_timestamp callback: seed the
 * search bounds from cached index entries when available, run ff_gen_search()
 * to locate the byte position for target_ts, then reposition the I/O layer,
 * flush the reader and update all streams' cur_dts. */
1734 int ff_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts, int flags)
1736 AVInputFormat *avif= s->iformat;
1737 int64_t av_uninit(pos_min), av_uninit(pos_max), pos, pos_limit;
1738 int64_t ts_min, ts_max, ts;
1743 if (stream_index < 0)
1746 av_dlog(s, "read_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1749 ts_min= AV_NOPTS_VALUE;
1750 pos_limit= -1; //gcc falsely says it may be uninitialized
1752 st= s->streams[stream_index];
/* Use the cached index to tighten [pos_min, pos_max] before searching. */
1753 if(st->index_entries){
1756 index= av_index_search_timestamp(st, target_ts, flags | AVSEEK_FLAG_BACKWARD); //FIXME whole func must be checked for non-keyframe entries in index case, especially read_timestamp()
1757 index= FFMAX(index, 0);
1758 e= &st->index_entries[index];
1760 if(e->timestamp <= target_ts || e->pos == e->min_distance){
1762 ts_min= e->timestamp;
1763 av_dlog(s, "using cached pos_min=0x%"PRIx64" dts_min=%s\n",
1764 pos_min, av_ts2str(ts_min));
1766 av_assert1(index==0);
1769 index= av_index_search_timestamp(st, target_ts, flags & ~AVSEEK_FLAG_BACKWARD);
1770 av_assert0(index < st->nb_index_entries);
1772 e= &st->index_entries[index];
1773 av_assert1(e->timestamp >= target_ts);
1775 ts_max= e->timestamp;
1776 pos_limit= pos_max - e->min_distance;
1777 av_dlog(s, "using cached pos_max=0x%"PRIx64" pos_limit=0x%"PRIx64" dts_max=%s\n",
1778 pos_max, pos_limit, av_ts2str(ts_max));
1782 pos= ff_gen_search(s, stream_index, target_ts, pos_min, pos_max, pos_limit, ts_min, ts_max, flags, &ts, avif->read_timestamp);
1787 if ((ret = avio_seek(s->pb, pos, SEEK_SET)) < 0)
1790 ff_read_frame_flush(s);
1791 ff_update_cur_dts(s, st, ts);
/* Find the last timestamp in the file: step backwards from the file end in
 * increasing chunks until read_timestamp yields a value, then walk forward
 * to confirm it is the final one.  Results returned via *ts / *pos. */
1796 int ff_find_last_ts(AVFormatContext *s, int stream_index, int64_t *ts, int64_t *pos,
1797 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1800 int64_t limit, ts_max;
1801 int64_t filesize = avio_size(s->pb);
1802 int64_t pos_max = filesize - 1;
1805 pos_max = FFMAX(0, (pos_max) - step);
1806 ts_max = ff_read_timestamp(s, stream_index, &pos_max, limit, read_timestamp);
1808 }while(ts_max == AV_NOPTS_VALUE && 2*limit > step);
1809 if (ts_max == AV_NOPTS_VALUE)
/* Forward scan: keep advancing while later timestamps exist. */
1813 int64_t tmp_pos = pos_max + 1;
1814 int64_t tmp_ts = ff_read_timestamp(s, stream_index, &tmp_pos, INT64_MAX, read_timestamp);
1815 if(tmp_ts == AV_NOPTS_VALUE)
1817 av_assert0(tmp_pos > pos_max);
1820 if(tmp_pos >= filesize)
/* Generic timestamp → byte-position search.  Establishes [ts_min, ts_max]
 * bounds (probing file start/end if the caller left them unset), then
 * narrows by interpolation first, bisection second, and linear search last
 * (the no_change counter tracks which stage applies).  Returns the chosen
 * position and stores the matched timestamp in *ts_ret. */
1832 int64_t ff_gen_search(AVFormatContext *s, int stream_index, int64_t target_ts,
1833 int64_t pos_min, int64_t pos_max, int64_t pos_limit,
1834 int64_t ts_min, int64_t ts_max, int flags, int64_t *ts_ret,
1835 int64_t (*read_timestamp)(struct AVFormatContext *, int , int64_t *, int64_t ))
1842 av_dlog(s, "gen_seek: %d %s\n", stream_index, av_ts2str(target_ts));
1844 if(ts_min == AV_NOPTS_VALUE){
1845 pos_min = s->data_offset;
1846 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1847 if (ts_min == AV_NOPTS_VALUE)
1851 if(ts_min >= target_ts){
1856 if(ts_max == AV_NOPTS_VALUE){
1857 if ((ret = ff_find_last_ts(s, stream_index, &ts_max, &pos_max, read_timestamp)) < 0)
1862 if(ts_max <= target_ts){
1867 if(ts_min > ts_max){
1869 }else if(ts_min == ts_max){
1874 while (pos_min < pos_limit) {
1875 av_dlog(s, "pos_min=0x%"PRIx64" pos_max=0x%"PRIx64" dts_min=%s dts_max=%s\n",
1876 pos_min, pos_max, av_ts2str(ts_min), av_ts2str(ts_max));
1877 assert(pos_limit <= pos_max);
1880 int64_t approximate_keyframe_distance= pos_max - pos_limit;
1881 // interpolate position (better than dichotomy)
1882 pos = av_rescale(target_ts - ts_min, pos_max - pos_min, ts_max - ts_min)
1883 + pos_min - approximate_keyframe_distance;
1884 }else if(no_change==1){
1885 // bisection, if interpolation failed to change min or max pos last time
1886 pos = (pos_min + pos_limit)>>1;
1888 /* linear search if bisection failed, can only happen if there
1889 are very few or no keyframes between min/max */
1894 else if(pos > pos_limit)
1898 ts = ff_read_timestamp(s, stream_index, &pos, INT64_MAX, read_timestamp); //may pass pos_limit instead of -1
1903 av_dlog(s, "%"PRId64" %"PRId64" %"PRId64" / %s %s %s target:%s limit:%"PRId64" start:%"PRId64" noc:%d\n",
1904 pos_min, pos, pos_max,
1905 av_ts2str(ts_min), av_ts2str(ts), av_ts2str(ts_max), av_ts2str(target_ts),
1906 pos_limit, start_pos, no_change);
1907 if(ts == AV_NOPTS_VALUE){
1908 av_log(s, AV_LOG_ERROR, "read_timestamp() failed in the middle\n");
1911 assert(ts != AV_NOPTS_VALUE);
/* Tighten the bound on the side the probe fell on. */
1912 if (target_ts <= ts) {
1913 pos_limit = start_pos - 1;
1917 if (target_ts >= ts) {
1923 pos = (flags & AVSEEK_FLAG_BACKWARD) ? pos_min : pos_max;
1924 ts = (flags & AVSEEK_FLAG_BACKWARD) ? ts_min : ts_max;
1927 ts_min = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1929 ts_max = ff_read_timestamp(s, stream_index, &pos_min, INT64_MAX, read_timestamp);
1930 av_dlog(s, "pos=0x%"PRIx64" %s<=%s<=%s\n",
1931 pos, av_ts2str(ts_min), av_ts2str(target_ts), av_ts2str(ts_max));
/* Byte-offset seek (AVSEEK_FLAG_BYTE): clamp pos into [data_offset, EOF-1],
 * reposition the I/O context and mark it repositioned. */
1937 static int seek_frame_byte(AVFormatContext *s, int stream_index, int64_t pos, int flags){
1938 int64_t pos_min, pos_max;
1940 pos_min = s->data_offset;
1941 pos_max = avio_size(s->pb) - 1;
1943 if (pos < pos_min) pos= pos_min;
1944 else if(pos > pos_max) pos= pos_max;
1946 avio_seek(s->pb, pos, SEEK_SET);
1948 s->io_repositioned = 1;
/* Index-based fallback seek: look the timestamp up in the stream's index;
 * if the target lies past the last known entry, read packets forward
 * (building the generic index as a side effect) until a keyframe beyond the
 * timestamp is found, then seek to the matching index entry. */
1953 static int seek_frame_generic(AVFormatContext *s,
1954 int stream_index, int64_t timestamp, int flags)
1961 st = s->streams[stream_index];
1963 index = av_index_search_timestamp(st, timestamp, flags);
1965 if(index < 0 && st->nb_index_entries && timestamp < st->index_entries[0].timestamp)
1968 if(index < 0 || index==st->nb_index_entries-1){
/* Position at the last indexed point (or data start) and scan forward. */
1972 if(st->nb_index_entries){
1973 av_assert0(st->index_entries);
1974 ie= &st->index_entries[st->nb_index_entries-1];
1975 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
1977 ff_update_cur_dts(s, st, ie->timestamp);
1979 if ((ret = avio_seek(s->pb, s->data_offset, SEEK_SET)) < 0)
1985 read_status = av_read_frame(s, &pkt);
1986 } while (read_status == AVERROR(EAGAIN));
1987 if (read_status < 0)
1989 av_free_packet(&pkt);
1990 if(stream_index == pkt.stream_index && pkt.dts > timestamp){
1991 if(pkt.flags & AV_PKT_FLAG_KEY)
/* Give up after too many non-keyframes — stream likely has none here
 * (CDG is exempt: it legitimately has very sparse keyframes). */
1993 if(nonkey++ > 1000 && st->codec->codec_id != AV_CODEC_ID_CDGRAPHICS){
1994 av_log(s, AV_LOG_ERROR,"seek_frame_generic failed as this stream seems to contain no keyframes after the target timestamp, %d non keyframes found\n", nonkey);
1999 index = av_index_search_timestamp(st, timestamp, flags);
2004 ff_read_frame_flush(s);
2005 if (s->iformat->read_seek){
2006 if(s->iformat->read_seek(s, stream_index, timestamp, flags) >= 0)
2009 ie = &st->index_entries[index];
2010 if ((ret = avio_seek(s->pb, ie->pos, SEEK_SET)) < 0)
2012 ff_update_cur_dts(s, st, ie->timestamp);
/* Dispatch a seek request: byte seek if requested (and allowed), otherwise
 * try the demuxer's own read_seek, then binary search via read_timestamp,
 * then the generic index-based seek — each gated by format capability flags. */
2017 static int seek_frame_internal(AVFormatContext *s, int stream_index,
2018 int64_t timestamp, int flags)
2023 if (flags & AVSEEK_FLAG_BYTE) {
2024 if (s->iformat->flags & AVFMT_NO_BYTE_SEEK)
2026 ff_read_frame_flush(s);
2027 return seek_frame_byte(s, stream_index, timestamp, flags);
2030 if(stream_index < 0){
2031 stream_index= av_find_default_stream_index(s);
2032 if(stream_index < 0)
2035 st= s->streams[stream_index];
2036 /* timestamp for default must be expressed in AV_TIME_BASE units */
2037 timestamp = av_rescale(timestamp, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
2040 /* first, we try the format specific seek */
2041 if (s->iformat->read_seek) {
2042 ff_read_frame_flush(s);
2043 ret = s->iformat->read_seek(s, stream_index, timestamp, flags);
2050 if (s->iformat->read_timestamp && !(s->iformat->flags & AVFMT_NOBINSEARCH)) {
2051 ff_read_frame_flush(s);
2052 return ff_seek_frame_binary(s, stream_index, timestamp, flags);
2053 } else if (!(s->iformat->flags & AVFMT_NOGENSEARCH)) {
2054 ff_read_frame_flush(s);
2055 return seek_frame_generic(s, stream_index, timestamp, flags);
/* Public seek API (old semantics).  Formats implementing only read_seek2 are
 * served through avformat_seek_file() with a min/max window derived from the
 * BACKWARD flag; otherwise delegate to seek_frame_internal() and requeue any
 * attached pictures on success. */
2061 int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
2065 if (s->iformat->read_seek2 && !s->iformat->read_seek) {
2066 int64_t min_ts = INT64_MIN, max_ts = INT64_MAX;
2067 if ((flags & AVSEEK_FLAG_BACKWARD))
2071 return avformat_seek_file(s, stream_index, min_ts, timestamp, max_ts,
2072 flags & ~AVSEEK_FLAG_BACKWARD);
2075 ret = seek_frame_internal(s, stream_index, timestamp, flags);
2078 ret = avformat_queue_attached_pictures(s);
/* New-style seek API: seek to ts constrained to [min_ts, max_ts].  Prefers
 * the demuxer's read_seek2 (rescaling the window into the stream time base
 * for single-stream default-index requests); otherwise falls back on the old
 * av_seek_frame() API, retrying at the window edge on failure. */
2083 int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
2085 if(min_ts > ts || max_ts < ts)
2087 if (stream_index < -1 || stream_index >= (int)s->nb_streams)
2088 return AVERROR(EINVAL);
2091 flags |= AVSEEK_FLAG_ANY;
2092 flags &= ~AVSEEK_FLAG_BACKWARD;
2094 if (s->iformat->read_seek2) {
2096 ff_read_frame_flush(s);
2098 if (stream_index == -1 && s->nb_streams == 1) {
2099 AVRational time_base = s->streams[0]->time_base;
2100 ts = av_rescale_q(ts, AV_TIME_BASE_Q, time_base);
/* Round the window conservatively: min up, max down, clamped. */
2101 min_ts = av_rescale_rnd(min_ts, time_base.den,
2102 time_base.num * (int64_t)AV_TIME_BASE,
2103 AV_ROUND_UP | AV_ROUND_PASS_MINMAX);
2104 max_ts = av_rescale_rnd(max_ts, time_base.den,
2105 time_base.num * (int64_t)AV_TIME_BASE,
2106 AV_ROUND_DOWN | AV_ROUND_PASS_MINMAX);
2109 ret = s->iformat->read_seek2(s, stream_index, min_ts, ts, max_ts, flags);
2112 ret = avformat_queue_attached_pictures(s);
2116 if(s->iformat->read_timestamp){
2117 //try to seek via read_timestamp()
2120 // Fall back on old API if new is not implemented but old is.
2121 // Note the old API has somewhat different semantics.
2122 if (s->iformat->read_seek || 1) {
2123 int dir = (ts - (uint64_t)min_ts > (uint64_t)max_ts - ts ? AVSEEK_FLAG_BACKWARD : 0);
2124 int ret = av_seek_frame(s, stream_index, ts, flags | dir);
2125 if (ret<0 && ts != min_ts && max_ts != ts) {
2126 ret = av_seek_frame(s, stream_index, dir ? max_ts : min_ts, flags | dir);
2128 ret = av_seek_frame(s, stream_index, ts, flags | (dir^AVSEEK_FLAG_BACKWARD));
2133 // try some generic seek like seek_frame_generic() but with new ts semantics
2134 return -1; //unreachable
2137 /*******************************************************/
2140 * Return TRUE if the stream has accurate duration in any stream.
2142 * @return TRUE if the stream has accurate duration for at least one component.
2144 static int has_duration(AVFormatContext *ic)
2149 for(i = 0;i < ic->nb_streams; i++) {
2150 st = ic->streams[i];
/* Any per-stream duration, or a container-level duration, counts. */
2151 if (st->duration != AV_NOPTS_VALUE)
2154 if (ic->duration != AV_NOPTS_VALUE)
2160 * Estimate the stream timings from the one of each components.
2162 * Also computes the global bitrate if possible.
/* Aggregates per-stream start_time/duration into container-level values.
 * Subtitle/data streams are tracked separately (start_time_text) so a lone
 * subtitle track cannot skew the container start time; program start/end
 * times are updated along the way. */
2164 static void update_stream_timings(AVFormatContext *ic)
2166 int64_t start_time, start_time1, start_time_text, end_time, end_time1;
2167 int64_t duration, duration1, filesize;
2172 start_time = INT64_MAX;
2173 start_time_text = INT64_MAX;
2174 end_time = INT64_MIN;
2175 duration = INT64_MIN;
2176 for(i = 0;i < ic->nb_streams; i++) {
2177 st = ic->streams[i];
2178 if (st->start_time != AV_NOPTS_VALUE && st->time_base.den) {
2179 start_time1= av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q);
2180 if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE || st->codec->codec_type == AVMEDIA_TYPE_DATA) {
2181 if (start_time1 < start_time_text)
2182 start_time_text = start_time1;
2184 start_time = FFMIN(start_time, start_time1);
2185 end_time1 = AV_NOPTS_VALUE;
2186 if (st->duration != AV_NOPTS_VALUE) {
2187 end_time1 = start_time1
2188 + av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2189 end_time = FFMAX(end_time, end_time1);
2191 for(p = NULL; (p = av_find_program_from_stream(ic, p, i)); ){
2192 if(p->start_time == AV_NOPTS_VALUE || p->start_time > start_time1)
2193 p->start_time = start_time1;
2194 if(p->end_time < end_time1)
2195 p->end_time = end_time1;
2198 if (st->duration != AV_NOPTS_VALUE) {
2199 duration1 = av_rescale_q(st->duration, st->time_base, AV_TIME_BASE_Q);
2200 duration = FFMAX(duration, duration1);
/* Accept the text-stream start time only if no A/V start time exists or
 * the text stream starts less than one second earlier. */
2203 if (start_time == INT64_MAX || (start_time > start_time_text && start_time - start_time_text < AV_TIME_BASE))
2204 start_time = start_time_text;
2205 else if(start_time > start_time_text)
2206 av_log(ic, AV_LOG_VERBOSE, "Ignoring outlier non primary stream starttime %f\n", start_time_text / (float)AV_TIME_BASE);
2208 if (start_time != INT64_MAX) {
2209 ic->start_time = start_time;
2210 if (end_time != INT64_MIN) {
/* With programs present, the duration is the longest program span. */
2211 if (ic->nb_programs) {
2212 for (i=0; i<ic->nb_programs; i++) {
2213 p = ic->programs[i];
2214 if(p->start_time != AV_NOPTS_VALUE && p->end_time > p->start_time)
2215 duration = FFMAX(duration, p->end_time - p->start_time);
2218 duration = FFMAX(duration, end_time - start_time);
2221 if (duration != INT64_MIN && duration > 0 && ic->duration == AV_NOPTS_VALUE) {
2222 ic->duration = duration;
2224 if (ic->pb && (filesize = avio_size(ic->pb)) > 0 && ic->duration != AV_NOPTS_VALUE) {
2225 /* compute the bitrate */
2226 double bitrate = (double)filesize * 8.0 * AV_TIME_BASE /
2227 (double)ic->duration;
2228 if (bitrate >= 0 && bitrate <= INT_MAX)
2229 ic->bit_rate = bitrate;
/* Backfill per-stream start_time/duration from the container-level values
 * (recomputed first via update_stream_timings), rescaled into each stream's
 * time base, for streams that have none of their own. */
2233 static void fill_all_stream_timings(AVFormatContext *ic)
2238 update_stream_timings(ic);
2239 for(i = 0;i < ic->nb_streams; i++) {
2240 st = ic->streams[i];
2241 if (st->start_time == AV_NOPTS_VALUE) {
2242 if(ic->start_time != AV_NOPTS_VALUE)
2243 st->start_time = av_rescale_q(ic->start_time, AV_TIME_BASE_Q, st->time_base);
2244 if(ic->duration != AV_NOPTS_VALUE)
2245 st->duration = av_rescale_q(ic->duration, AV_TIME_BASE_Q, st->time_base);
/* Last-resort duration estimate: sum per-stream bitrates into ic->bit_rate
 * if unset (with overflow guard), then derive each stream's duration from
 * filesize * 8 / bit_rate.  Warns the user since this is inaccurate. */
2250 static void estimate_timings_from_bit_rate(AVFormatContext *ic)
2252 int64_t filesize, duration;
2253 int i, show_warning = 0;
2256 /* if bit_rate is already set, we believe it */
2257 if (ic->bit_rate <= 0) {
2259 for(i=0;i<ic->nb_streams;i++) {
2260 st = ic->streams[i];
2261 if (st->codec->bit_rate > 0) {
/* Bail out of the summation on int overflow. */
2262 if (INT_MAX - st->codec->bit_rate < bit_rate) {
2266 bit_rate += st->codec->bit_rate;
2269 ic->bit_rate = bit_rate;
2272 /* if duration is already set, we believe it */
2273 if (ic->duration == AV_NOPTS_VALUE &&
2274 ic->bit_rate != 0) {
2275 filesize = ic->pb ? avio_size(ic->pb) : 0;
2277 for(i = 0; i < ic->nb_streams; i++) {
2278 st = ic->streams[i];
2279 if ( st->time_base.num <= INT64_MAX / ic->bit_rate
2280 && st->duration == AV_NOPTS_VALUE) {
2281 duration= av_rescale(8*filesize, st->time_base.den, ic->bit_rate*(int64_t)st->time_base.num);
2282 st->duration = duration;
2289 av_log(ic, AV_LOG_WARNING, "Estimating duration from bitrate, this may be inaccurate\n");
/* Bytes read per attempt when probing the file tail, and how many times the
 * window is doubled before giving up. */
2292 #define DURATION_MAX_READ_SIZE 250000LL
2293 #define DURATION_MAX_RETRY 4
2295 /* only usable for MPEG-PS streams */
/* Estimate stream durations by reading packets near the end of the file and
 * taking the last pts seen per stream, relative to its start_time (or
 * first_dts).  Retries with an exponentially larger tail window; restores
 * the original file position and per-stream dts state afterwards. */
2296 static void estimate_timings_from_pts(AVFormatContext *ic, int64_t old_offset)
2298 AVPacket pkt1, *pkt = &pkt1;
2300 int read_size, i, ret;
2302 int64_t filesize, offset, duration;
2305 /* flush packet queue */
2306 flush_packet_queue(ic);
2308 for (i=0; i<ic->nb_streams; i++) {
2309 st = ic->streams[i];
2310 if (st->start_time == AV_NOPTS_VALUE && st->first_dts == AV_NOPTS_VALUE)
2311 av_log(st->codec, AV_LOG_WARNING, "start time is not set in estimate_timings_from_pts\n");
2314 av_parser_close(st->parser);
2319 /* estimate the end time (duration) */
2320 /* XXX: may need to support wrapping */
2321 filesize = ic->pb ? avio_size(ic->pb) : 0;
2322 end_time = AV_NOPTS_VALUE;
2324 offset = filesize - (DURATION_MAX_READ_SIZE<<retry);
2328 avio_seek(ic->pb, offset, SEEK_SET);
2331 if (read_size >= DURATION_MAX_READ_SIZE<<(FFMAX(retry-1,0)))
2335 ret = ff_read_packet(ic, pkt);
2336 } while(ret == AVERROR(EAGAIN));
2339 read_size += pkt->size;
2340 st = ic->streams[pkt->stream_index];
2341 if (pkt->pts != AV_NOPTS_VALUE &&
2342 (st->start_time != AV_NOPTS_VALUE ||
2343 st->first_dts != AV_NOPTS_VALUE)) {
2344 duration = end_time = pkt->pts;
2345 if (st->start_time != AV_NOPTS_VALUE)
2346 duration -= st->start_time;
2348 duration -= st->first_dts;
/* Only extend the duration when it agrees with the previous estimate
 * (within 60 s) — guards against bogus tail timestamps. */
2350 if (st->duration == AV_NOPTS_VALUE || st->info->last_duration<=0 ||
2351 (st->duration < duration && FFABS(duration - st->info->last_duration) < 60LL*st->time_base.den / st->time_base.num))
2352 st->duration = duration;
2353 st->info->last_duration = duration;
2356 av_free_packet(pkt);
2358 }while( end_time==AV_NOPTS_VALUE
2359 && filesize > (DURATION_MAX_READ_SIZE<<retry)
2360 && ++retry <= DURATION_MAX_RETRY);
2362 fill_all_stream_timings(ic);
2364 avio_seek(ic->pb, old_offset, SEEK_SET);
2365 for (i=0; i<ic->nb_streams; i++) {
2367 st->cur_dts= st->first_dts;
2368 st->last_IP_pts = AV_NOPTS_VALUE;
2369 st->reference_dts = AV_NOPTS_VALUE;
/* Choose a duration-estimation strategy and record which one was used in
 * duration_estimation_method: PTS probing for seekable MPEG-PS/TS, stream
 * timings when any component has them, bitrate extrapolation otherwise. */
2373 static void estimate_timings(AVFormatContext *ic, int64_t old_offset)
2377 /* get the file size, if possible */
2378 if (ic->iformat->flags & AVFMT_NOFILE) {
2381 file_size = avio_size(ic->pb);
2382 file_size = FFMAX(0, file_size);
2385 if ((!strcmp(ic->iformat->name, "mpeg") ||
2386 !strcmp(ic->iformat->name, "mpegts")) &&
2387 file_size && ic->pb->seekable) {
2388 /* get accurate estimate from the PTSes */
2389 estimate_timings_from_pts(ic, old_offset);
2390 ic->duration_estimation_method = AVFMT_DURATION_FROM_PTS;
2391 } else if (has_duration(ic)) {
2392 /* at least one component has timings - we use them for all
2394 fill_all_stream_timings(ic);
2395 ic->duration_estimation_method = AVFMT_DURATION_FROM_STREAM;
2397 /* less precise: use bitrate info */
2398 estimate_timings_from_bit_rate(ic);
2399 ic->duration_estimation_method = AVFMT_DURATION_FROM_BITRATE;
2401 update_stream_timings(ic);
2405 AVStream av_unused *st;
2406 for(i = 0;i < ic->nb_streams; i++) {
2407 st = ic->streams[i];
2408 av_dlog(ic, "%d: start_time: %0.3f duration: %0.3f\n", i,
2409 (double) st->start_time / AV_TIME_BASE,
2410 (double) st->duration / AV_TIME_BASE);
2412 av_dlog(ic, "stream: start_time: %0.3f duration: %0.3f bitrate=%d kb/s\n",
2413 (double) ic->start_time / AV_TIME_BASE,
2414 (double) ic->duration / AV_TIME_BASE,
2415 ic->bit_rate / 1000);
/**
 * Return whether enough codec parameters are known for stream st to be
 * usable; on failure, *errmsg_ptr (if non-NULL) receives a short reason.
 * NOTE(review): this paste is missing interior source lines; comments
 * describe only the visible code.
 */
2419 static int has_codec_parameters(AVStream *st, const char **errmsg_ptr)
2421     AVCodecContext *avctx = st->codec;
/* FAIL() records the reason string and aborts the checks below. */
2423 #define FAIL(errmsg) do {                                         \
2425             *errmsg_ptr = errmsg;                                 \
2429     switch (avctx->codec_type) {
2430     case AVMEDIA_TYPE_AUDIO:
2431         if (!avctx->frame_size && determinable_frame_size(avctx))
2432             FAIL("unspecified frame size");
/* sample_fmt/pix_fmt are only required once a decoder was probed
 * (found_decoder >= 0); without a decoder they may legitimately be unset. */
2433         if (st->info->found_decoder >= 0 && avctx->sample_fmt == AV_SAMPLE_FMT_NONE)
2434             FAIL("unspecified sample format");
2435         if (!avctx->sample_rate)
2436             FAIL("unspecified sample rate");
2437         if (!avctx->channels)
2438             FAIL("unspecified number of channels");
2439         if (st->info->found_decoder >= 0 && !st->nb_decoded_frames && avctx->codec_id == AV_CODEC_ID_DTS)
2440             FAIL("no decodable DTS frames");
2442     case AVMEDIA_TYPE_VIDEO:
2444             FAIL("unspecified size");
2445         if (st->info->found_decoder >= 0 && avctx->pix_fmt == AV_PIX_FMT_NONE)
2446             FAIL("unspecified pixel format");
/* RV30/RV40 carry the sample aspect ratio in frame data, so require at
 * least one frame (or an explicit SAR) before declaring them ready. */
2447         if (st->codec->codec_id == AV_CODEC_ID_RV30 || st->codec->codec_id == AV_CODEC_ID_RV40)
2448             if (!st->sample_aspect_ratio.num && !st->codec->sample_aspect_ratio.num && !st->codec_info_nb_frames)
2449                 FAIL("no frame in rv30/40 and no sar");
2451     case AVMEDIA_TYPE_SUBTITLE:
2452         if (avctx->codec_id == AV_CODEC_ID_HDMV_PGS_SUBTITLE && !avctx->width)
2453             FAIL("unspecified size");
2455     case AVMEDIA_TYPE_DATA:
2456         if(avctx->codec_id == AV_CODEC_ID_NONE) return 1;
2459     if (avctx->codec_id == AV_CODEC_ID_NONE)
2460         FAIL("unknown codec");
2464 /* returns 1 or 0 if or if not decoded data was returned, or a negative error */
/* Open the stream's decoder if needed and decode packets from avpkt until
 * the codec parameters are known (or input is exhausted). Used by
 * avformat_find_stream_info() probing.
 * NOTE(review): this paste is missing interior source lines; comments
 * describe only the visible code. */
2465 static int try_decode_frame(AVFormatContext *s, AVStream *st, AVPacket *avpkt, AVDictionary **options)
2467     const AVCodec *codec;
2468     int got_picture = 1, ret = 0;
2469     AVFrame *frame = avcodec_alloc_frame();
2470     AVSubtitle subtitle;
2471     AVPacket pkt = *avpkt;
2474         return AVERROR(ENOMEM);
/* Lazily open the decoder on first use; found_decoder records the outcome
 * (1 = opened, -1 = not found / failed) so later calls can skip this. */
2476     if (!avcodec_is_open(st->codec) && !st->info->found_decoder) {
2477         AVDictionary *thread_opt = NULL;
2479         codec = find_decoder(s, st, st->codec->codec_id);
2482             st->info->found_decoder = -1;
2487         /* force thread count to 1 since the h264 decoder will not extract SPS
2488          * and PPS to extradata during multi-threaded decoding */
2489         av_dict_set(options ? options : &thread_opt, "threads", "1", 0);
2490         ret = avcodec_open2(st->codec, codec, options ? options : &thread_opt);
2492             av_dict_free(&thread_opt);
2494             st->info->found_decoder = -1;
2497         st->info->found_decoder = 1;
2498     } else if (!st->info->found_decoder)
2499         st->info->found_decoder = 1;
2501     if (st->info->found_decoder < 0) {
/* Keep decoding while data remains (or the decoder is still draining) and
 * some parameter is still unknown. CODEC_CAP_CHANNEL_CONF codecs must
 * decode at least one frame to establish the channel layout. */
2506     while ((pkt.size > 0 || (!pkt.data && got_picture)) &&
2508            (!has_codec_parameters(st, NULL)  ||
2509            !has_decode_delay_been_guessed(st) ||
2510            (!st->codec_info_nb_frames && st->codec->codec->capabilities & CODEC_CAP_CHANNEL_CONF))) {
2512         avcodec_get_frame_defaults(frame);
2513         switch(st->codec->codec_type) {
2514         case AVMEDIA_TYPE_VIDEO:
2515             ret = avcodec_decode_video2(st->codec, frame,
2516                                         &got_picture, &pkt);
2518         case AVMEDIA_TYPE_AUDIO:
2519             ret = avcodec_decode_audio4(st->codec, frame, &got_picture, &pkt);
2521         case AVMEDIA_TYPE_SUBTITLE:
2522             ret = avcodec_decode_subtitle2(st->codec, &subtitle,
2523                                            &got_picture, &pkt);
2531                 st->nb_decoded_frames++;
2538     if(!pkt.data && !got_picture)
2542     avcodec_free_frame(&frame);
/**
 * Look up the container tag for codec id in the tags table.
 * NOTE(review): most of this function's body is missing from the paste;
 * only the signature and loop condition are visible.
 */
2546 unsigned int ff_codec_get_tag(const AVCodecTag *tags, enum AVCodecID id)
2548     while (tags->id != AV_CODEC_ID_NONE) {
/**
 * Map a container tag to a codec id using the given table.
 * Pass 1 matches the tag exactly; pass 2 retries case-insensitively
 * (avpriv_toupper4). Returns AV_CODEC_ID_NONE when not found.
 * NOTE(review): the return statements inside the loops are among the
 * lines missing from this paste.
 */
2556 enum AVCodecID ff_codec_get_id(const AVCodecTag *tags, unsigned int tag)
2559     for(i=0; tags[i].id != AV_CODEC_ID_NONE;i++) {
2560         if(tag == tags[i].tag)
2563     for(i=0; tags[i].id != AV_CODEC_ID_NONE; i++) {
2564         if (avpriv_toupper4(tag) == avpriv_toupper4(tags[i].tag))
2567     return AV_CODEC_ID_NONE;
/**
 * Select a PCM codec id from sample properties:
 * bps = bits per sample, flt = float samples, be = big-endian,
 * sflags = bitmask of byte widths that are signed (bit (bps/8 - 1)).
 * NOTE(review): the branch structure connecting these switch bodies
 * (float vs signed vs unsigned) is partially missing from this paste.
 */
2570 enum AVCodecID ff_get_pcm_codec_id(int bps, int flt, int be, int sflags)
/* Float formats: only 32- and 64-bit exist. */
2574         case 32: return be ? AV_CODEC_ID_PCM_F32BE : AV_CODEC_ID_PCM_F32LE;
2575         case 64: return be ? AV_CODEC_ID_PCM_F64BE : AV_CODEC_ID_PCM_F64LE;
2576         default: return AV_CODEC_ID_NONE;
2581         if (sflags & (1 << (bps - 1))) {
/* Signed integer formats, keyed by byte width. */
2583             case 1: return AV_CODEC_ID_PCM_S8;
2584             case 2: return be ? AV_CODEC_ID_PCM_S16BE : AV_CODEC_ID_PCM_S16LE;
2585             case 3: return be ? AV_CODEC_ID_PCM_S24BE : AV_CODEC_ID_PCM_S24LE;
2586             case 4: return be ? AV_CODEC_ID_PCM_S32BE : AV_CODEC_ID_PCM_S32LE;
2587             default: return AV_CODEC_ID_NONE;
/* Unsigned integer formats, keyed by byte width. */
2591             case 1: return AV_CODEC_ID_PCM_U8;
2592             case 2: return be ? AV_CODEC_ID_PCM_U16BE : AV_CODEC_ID_PCM_U16LE;
2593             case 3: return be ? AV_CODEC_ID_PCM_U24BE : AV_CODEC_ID_PCM_U24LE;
2594             case 4: return be ? AV_CODEC_ID_PCM_U32BE : AV_CODEC_ID_PCM_U32LE;
2595             default: return AV_CODEC_ID_NONE;
/**
 * Convenience wrapper around av_codec_get_tag2(): returns the tag for id,
 * or (per the visible fallthrough) a default when no table contains it.
 * NOTE(review): the tail of this function is missing from the paste.
 */
2601 unsigned int av_codec_get_tag(const AVCodecTag * const *tags, enum AVCodecID id)
2604     if (!av_codec_get_tag2(tags, id, &tag))
/**
 * Search a NULL-terminated list of codec tag tables for id; on a match
 * store the tag through *tag. (Return statements are among the lines
 * missing from this paste — presumably success/failure; verify upstream.)
 */
2609 int av_codec_get_tag2(const AVCodecTag * const *tags, enum AVCodecID id,
2613     for(i=0; tags && tags[i]; i++){
2614         const AVCodecTag *codec_tags = tags[i];
2615         while (codec_tags->id != AV_CODEC_ID_NONE) {
2616             if (codec_tags->id == id) {
2617                 *tag = codec_tags->tag;
/**
 * Search a NULL-terminated list of codec tag tables for the first codec id
 * matching tag; returns AV_CODEC_ID_NONE when no table matches.
 */
2626 enum AVCodecID av_codec_get_id(const AVCodecTag * const *tags, unsigned int tag)
2629     for(i=0; tags && tags[i]; i++){
2630         enum AVCodecID id= ff_codec_get_id(tags[i], tag);
2631         if(id!=AV_CODEC_ID_NONE) return id;
2633     return AV_CODEC_ID_NONE;
/**
 * Fill in missing chapter end times: each open-ended chapter is clipped to
 * the start of the nearest following chapter, falling back to the end of
 * the file (s->duration + start) or, failing that, its own start.
 * NOTE(review): lines are missing from this paste (e.g. the branch taken
 * when max_time is 0 at 2644/2645); comments cover visible code only.
 */
2636 static void compute_chapters_end(AVFormatContext *s)
2639     int64_t max_time = s->duration + ((s->start_time == AV_NOPTS_VALUE) ? 0 : s->start_time);
2641     for (i = 0; i < s->nb_chapters; i++)
2642         if (s->chapters[i]->end == AV_NOPTS_VALUE) {
2643             AVChapter *ch = s->chapters[i];
/* Candidate end: file end converted into this chapter's time base. */
2644             int64_t end = max_time ? av_rescale_q(max_time, AV_TIME_BASE_Q, ch->time_base)
/* Shrink the candidate to the closest later chapter start. */
2647             for (j = 0; j < s->nb_chapters; j++) {
2648                 AVChapter *ch1 = s->chapters[j];
2649                 int64_t next_start = av_rescale_q(ch1->start, ch1->time_base, ch->time_base);
2650                 if (j != i && next_start > ch->start && next_start < end)
2653             ch->end = (end == INT64_MAX) ? ch->start : end;
/**
 * Return the i-th "standard" frame rate candidate, expressed in units of
 * 1/(1001*12) Hz (so both NTSC-style x/1001 rates and exact rates share
 * one integer scale). Indices below 60*12 enumerate 1/1001 .. 720/1001;
 * the remaining indices come from a small table of common exact rates.
 */
static int get_std_framerate(int i)
{
    static const int extra_rates[] = { 24, 30, 60, 12, 15, 48 };

    if (i < 60 * 12)
        return (i + 1) * 1001;

    return extra_rates[i - 60 * 12] * 1000 * 12;
}
2663 * Is the time base unreliable.
2664 * This is a heuristic to balance between quick acceptance of the values in
2665 * the headers vs. some extra checks.
2666 * Old DivX and Xvid often have nonsense timebases like 1fps or 2fps.
2667 * MPEG-2 commonly misuses field repeat flags to store different framerates.
2668 * And there are "variable" fps files this needs to detect as well.
/**
 * Heuristic: return nonzero when the codec time base cannot be trusted as
 * a frame rate (implausibly coarse/fine, or a codec/tag known to misuse
 * it — see the comment block above this function).
 * NOTE(review): the end of the condition and the return statements are
 * among the lines missing from this paste.
 */
2670 static int tb_unreliable(AVCodecContext *c){
/* den >= 101*num means < ~1/101 s per tick (suspiciously fine);
 * den < 5*num means < 5 fps equivalent (suspiciously coarse). */
2671     if(   c->time_base.den >= 101L*c->time_base.num
2672        || c->time_base.den <    5L*c->time_base.num
2673 /*       || c->codec_tag == AV_RL32("DIVX")
2674        || c->codec_tag == AV_RL32("XVID")*/
2675        || c->codec_tag == AV_RL32("mp4v")
2676        || c->codec_id == AV_CODEC_ID_MPEG2VIDEO
2677        || c->codec_id == AV_CODEC_ID_H264
2683 #if FF_API_FORMAT_PARAMETERS
/* Deprecated pre-AVOptions entry point; kept under FF_API_FORMAT_PARAMETERS
 * for ABI compatibility. Simply forwards to avformat_find_stream_info(). */
2684 int av_find_stream_info(AVFormatContext *ic)
2686     return avformat_find_stream_info(ic, NULL);
/**
 * Allocate avctx->extradata of the given size plus
 * FF_INPUT_BUFFER_PADDING_SIZE zeroed padding bytes, and set
 * avctx->extradata_size accordingly. Returns 0, AVERROR(EINVAL) for an
 * invalid size, or AVERROR(ENOMEM) on allocation failure.
 * NOTE(review): a few lines are missing from this paste (the ret
 * declaration and success-path assignment, presumably).
 */
2690 int ff_alloc_extradata(AVCodecContext *avctx, int size)
/* Reject sizes whose padded allocation would overflow. */
2694     if (size < 0 || size >= INT32_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
2695         avctx->extradata_size = 0;
2696         return AVERROR(EINVAL);
2698     avctx->extradata = av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
2699     if (avctx->extradata) {
/* Zero the padding so bitstream readers can safely overread. */
2700         memset(avctx->extradata + size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
2701         avctx->extradata_size = size;
2704         avctx->extradata_size = 0;
2705         ret = AVERROR(ENOMEM);
/**
 * Probe the input: read packets (optionally decoding them) until codec
 * parameters, frame rates and timings are known for every stream, within
 * the probesize / max_analyze_duration budgets. Read packets are buffered
 * and replayed to the caller later unless AVFMT_FLAG_NOBUFFER is set.
 * NOTE(review): this paste is missing many interior source lines (gaps in
 * the embedded numbering); all comments below describe visible code only.
 */
2710 int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
2712     int i, count, ret = 0, j;
2715     AVPacket pkt1, *pkt;
2716     int64_t old_offset = avio_tell(ic->pb);
2717     int orig_nb_streams = ic->nb_streams;        // new streams might appear, no options for those
2718     int flush_codecs = ic->probesize > 0;
2721     av_log(ic, AV_LOG_DEBUG, "File position before avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/* Pass 1: per-stream setup — time base, parser, and a first attempt to
 * open each decoder so parameters may become known without decoding. */
2723     for(i=0;i<ic->nb_streams;i++) {
2724         const AVCodec *codec;
2725         AVDictionary *thread_opt = NULL;
2726         st = ic->streams[i];
2728         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2729             st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE) {
2730 /*            if(!st->time_base.num)
2732             if(!st->codec->time_base.num)
2733                 st->codec->time_base= st->time_base;
2735         //only for the split stuff
2736         if (!st->parser && !(ic->flags & AVFMT_FLAG_NOPARSE)) {
2737             st->parser = av_parser_init(st->codec->codec_id);
2739                 if(st->need_parsing == AVSTREAM_PARSE_HEADERS){
2740                     st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
2741                 } else if(st->need_parsing == AVSTREAM_PARSE_FULL_RAW) {
2742                     st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
2744             } else if (st->need_parsing) {
2745                 av_log(ic, AV_LOG_VERBOSE, "parser not found for codec "
2746                        "%s, packets or times may be invalid.\n",
2747                        avcodec_get_name(st->codec->codec_id));
2750         codec = find_decoder(ic, st, st->codec->codec_id);
2752         /* force thread count to 1 since the h264 decoder will not extract SPS
2753          * and PPS to extradata during multi-threaded decoding */
2754         av_dict_set(options ? &options[i] : &thread_opt, "threads", "1", 0);
2756         /* Ensure that subtitle_header is properly set. */
2757         if (st->codec->codec_type == AVMEDIA_TYPE_SUBTITLE
2758             && codec && !st->codec->codec)
2759             avcodec_open2(st->codec, codec, options ? &options[i]
2762         //try to just open decoders, in case this is enough to get parameters
2763         if (!has_codec_parameters(st, NULL) && st->request_probe <= 0) {
2764             if (codec && !st->codec->codec)
2765                 avcodec_open2(st->codec, codec, options ? &options[i]
2769             av_dict_free(&thread_opt);
/* Reset per-stream frame-rate probing state before the read loop. */
2772     for (i=0; i<ic->nb_streams; i++) {
2773 #if FF_API_R_FRAME_RATE
2774         ic->streams[i]->info->last_dts = AV_NOPTS_VALUE;
2776         ic->streams[i]->info->fps_first_dts = AV_NOPTS_VALUE;
2777         ic->streams[i]->info->fps_last_dts  = AV_NOPTS_VALUE;
/* Main probe loop: keep reading packets until every stream is satisfied,
 * a budget is exhausted, or the caller interrupts. */
2783         if (ff_check_interrupt(&ic->interrupt_callback)){
2785             av_log(ic, AV_LOG_DEBUG, "interrupted\n");
2789         /* check if one codec still needs to be handled */
2790         for(i=0;i<ic->nb_streams;i++) {
2791             int fps_analyze_framecount = 20;
2793             st = ic->streams[i];
2794             if (!has_codec_parameters(st, NULL))
2796             /* if the timebase is coarse (like the usual millisecond precision
2797                of mkv), we need to analyze more frames to reliably arrive at
2799             if (av_q2d(st->time_base) > 0.0005)
2800                 fps_analyze_framecount *= 2;
2801             if (ic->fps_probe_size >= 0)
2802                 fps_analyze_framecount = ic->fps_probe_size;
2803             if (st->disposition & AV_DISPOSITION_ATTACHED_PIC)
2804                 fps_analyze_framecount = 0;
2805             /* variable fps and no guess at the real fps */
2806             if(   tb_unreliable(st->codec) && !(st->r_frame_rate.num && st->avg_frame_rate.num)
2807                && st->info->duration_count < fps_analyze_framecount
2808                && st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2810             if(st->parser && st->parser->parser->split && !st->codec->extradata)
2812             if (st->first_dts == AV_NOPTS_VALUE &&
2813                 (st->codec->codec_type == AVMEDIA_TYPE_VIDEO ||
2814                  st->codec->codec_type == AVMEDIA_TYPE_AUDIO))
2817         if (i == ic->nb_streams) {
2818             /* NOTE: if the format has no header, then we need to read
2819                some packets to get most of the streams, so we cannot
2821             if (!(ic->ctx_flags & AVFMTCTX_NOHEADER)) {
2822                 /* if we found the info for all the codecs, we can stop */
2824                 av_log(ic, AV_LOG_DEBUG, "All info found\n");
2829         /* we did not get all the codec info, but we read too much data */
2830         if (read_size >= ic->probesize) {
2832             av_log(ic, AV_LOG_DEBUG, "Probe buffer size limit of %d bytes reached\n", ic->probesize);
2833             for (i = 0; i < ic->nb_streams; i++)
2834                 if (!ic->streams[i]->r_frame_rate.num &&
2835                     ic->streams[i]->info->duration_count <= 1 &&
2836                     strcmp(ic->iformat->name, "image2"))
2837                     av_log(ic, AV_LOG_WARNING,
2838                            "Stream #%d: not enough frames to estimate rate; "
2839                            "consider increasing probesize\n", i);
2843         /* NOTE: a new stream can be added there if no header in file
2844            (AVFMTCTX_NOHEADER) */
2845         ret = read_frame_internal(ic, &pkt1);
2846         if (ret == AVERROR(EAGAIN))
/* Buffer the packet for later replay unless the caller opted out. */
2854         if (ic->flags & AVFMT_FLAG_NOBUFFER)
2855             free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);
2857         pkt = add_to_pktbuf(&ic->packet_buffer, &pkt1,
2858                             &ic->packet_buffer_end);
2860             ret = AVERROR(ENOMEM);
2861             goto find_stream_info_err;
2863         if ((ret = av_dup_packet(pkt)) < 0)
2864             goto find_stream_info_err;
2867         st = ic->streams[pkt->stream_index];
2868         if (!(st->disposition & AV_DISPOSITION_ATTACHED_PIC))
2869             read_size += pkt->size;
/* DTS monotonicity / discontinuity tracking for fps estimation. */
2871         if (pkt->dts != AV_NOPTS_VALUE && st->codec_info_nb_frames > 1) {
2872             /* check for non-increasing dts */
2873             if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2874                 st->info->fps_last_dts >= pkt->dts) {
2875                 av_log(ic, AV_LOG_DEBUG, "Non-increasing DTS in stream %d: "
2876                        "packet %d with DTS %"PRId64", packet %d with DTS "
2877                        "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2878                        st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2879                 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2881             /* check for a discontinuity in dts - if the difference in dts
2882              * is more than 1000 times the average packet duration in the sequence,
2883              * we treat it as a discontinuity */
2884             if (st->info->fps_last_dts != AV_NOPTS_VALUE &&
2885                 st->info->fps_last_dts_idx > st->info->fps_first_dts_idx &&
2886                 (pkt->dts - st->info->fps_last_dts) / 1000 >
2887                 (st->info->fps_last_dts - st->info->fps_first_dts) / (st->info->fps_last_dts_idx - st->info->fps_first_dts_idx)) {
2888                 av_log(ic, AV_LOG_WARNING, "DTS discontinuity in stream %d: "
2889                        "packet %d with DTS %"PRId64", packet %d with DTS "
2890                        "%"PRId64"\n", st->index, st->info->fps_last_dts_idx,
2891                        st->info->fps_last_dts, st->codec_info_nb_frames, pkt->dts);
2892                 st->info->fps_first_dts = st->info->fps_last_dts = AV_NOPTS_VALUE;
2895             /* update stored dts values */
2896             if (st->info->fps_first_dts == AV_NOPTS_VALUE) {
2897                 st->info->fps_first_dts     = pkt->dts;
2898                 st->info->fps_first_dts_idx = st->codec_info_nb_frames;
2900             st->info->fps_last_dts = pkt->dts;
2901             st->info->fps_last_dts_idx = st->codec_info_nb_frames;
/* Stop once the analyzed duration exceeds max_analyze_duration; t is the
 * largest of the duration/frame-count/dts-span based estimates. */
2903         if (st->codec_info_nb_frames>1) {
2905             if (st->time_base.den > 0)
2906                 t = av_rescale_q(st->info->codec_info_duration, st->time_base, AV_TIME_BASE_Q);
2907             if (st->avg_frame_rate.num > 0)
2908                 t = FFMAX(t, av_rescale_q(st->codec_info_nb_frames, av_inv_q(st->avg_frame_rate), AV_TIME_BASE_Q));
2911                 && st->codec_info_nb_frames>30
2912                 && st->info->fps_first_dts != AV_NOPTS_VALUE
2913                 && st->info->fps_last_dts  != AV_NOPTS_VALUE)
2914                 t = FFMAX(t, av_rescale_q(st->info->fps_last_dts - st->info->fps_first_dts, st->time_base, AV_TIME_BASE_Q));
2916             if (t >= ic->max_analyze_duration) {
2917                 av_log(ic, AV_LOG_VERBOSE, "max_analyze_duration %d reached at %"PRId64" microseconds\n", ic->max_analyze_duration, t);
2920             if (pkt->duration) {
2921                 st->info->codec_info_duration        += pkt->duration;
2922                 st->info->codec_info_duration_fields += st->parser && st->need_parsing && st->codec->ticks_per_frame==2 ? st->parser->repeat_pict + 1 : 2;
/* Legacy r_frame_rate estimation: accumulate per-candidate-framerate error
 * statistics and a dts-delta GCD. */
2925 #if FF_API_R_FRAME_RATE
2927             int64_t last = st->info->last_dts;
2929             if(   pkt->dts != AV_NOPTS_VALUE && last != AV_NOPTS_VALUE && pkt->dts > last
2930                && pkt->dts - (uint64_t)last < INT64_MAX){
2931                 double dts= (is_relative(pkt->dts) ?  pkt->dts - RELATIVE_TS_BASE : pkt->dts) * av_q2d(st->time_base);
2932                 int64_t duration= pkt->dts - last;
2934                 if (!st->info->duration_error)
2935                     st->info->duration_error = av_mallocz(sizeof(st->info->duration_error[0])*2);
2936                 if (!st->info->duration_error)
2937                     return AVERROR(ENOMEM);
2939 //                 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
2940 //                     av_log(NULL, AV_LOG_ERROR, "%f\n", dts);
2941                 for (i=0; i<MAX_STD_TIMEBASES; i++) {
2942                     int framerate= get_std_framerate(i);
2943                     double sdts= dts*framerate/(1001*12);
2945                         int64_t ticks= llrint(sdts+j*0.5);
2946                         double error= sdts - ticks + j*0.5;
2947                         st->info->duration_error[j][0][i] += error;
2948                         st->info->duration_error[j][1][i] += error*error;
2951                 st->info->duration_count++;
2952                 // ignore the first 4 values, they might have some random jitter
2953                 if (st->info->duration_count > 3 && is_relative(pkt->dts) == is_relative(last))
2954                     st->info->duration_gcd = av_gcd(st->info->duration_gcd, duration);
2956             if (pkt->dts != AV_NOPTS_VALUE)
2957                 st->info->last_dts = pkt->dts;
/* Extract global extradata (e.g. SPS/PPS) via the parser's split callback. */
2960         if(st->parser && st->parser->parser->split && !st->codec->extradata){
2961             int i= st->parser->parser->split(st->codec, pkt->data, pkt->size);
2962             if (i > 0 && i < FF_MAX_EXTRADATA_SIZE) {
2963                 if (ff_alloc_extradata(st->codec, i))
2964                     return AVERROR(ENOMEM);
2965                 memcpy(st->codec->extradata, pkt->data, st->codec->extradata_size);
2969         /* if still no information, we try to open the codec and to
2970            decompress the frame. We try to avoid that in most cases as
2971            it takes longer and uses more memory. For MPEG-4, we need to
2972            decompress for QuickTime.
2974            If CODEC_CAP_CHANNEL_CONF is set this will force decoding of at
2975            least one frame of codec data, this makes sure the codec initializes
2976            the channel configuration and does not only trust the values from the container.
2978         try_decode_frame(ic, st, pkt, (options && i < orig_nb_streams ) ? &options[i] : NULL);
2980         st->codec_info_nb_frames++;
/* Flush phase: feed each opened decoder an empty packet so delayed frames
 * can still supply missing parameters. */
2985         AVPacket empty_pkt = { 0 };
2987         av_init_packet(&empty_pkt);
2989         for(i=0;i<ic->nb_streams;i++) {
2991             st = ic->streams[i];
2993             /* flush the decoders */
2994             if (st->info->found_decoder == 1) {
2996                     err = try_decode_frame(ic, st, &empty_pkt,
2997                                             (options && i < orig_nb_streams) ?
2998                                             &options[i] : NULL);
2999                 } while (err > 0 && !has_codec_parameters(st, NULL));
3002                     av_log(ic, AV_LOG_INFO,
3003                            "decoding for stream %d failed\n", st->index);
3009     // close codecs which were opened in try_decode_frame()
3010     for(i=0;i<ic->nb_streams;i++) {
3011         st = ic->streams[i];
3012         avcodec_close(st->codec);
/* Post-processing per stream: raw-video tags, average/real frame rate
 * estimation, audio disposition from service type. */
3014     for(i=0;i<ic->nb_streams;i++) {
3015         st = ic->streams[i];
3016         if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
3017             if(st->codec->codec_id == AV_CODEC_ID_RAWVIDEO && !st->codec->codec_tag && !st->codec->bits_per_coded_sample){
3018                 uint32_t tag= avcodec_pix_fmt_to_codec_tag(st->codec->pix_fmt);
3019                 if (avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, tag) == st->codec->pix_fmt)
3020                     st->codec->codec_tag= tag;
3023             /* estimate average framerate if not set by demuxer */
3024             if (st->info->codec_info_duration_fields && !st->avg_frame_rate.num && st->info->codec_info_duration) {
3026                 double best_error = 0.01;
3028                 if (st->info->codec_info_duration        >= INT64_MAX / st->time_base.num / 2||
3029                     st->info->codec_info_duration_fields >= INT64_MAX / st->time_base.den ||
3030                     st->info->codec_info_duration        < 0)
3032                 av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3033                           st->info->codec_info_duration_fields*(int64_t)st->time_base.den,
3034                           st->info->codec_info_duration*2*(int64_t)st->time_base.num, 60000);
3036                 /* round guessed framerate to a "standard" framerate if it's
3037                  * within 1% of the original estimate*/
3038                 for (j = 1; j < MAX_STD_TIMEBASES; j++) {
3039                     AVRational std_fps = { get_std_framerate(j), 12*1001 };
3040                     double error = fabs(av_q2d(st->avg_frame_rate) / av_q2d(std_fps) - 1);
3042                     if (error < best_error) {
3044                         best_fps   = std_fps.num;
3048                     av_reduce(&st->avg_frame_rate.num, &st->avg_frame_rate.den,
3049                               best_fps, 12*1001, INT_MAX);
3052             // the check for tb_unreliable() is not completely correct, since this is not about handling
3053             // a unreliable/inexact time base, but a time base that is finer than necessary, as e.g.
3054             // ipmovie.c produces.
3055             if (tb_unreliable(st->codec) && st->info->duration_count > 15 && st->info->duration_gcd > FFMAX(1, st->time_base.den/(500LL*st->time_base.num)) && !st->r_frame_rate.num)
3056                 av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, st->time_base.den, st->time_base.num * st->info->duration_gcd, INT_MAX);
3057             if (st->info->duration_count>1 && !st->r_frame_rate.num
3058                 && tb_unreliable(st->codec)) {
3060                 double best_error= 0.01;
3062                 for (j=0; j<MAX_STD_TIMEBASES; j++) {
3065                     if(st->info->codec_info_duration && st->info->codec_info_duration*av_q2d(st->time_base) < (1001*12.0)/get_std_framerate(j))
3067                     if(!st->info->codec_info_duration && 1.0 < (1001*12.0)/get_std_framerate(j))
3070                         int n= st->info->duration_count;
3071                         double a= st->info->duration_error[k][0][j] / n;
3072                         double error= st->info->duration_error[k][1][j]/n - a*a;
3074                         if(error < best_error && best_error> 0.000000001){
3076                             num = get_std_framerate(j);
3079                             av_log(NULL, AV_LOG_DEBUG, "rfps: %f %f\n", get_std_framerate(j) / 12.0/1001, error);
3082                 // do not increase frame rate by more than 1 % in order to match a standard rate.
3083                 if (num && (!st->r_frame_rate.num || (double)num/(12*1001) < 1.01 * av_q2d(st->r_frame_rate)))
3084                     av_reduce(&st->r_frame_rate.num, &st->r_frame_rate.den, num, 12*1001, INT_MAX);
3087             if (!st->r_frame_rate.num){
3088                 if(    st->codec->time_base.den * (int64_t)st->time_base.num
3089                     <= st->codec->time_base.num * st->codec->ticks_per_frame * (int64_t)st->time_base.den){
3090                     st->r_frame_rate.num = st->codec->time_base.den;
3091                     st->r_frame_rate.den = st->codec->time_base.num * st->codec->ticks_per_frame;
3093                     st->r_frame_rate.num = st->time_base.den;
3094                     st->r_frame_rate.den = st->time_base.num;
3097         }else if(st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
3098             if(!st->codec->bits_per_coded_sample)
3099                 st->codec->bits_per_coded_sample= av_get_bits_per_sample(st->codec->codec_id);
3100             // set stream disposition based on audio service type
3101             switch (st->codec->audio_service_type) {
3102             case AV_AUDIO_SERVICE_TYPE_EFFECTS:
3103                 st->disposition = AV_DISPOSITION_CLEAN_EFFECTS; break;
3104             case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED:
3105                 st->disposition = AV_DISPOSITION_VISUAL_IMPAIRED; break;
3106             case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:
3107                 st->disposition = AV_DISPOSITION_HEARING_IMPAIRED; break;
3108             case AV_AUDIO_SERVICE_TYPE_COMMENTARY:
3109                 st->disposition = AV_DISPOSITION_COMMENT; break;
3110             case AV_AUDIO_SERVICE_TYPE_KARAOKE:
3111                 st->disposition = AV_DISPOSITION_KARAOKE; break;
3117         estimate_timings(ic, old_offset);
3119     if (ret >= 0 && ic->nb_streams)
3120         ret = -1; /* we could not have all the codec parameters before EOF */
3121     for(i=0;i<ic->nb_streams;i++) {
3123         st = ic->streams[i];
3124         if (!has_codec_parameters(st, &errmsg)) {
3126             avcodec_string(buf, sizeof(buf), st->codec, 0);
3127             av_log(ic, AV_LOG_WARNING,
3128                    "Could not find codec parameters for stream %d (%s): %s\n"
3129                    "Consider increasing the value for the 'analyzeduration' and 'probesize' options\n",
3136     compute_chapters_end(ic);
/* Cleanup: release per-stream probing state and reset thread counts. */
3138  find_stream_info_err:
3139     for (i=0; i < ic->nb_streams; i++) {
3140         st = ic->streams[i];
3141         if (ic->streams[i]->codec && ic->streams[i]->codec->codec_type != AVMEDIA_TYPE_AUDIO)
3142             ic->streams[i]->codec->thread_count = 0;
3144             av_freep(&st->info->duration_error);
3145         av_freep(&ic->streams[i]->info);
3148     av_log(ic, AV_LOG_DEBUG, "File position after avformat_find_stream_info() is %"PRId64"\n", avio_tell(ic->pb));
/**
 * Return the next program (after 'last', or the first when last is NULL —
 * presumably; the flag handling lines are missing from this paste) that
 * contains stream index s, or NULL when none does.
 */
3152 AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
3156     for (i = 0; i < ic->nb_programs; i++) {
3157         if (ic->programs[i] == last) {
3161             for (j = 0; j < ic->programs[i]->nb_stream_indexes; j++)
3162                 if (ic->programs[i]->stream_index[j] == s)
3163                     return ic->programs[i];
/**
 * Pick the "best" stream of the given media type: prefer streams with more
 * decoded probe frames (capped at 5), then higher bitrate, then more
 * codec_info frames; impaired-disposition streams are deprioritized.
 * Restricts the search to the program of related_stream when given.
 * Returns the stream index, AVERROR_STREAM_NOT_FOUND, or
 * AVERROR_DECODER_NOT_FOUND when a decoder was requested but missing.
 * NOTE(review): interior lines are missing from this paste; comments
 * describe visible code only.
 */
3169 int av_find_best_stream(AVFormatContext *ic,
3170                         enum AVMediaType type,
3171                         int wanted_stream_nb,
3173                         AVCodec **decoder_ret,
3176     int i, nb_streams = ic->nb_streams;
3177     int ret = AVERROR_STREAM_NOT_FOUND, best_count = -1, best_bitrate = -1, best_multiframe = -1, count, bitrate, multiframe;
3178     unsigned *program = NULL;
3179     AVCodec *decoder = NULL, *best_decoder = NULL;
/* Limit candidates to the related stream's program, when one exists. */
3181     if (related_stream >= 0 && wanted_stream_nb < 0) {
3182         AVProgram *p = av_find_program_from_stream(ic, NULL, related_stream);
3184             program = p->stream_index;
3185             nb_streams = p->nb_stream_indexes;
3188     for (i = 0; i < nb_streams; i++) {
3189         int real_stream_index = program ? program[i] : i;
3190         AVStream *st = ic->streams[real_stream_index];
3191         AVCodecContext *avctx = st->codec;
3192         if (avctx->codec_type != type)
3194         if (wanted_stream_nb >= 0 && real_stream_index != wanted_stream_nb)
3196         if (st->disposition & (AV_DISPOSITION_HEARING_IMPAIRED|AV_DISPOSITION_VISUAL_IMPAIRED))
3199             decoder = find_decoder(ic, st, st->codec->codec_id);
3202                     ret = AVERROR_DECODER_NOT_FOUND;
3206         count = st->codec_info_nb_frames;
3207         bitrate = avctx->bit_rate;
3208         multiframe = FFMIN(5, count);
/* Keep the current best when it beats this candidate on the
 * (multiframe, bitrate, count) ordering. */
3209         if ((best_multiframe >  multiframe) ||
3210             (best_multiframe == multiframe && best_bitrate >  bitrate) ||
3211             (best_multiframe == multiframe && best_bitrate == bitrate && best_count >= count))
3214         best_bitrate = bitrate;
3215         best_multiframe = multiframe;
3216         ret = real_stream_index;
3217         best_decoder = decoder;
3218         if (program && i == nb_streams - 1 && ret < 0) {
3220             nb_streams = ic->nb_streams;
3221             i = 0; /* no related stream found, try again with everything */
3225         *decoder_ret = best_decoder;
3229 /*******************************************************/
/**
 * Resume playback: use the demuxer's read_play callback when present,
 * otherwise fall back to un-pausing the I/O context (the guard line
 * checking s->pb is missing from this paste — verify upstream).
 * Returns AVERROR(ENOSYS) when neither mechanism is available.
 */
3231 int av_read_play(AVFormatContext *s)
3233     if (s->iformat->read_play)
3234         return s->iformat->read_play(s);
3236         return avio_pause(s->pb, 0);
3237     return AVERROR(ENOSYS);
/**
 * Pause playback: mirror of av_read_play() — demuxer read_pause callback
 * first, then avio_pause(s->pb, 1) (its s->pb guard line is missing from
 * this paste), else AVERROR(ENOSYS).
 */
3240 int av_read_pause(AVFormatContext *s)
3242     if (s->iformat->read_pause)
3243         return s->iformat->read_pause(s);
3245         return avio_pause(s->pb, 1);
3246     return AVERROR(ENOSYS);
/**
 * Free the LAST stream of s (st must be s->streams[s->nb_streams-1],
 * enforced by the asserts) together with all its owned resources, and
 * shrink s->nb_streams.
 * NOTE(review): a few lines are missing from this paste; comments
 * describe visible code only.
 */
3249 void ff_free_stream(AVFormatContext *s, AVStream *st){
3250     av_assert0(s->nb_streams>0);
3251     av_assert0(s->streams[ s->nb_streams-1 ] == st);
3254         av_parser_close(st->parser);
3256     if (st->attached_pic.data)
3257         av_free_packet(&st->attached_pic);
3258     av_dict_free(&st->metadata);
3259     av_freep(&st->probe_data.buf);
3260     av_freep(&st->index_entries);
3261     av_freep(&st->codec->extradata);
3262     av_freep(&st->codec->subtitle_header);
3263     av_freep(&st->codec);
3264     av_freep(&st->priv_data);
3266         av_freep(&st->info->duration_error);
3267     av_freep(&st->info);
/* Finally release the AVStream itself and pop it from the array. */
3268     av_freep(&s->streams[ --s->nb_streams ]);
/**
 * Free an AVFormatContext and everything it owns: private options,
 * streams (via ff_free_stream, last-to-first), programs, chapters and
 * metadata.
 * NOTE(review): some lines are missing from this paste; comments
 * describe visible code only.
 */
3271 void avformat_free_context(AVFormatContext *s)
3279     if (s->iformat && s->iformat->priv_class && s->priv_data)
3280         av_opt_free(s->priv_data);
/* ff_free_stream() requires the last stream, so iterate backwards. */
3282     for(i=s->nb_streams-1; i>=0; i--) {
3283         ff_free_stream(s, s->streams[i]);
3285     for(i=s->nb_programs-1; i>=0; i--) {
3286         av_dict_free(&s->programs[i]->metadata);
3287         av_freep(&s->programs[i]->stream_index);
3288         av_freep(&s->programs[i]);
3290     av_freep(&s->programs);
3291     av_freep(&s->priv_data);
3292     while(s->nb_chapters--) {
3293         av_dict_free(&s->chapters[s->nb_chapters]->metadata);
3294         av_freep(&s->chapters[s->nb_chapters]);
3296     av_freep(&s->chapters);
3297     av_dict_free(&s->metadata);
3298     av_freep(&s->streams);
3302 #if FF_API_CLOSE_INPUT_FILE
/* Deprecated alias kept under FF_API_CLOSE_INPUT_FILE; forwards to
 * avformat_close_input(), which also NULLs the caller's pointer. */
3303 void av_close_input_file(AVFormatContext *s)
3305     avformat_close_input(&s);
/**
 * Close an input context: flush queued packets, invoke the demuxer's
 * read_close, free the context, and (in the missing lines, presumably)
 * close the AVIOContext unless it is caller-owned (AVFMT_NOFILE /
 * AVFMT_FLAG_CUSTOM_IO) and reset *ps to NULL.
 */
3309 void avformat_close_input(AVFormatContext **ps)
3320     if ((s->iformat && s->iformat->flags & AVFMT_NOFILE) ||
3321         (s->flags & AVFMT_FLAG_CUSTOM_IO))
3324     flush_packet_queue(s);
3327         if (s->iformat->read_close)
3328             s->iformat->read_close(s);
3331     avformat_free_context(s);
3338 #if FF_API_NEW_STREAM
/* Deprecated wrapper kept under FF_API_NEW_STREAM: creates a stream via
 * avformat_new_stream() (the id assignment lines are missing from this
 * paste — presumably st->id = id; verify upstream). */
3339 AVStream *av_new_stream(AVFormatContext *s, int id)
3341     AVStream *st = avformat_new_stream(s, NULL);
/**
 * Append a new AVStream to s, with its codec context allocated for codec c
 * (may be NULL) and all timing fields initialized to "unknown".
 * Returns the new stream or (in missing lines, presumably) NULL on
 * allocation failure.
 * NOTE(review): interior lines are missing from this paste; comments
 * describe visible code only.
 */
3348 AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c)
3354     if (s->nb_streams >= INT_MAX/sizeof(*streams))
3356     streams = av_realloc_array(s->streams, s->nb_streams + 1, sizeof(*streams));
3359     s->streams = streams;
3361     st = av_mallocz(sizeof(AVStream));
3364     if (!(st->info = av_mallocz(sizeof(*st->info)))) {
3368     st->info->last_dts = AV_NOPTS_VALUE;
3370     st->codec = avcodec_alloc_context3(c);
3372         /* no default bitrate if decoding */
3373         st->codec->bit_rate = 0;
3375     st->index = s->nb_streams;
3376     st->start_time = AV_NOPTS_VALUE;
3377     st->duration = AV_NOPTS_VALUE;
3378         /* we set the current DTS to 0 so that formats without any timestamps
3379            but durations get some timestamps, formats with some unknown
3380            timestamps have their first few packets buffered and the
3381            timestamps corrected before they are returned to the user */
3382     st->cur_dts = s->iformat ? RELATIVE_TS_BASE : 0;
3383     st->first_dts = AV_NOPTS_VALUE;
3384     st->probe_packets = MAX_PROBE_PACKETS;
3385     st->pts_wrap_reference = AV_NOPTS_VALUE;
3386     st->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3388     /* default pts setting is MPEG-like */
3389     avpriv_set_pts_info(st, 33, 1, 90000);
3390     st->last_IP_pts = AV_NOPTS_VALUE;
3391     for(i=0; i<MAX_REORDER_DELAY+1; i++)
3392         st->pts_buffer[i]= AV_NOPTS_VALUE;
3393     st->reference_dts = AV_NOPTS_VALUE;
3395     st->sample_aspect_ratio = (AVRational){0,1};
3397 #if FF_API_R_FRAME_RATE
3398     st->info->last_dts      = AV_NOPTS_VALUE;
3400     st->info->fps_first_dts = AV_NOPTS_VALUE;
3401     st->info->fps_last_dts  = AV_NOPTS_VALUE;
/* Publish the fully-initialized stream. */
3403     s->streams[s->nb_streams++] = st;
/**
 * Return the program with the given id, creating and registering a new one
 * (with default discard/wrap/timing fields) when none exists yet.
 * NOTE(review): interior lines are missing from this paste; comments
 * describe visible code only.
 */
3407 AVProgram *av_new_program(AVFormatContext *ac, int id)
3409     AVProgram *program=NULL;
3412     av_dlog(ac, "new_program: id=0x%04x\n", id);
/* Reuse an existing program with this id when present. */
3414     for(i=0; i<ac->nb_programs; i++)
3415         if(ac->programs[i]->id == id)
3416             program = ac->programs[i];
3419         program = av_mallocz(sizeof(AVProgram));
3422         dynarray_add(&ac->programs, &ac->nb_programs, program);
3423         program->discard = AVDISCARD_NONE;
3426     program->pts_wrap_reference = AV_NOPTS_VALUE;
3427     program->pts_wrap_behavior = AV_PTS_WRAP_IGNORE;
3429     program->start_time =
3430     program->end_time   = AV_NOPTS_VALUE;
/**
 * Return the chapter with the given id, creating and registering a new one
 * when needed, then (re)set its title metadata, time base and start (the
 * end assignment is among the lines missing from this paste).
 */
3435 AVChapter *avpriv_new_chapter(AVFormatContext *s, int id, AVRational time_base, int64_t start, int64_t end, const char *title)
3437     AVChapter *chapter = NULL;
/* Reuse an existing chapter with this id when present. */
3440     for(i=0; i<s->nb_chapters; i++)
3441         if(s->chapters[i]->id == id)
3442             chapter = s->chapters[i];
3445         chapter= av_mallocz(sizeof(AVChapter));
3448         dynarray_add(&s->chapters, &s->nb_chapters, chapter);
3450     av_dict_set(&chapter->metadata, "title", title, 0);
3452     chapter->time_base= time_base;
3453     chapter->start = start;
/**
 * Add stream index idx to the program with id progid, growing its
 * stream_index array; no-op when idx is out of range or already listed.
 * NOTE(review): a few lines are missing from this paste; comments
 * describe visible code only.
 */
3459 void ff_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx)
3462     AVProgram *program=NULL;
3465     if (idx >= ac->nb_streams) {
3466         av_log(ac, AV_LOG_ERROR, "stream index %d is not valid\n", idx);
3470     for(i=0; i<ac->nb_programs; i++){
3471         if(ac->programs[i]->id != progid)
3473         program = ac->programs[i];
/* Skip when this stream is already part of the program. */
3474         for(j=0; j<program->nb_stream_indexes; j++)
3475             if(program->stream_index[j] == idx)
/* Grow via a temporary so the original array survives OOM. */
3478         tmp = av_realloc_array(program->stream_index, program->nb_stream_indexes+1, sizeof(unsigned int));
3481         program->stream_index = tmp;
3482         program->stream_index[program->nb_stream_indexes++] = idx;
3487 static void print_fps(double d, const char *postfix){
3488 uint64_t v= lrintf(d*100);
3489 if (v% 100 ) av_log(NULL, AV_LOG_INFO, ", %3.2f %s", d, postfix);
3490 else if(v%(100*1000)) av_log(NULL, AV_LOG_INFO, ", %1.0f %s", d, postfix);
3491 else av_log(NULL, AV_LOG_INFO, ", %1.0fk %s", d/1000, postfix);
/**
 * Log the metadata dictionary m, one "key: value" line per entry,
 * skipping the "language" key (shown elsewhere) and suppressing output
 * entirely when "language" is the only entry. Control characters in
 * values are filtered; CR becomes a space, LF continues on a new line.
 * NOTE(review): a few lines are missing from this paste; comments
 * describe visible code only.
 */
3494 static void dump_metadata(void *ctx, AVDictionary *m, const char *indent)
3496     if(m && !(av_dict_count(m) == 1 && av_dict_get(m, "language", NULL, 0))){
3497         AVDictionaryEntry *tag=NULL;
3499         av_log(ctx, AV_LOG_INFO, "%sMetadata:\n", indent);
3500         while((tag=av_dict_get(m, "", tag, AV_DICT_IGNORE_SUFFIX))) {
3501             if(strcmp("language", tag->key)){
3502                 const char *p = tag->value;
3503                 av_log(ctx, AV_LOG_INFO, "%s  %-16s: ", indent, tag->key);
/* Copy the run of printable characters before the next control char. */
3506                     size_t len = strcspn(p, "\x8\xa\xb\xc\xd");
3507                     av_strlcpy(tmp, p, FFMIN(sizeof(tmp), len+1));
3508                     av_log(ctx, AV_LOG_INFO, "%s", tmp);
3510                     if (*p == 0xd) av_log(ctx, AV_LOG_INFO, " ");
3511                     if (*p == 0xa) av_log(ctx, AV_LOG_INFO, "\n%s  %-16s: ", indent, "");
3514                 av_log(ctx, AV_LOG_INFO, "\n");
3520 /* "user interface" functions */
/* Print one stream's description ("Stream #index:i[0xid](lang): codec ...")
 * followed by frame rates, disposition flags and the stream metadata.
 * NOTE(review): the `char buf[256];` declaration, several closing braces and
 * an `#endif` for FF_API_R_FRAME_RATE are missing from this chunk. */
3521 static void dump_stream_format(AVFormatContext *ic, int i, int index, int is_output)
3524 int flags = (is_output ? ic->oformat->flags : ic->iformat->flags);
3525 AVStream *st = ic->streams[i];
/* reduce the time base for the debug print below */
3526 int g = av_gcd(st->time_base.num, st->time_base.den);
3527 AVDictionaryEntry *lang = av_dict_get(st->metadata, "language", NULL, 0);
3528 avcodec_string(buf, sizeof(buf), st->codec, is_output);
3529 av_log(NULL, AV_LOG_INFO, " Stream #%d:%d", index, i);
3530 /* the pid is an important information, so we display it */
3531 /* XXX: add a generic system */
3532 if (flags & AVFMT_SHOW_IDS)
3533 av_log(NULL, AV_LOG_INFO, "[0x%x]", st->id);
3535 av_log(NULL, AV_LOG_INFO, "(%s)", lang->value);
3536 av_log(NULL, AV_LOG_DEBUG, ", %d, %d/%d", st->codec_info_nb_frames, st->time_base.num/g, st->time_base.den/g);
3537 av_log(NULL, AV_LOG_INFO, ": %s", buf);
/* show SAR/DAR only when the stream-level SAR differs from the codec-level one */
3538 if (st->sample_aspect_ratio.num && // default
3539 av_cmp_q(st->sample_aspect_ratio, st->codec->sample_aspect_ratio)) {
3540 AVRational display_aspect_ratio;
3541 av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den,
3542 st->codec->width*st->sample_aspect_ratio.num,
3543 st->codec->height*st->sample_aspect_ratio.den,
3545 av_log(NULL, AV_LOG_INFO, ", SAR %d:%d DAR %d:%d",
3546 st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
3547 display_aspect_ratio.num, display_aspect_ratio.den);
/* video streams additionally report fps/tbr/tbn/tbc */
3549 if(st->codec->codec_type == AVMEDIA_TYPE_VIDEO){
3550 if(st->avg_frame_rate.den && st->avg_frame_rate.num)
3551 print_fps(av_q2d(st->avg_frame_rate), "fps");
3552 #if FF_API_R_FRAME_RATE
3553 if(st->r_frame_rate.den && st->r_frame_rate.num)
3554 print_fps(av_q2d(st->r_frame_rate), "tbr");
3556 if(st->time_base.den && st->time_base.num)
3557 print_fps(1/av_q2d(st->time_base), "tbn");
3558 if(st->codec->time_base.den && st->codec->time_base.num)
3559 print_fps(1/av_q2d(st->codec->time_base), "tbc");
/* disposition flags, printed in a fixed order */
3561 if (st->disposition & AV_DISPOSITION_DEFAULT)
3562 av_log(NULL, AV_LOG_INFO, " (default)");
3563 if (st->disposition & AV_DISPOSITION_DUB)
3564 av_log(NULL, AV_LOG_INFO, " (dub)");
3565 if (st->disposition & AV_DISPOSITION_ORIGINAL)
3566 av_log(NULL, AV_LOG_INFO, " (original)");
3567 if (st->disposition & AV_DISPOSITION_COMMENT)
3568 av_log(NULL, AV_LOG_INFO, " (comment)");
3569 if (st->disposition & AV_DISPOSITION_LYRICS)
3570 av_log(NULL, AV_LOG_INFO, " (lyrics)");
3571 if (st->disposition & AV_DISPOSITION_KARAOKE)
3572 av_log(NULL, AV_LOG_INFO, " (karaoke)");
3573 if (st->disposition & AV_DISPOSITION_FORCED)
3574 av_log(NULL, AV_LOG_INFO, " (forced)");
3575 if (st->disposition & AV_DISPOSITION_HEARING_IMPAIRED)
3576 av_log(NULL, AV_LOG_INFO, " (hearing impaired)");
3577 if (st->disposition & AV_DISPOSITION_VISUAL_IMPAIRED)
3578 av_log(NULL, AV_LOG_INFO, " (visual impaired)");
3579 if (st->disposition & AV_DISPOSITION_CLEAN_EFFECTS)
3580 av_log(NULL, AV_LOG_INFO, " (clean effects)");
3581 av_log(NULL, AV_LOG_INFO, "\n");
3582 dump_metadata(NULL, st->metadata, " ");
/* Public entry point: dump a human-readable description of the whole
 * format context (container, duration, bitrate, chapters, programs and
 * per-stream info) to the log.
 * NOTE(review): the remaining parameters of the signature (index, url,
 * is_output), several declarations and closing braces, and the hours/mins
 * computation are missing from this chunk. */
3585 void av_dump_format(AVFormatContext *ic,
/* printed[] tracks which streams were already shown as part of a program,
 * so the trailing loop only dumps the leftovers */
3591 uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
3592 if (ic->nb_streams && !printed)
3595 av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
3596 is_output ? "Output" : "Input",
3598 is_output ? ic->oformat->name : ic->iformat->name,
3599 is_output ? "to" : "from", url);
3600 dump_metadata(NULL, ic->metadata, " ");
3602 av_log(NULL, AV_LOG_INFO, " Duration: ");
3603 if (ic->duration != AV_NOPTS_VALUE) {
3604 int hours, mins, secs, us;
/* +5000 rounds to the nearest centisecond before splitting */
3605 int64_t duration = ic->duration + 5000;
3606 secs = duration / AV_TIME_BASE;
3607 us = duration % AV_TIME_BASE;
3612 av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
3613 (100 * us) / AV_TIME_BASE);
3615 av_log(NULL, AV_LOG_INFO, "N/A");
3617 if (ic->start_time != AV_NOPTS_VALUE) {
3619 av_log(NULL, AV_LOG_INFO, ", start: ");
3620 secs = ic->start_time / AV_TIME_BASE;
/* abs() so a negative start time prints with the sign on `secs` only */
3621 us = abs(ic->start_time % AV_TIME_BASE);
3622 av_log(NULL, AV_LOG_INFO, "%d.%06d",
3623 secs, (int)av_rescale(us, 1000000, AV_TIME_BASE));
3625 av_log(NULL, AV_LOG_INFO, ", bitrate: ");
3627 av_log(NULL, AV_LOG_INFO,"%d kb/s", ic->bit_rate / 1000);
3629 av_log(NULL, AV_LOG_INFO, "N/A");
3631 av_log(NULL, AV_LOG_INFO, "\n");
3633 for (i = 0; i < ic->nb_chapters; i++) {
3634 AVChapter *ch = ic->chapters[i];
3635 av_log(NULL, AV_LOG_INFO, " Chapter #%d.%d: ", index, i);
3636 av_log(NULL, AV_LOG_INFO, "start %f, ", ch->start * av_q2d(ch->time_base));
3637 av_log(NULL, AV_LOG_INFO, "end %f\n", ch->end * av_q2d(ch->time_base));
3639 dump_metadata(NULL, ch->metadata, " ");
3641 if(ic->nb_programs) {
3642 int j, k, total = 0;
3643 for(j=0; j<ic->nb_programs; j++) {
3644 AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
3646 av_log(NULL, AV_LOG_INFO, " Program %d %s\n", ic->programs[j]->id,
3647 name ? name->value : "");
3648 dump_metadata(NULL, ic->programs[j]->metadata, " ");
3649 for(k=0; k<ic->programs[j]->nb_stream_indexes; k++) {
3650 dump_stream_format(ic, ic->programs[j]->stream_index[k], index, is_output);
3651 printed[ic->programs[j]->stream_index[k]] = 1;
3653 total += ic->programs[j]->nb_stream_indexes;
/* some streams may belong to no program at all */
3655 if (total < ic->nb_streams)
3656 av_log(NULL, AV_LOG_INFO, " No Program\n");
3658 for(i=0;i<ic->nb_streams;i++)
3660 dump_stream_format(ic, i, index, is_output);
3665 uint64_t ff_ntp_time(void)
3667 return (av_gettime() / 1000) * 1000 + NTP_OFFSET_US;
/* Expand a single "%d"/"%0Nd" pattern in `path` with `number`, writing the
 * result to buf. Returns 0 on success, negative on error (buffer too small,
 * missing or duplicate %d).
 * NOTE(review): this chunk is heavily gapped — the scanning loop framing,
 * the '%%' handling and the fail path are missing, so the visible lines do
 * not form a complete control flow. */
3670 int av_get_frame_filename(char *buf, int buf_size,
3671 const char *path, int number)
3674 char *q, buf1[20], c;
3675 int nd, len, percentd_found;
/* parse an optional zero-padded width ("%05d" -> nd == 5) */
3687 while (av_isdigit(*p)) {
3688 nd = nd * 10 + *p++ - '0';
3691 } while (av_isdigit(c));
/* format the frame number with the requested padding */
3700 snprintf(buf1, sizeof(buf1), "%0*d", nd, number);
3702 if ((q - buf + len) > buf_size - 1)
3704 memcpy(q, buf1, len);
3712 if ((q - buf) < buf_size - 1)
/* exactly one %d is required in the pattern */
3716 if (!percentd_found)
/* Dump `size` bytes as hex, 16 per line with an ASCII column; output goes
 * to FILE *f when non-NULL, otherwise to av_log(avcl, level, ...).
 * NOTE(review): the per-line length computation, offset printing and the
 * loop braces are missing from this chunk. */
3725 static void hex_dump_internal(void *avcl, FILE *f, int level,
3726 const uint8_t *buf, int size)
/* one macro so the same body serves both FILE and av_log sinks */
3729 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3731 for(i=0;i<size;i+=16) {
3738 PRINT(" %02x", buf[i+j]);
/* ASCII column: non-printable bytes are replaced (with '.') */
3743 for(j=0;j<len;j++) {
3745 if (c < ' ' || c > '~')
/**
 * Public wrapper: hex-dump `size` bytes of `buf` to the stdio stream `f`,
 * 16 bytes per line with an ASCII column (see hex_dump_internal()).
 */
void av_hex_dump(FILE *f, const uint8_t *buf, int size)
{
    hex_dump_internal(NULL, f, 0, buf, size);
}
3759 void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size)
3761 hex_dump_internal(avcl, NULL, level, buf, size);
/* Dump one packet's header fields (stream index, keyframe flag, duration,
 * dts, pts, size) and optionally its payload as hex. Timestamps are scaled
 * by `time_base` to print seconds. Output goes to FILE *f when non-NULL,
 * otherwise to av_log(avcl, level, ...).
 * NOTE(review): the "dts="/"pts=" label prints, the "N/A" branches and the
 * `if (dump_payload)` guard are missing from this chunk. */
3764 static void pkt_dump_internal(void *avcl, FILE *f, int level, AVPacket *pkt, int dump_payload, AVRational time_base)
3766 #define PRINT(...) do { if (!f) av_log(avcl, level, __VA_ARGS__); else fprintf(f, __VA_ARGS__); } while(0)
3767 PRINT("stream #%d:\n", pkt->stream_index);
3768 PRINT(" keyframe=%d\n", ((pkt->flags & AV_PKT_FLAG_KEY) != 0));
3769 PRINT(" duration=%0.3f\n", pkt->duration * av_q2d(time_base));
3770 /* DTS is _always_ valid after av_read_frame() */
3772 if (pkt->dts == AV_NOPTS_VALUE)
3775 PRINT("%0.3f", pkt->dts * av_q2d(time_base));
3776 /* PTS may not be known if B-frames are present. */
3778 if (pkt->pts == AV_NOPTS_VALUE)
3781 PRINT("%0.3f", pkt->pts * av_q2d(time_base));
3783 PRINT(" size=%d\n", pkt->size);
/* payload dump always goes through the FILE-based helper here */
3786 av_hex_dump(f, pkt->data, pkt->size);
3789 void av_pkt_dump2(FILE *f, AVPacket *pkt, int dump_payload, AVStream *st)
3791 pkt_dump_internal(NULL, f, 0, pkt, dump_payload, st->time_base);
3794 void av_pkt_dump_log2(void *avcl, int level, AVPacket *pkt, int dump_payload,
3797 pkt_dump_internal(avcl, NULL, level, pkt, dump_payload, st->time_base);
/* Split a URL of the form proto://user:pass@host:port/path?query into its
 * components. Any output pointer may receive an empty string when the
 * component is absent; *port_ptr is set to -1 when no port is present.
 * NOTE(review): the `const char *url` parameter line, several `return`
 * statements and closing braces are missing from this chunk. */
3800 void av_url_split(char *proto, int proto_size,
3801 char *authorization, int authorization_size,
3802 char *hostname, int hostname_size,
3804 char *path, int path_size,
3807 const char *p, *ls, *ls2, *at, *at2, *col, *brk;
/* initialize every output so absent components read as empty */
3809 if (port_ptr) *port_ptr = -1;
3810 if (proto_size > 0) proto[0] = 0;
3811 if (authorization_size > 0) authorization[0] = 0;
3812 if (hostname_size > 0) hostname[0] = 0;
3813 if (path_size > 0) path[0] = 0;
3815 /* parse protocol */
3816 if ((p = strchr(url, ':'))) {
3817 av_strlcpy(proto, url, FFMIN(proto_size, p + 1 - url));
3822 /* no protocol means plain filename */
3823 av_strlcpy(path, url, path_size);
3827 /* separate path from hostname */
3828 ls = strchr(p, '/');
3829 ls2 = strchr(p, '?');
/* path starts at whichever of '/' or '?' comes first */
3833 ls = FFMIN(ls, ls2);
3835 av_strlcpy(path, ls, path_size);
3837 ls = &p[strlen(p)]; // XXX
3839 /* the rest is hostname, use that to parse auth/port */
3841 /* authorization (user[:pass]@hostname) */
/* loop so the LAST '@' before the path wins (user may contain '@') */
3843 while ((at = strchr(p, '@')) && at < ls) {
3844 av_strlcpy(authorization, at2,
3845 FFMIN(authorization_size, at + 1 - at2));
3846 p = at + 1; /* skip '@' */
/* bracketed IPv6 literal: [::1]:port */
3849 if (*p == '[' && (brk = strchr(p, ']')) && brk < ls) {
3851 av_strlcpy(hostname, p + 1,
3852 FFMIN(hostname_size, brk - p));
3853 if (brk[1] == ':' && port_ptr)
3854 *port_ptr = atoi(brk + 2);
3855 } else if ((col = strchr(p, ':')) && col < ls) {
3856 av_strlcpy(hostname, p,
3857 FFMIN(col + 1 - p, hostname_size));
3858 if (port_ptr) *port_ptr = atoi(col + 1);
/* no port: everything up to the path separator is the hostname */
3860 av_strlcpy(hostname, p,
3861 FFMIN(ls + 1 - p, hostname_size));
/**
 * Write the hexadecimal representation of the `s` bytes at `src` into
 * `buff`. Exactly 2*s characters are written; the output is NOT
 * NUL-terminated — the caller must size and terminate the buffer.
 *
 * @param lowercase use 'a'-'f' when non-zero, 'A'-'F' otherwise
 * @return buff
 */
char *ff_data_to_hex(char *buff, const uint8_t *src, int s, int lowercase)
{
    int i;
    static const char hex_table_uc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'A', 'B',
                                           'C', 'D', 'E', 'F' };
    static const char hex_table_lc[16] = { '0', '1', '2', '3',
                                           '4', '5', '6', '7',
                                           '8', '9', 'a', 'b',
                                           'c', 'd', 'e', 'f' };
    const char *hex_table = lowercase ? hex_table_lc : hex_table_uc;

    for (i = 0; i < s; i++) {
        buff[i * 2]     = hex_table[src[i] >> 4];
        buff[i * 2 + 1] = hex_table[src[i] & 0xF];
    }

    return buff;
}
/* Parse a whitespace-separated hex string into bytes. When `data` is NULL
 * only the length is computed. Returns the number of bytes decoded.
 * NOTE(review): the loop framing, nibble accumulation and return statement
 * are missing from this chunk; only the digit classification is visible. */
3886 int ff_hex_to_data(uint8_t *data, const char *p)
/* skip leading whitespace before each pair of digits */
3893 p += strspn(p, SPACE_CHARS);
3896 c = av_toupper((unsigned char) *p++);
3897 if (c >= '0' && c <= '9')
3899 else if (c >= 'A' && c <= 'F')
#if FF_API_SET_PTS_INFO
/**
 * Deprecated public wrapper around avpriv_set_pts_info(); compiled only
 * while the FF_API_SET_PTS_INFO compatibility switch is enabled.
 */
void av_set_pts_info(AVStream *s, int pts_wrap_bits,
                     unsigned int pts_num, unsigned int pts_den)
{
    avpriv_set_pts_info(s, pts_wrap_bits, pts_num, pts_den);
}
#endif
/* Set the stream time base to pts_num/pts_den, reduced to lowest terms
 * (clamped into INT_MAX by av_reduce), and record the timestamp wrap size.
 * Invalid (non-positive) time bases are rejected with an error log.
 * NOTE(review): the `AVRational new_tb;` declaration, the early `return;`
 * on invalid input and the braces are missing from this chunk. */
3922 void avpriv_set_pts_info(AVStream *s, int pts_wrap_bits,
3923 unsigned int pts_num, unsigned int pts_den)
/* av_reduce returns non-zero when the reduction was exact */
3926 if(av_reduce(&new_tb.num, &new_tb.den, pts_num, pts_den, INT_MAX)){
3927 if(new_tb.num != pts_num)
3928 av_log(NULL, AV_LOG_DEBUG, "st:%d removing common factor %d from timebase\n", s->index, pts_num/new_tb.num);
3930 av_log(NULL, AV_LOG_WARNING, "st:%d has too large timebase, reducing\n", s->index);
3932 if(new_tb.num <= 0 || new_tb.den <= 0) {
3933 av_log(NULL, AV_LOG_ERROR, "Ignoring attempt to set invalid timebase %d/%d for st:%d\n", new_tb.num, new_tb.den, s->index);
3936 s->time_base = new_tb;
/* keep the decoder's packet timebase in sync with the stream */
3937 av_codec_set_pkt_timebase(s->codec, new_tb);
3938 s->pts_wrap_bits = pts_wrap_bits;
/* Parse a comma/whitespace-separated list of key=value pairs, invoking
 * callback_get_buf for each key to obtain the destination buffer for the
 * value. Values may be double-quoted; a backslash escapes the next char.
 * NOTE(review): the outer while-loop framing, the quoted-value entry/exit
 * and the final NUL-termination are missing from this chunk. */
3941 void ff_parse_key_value(const char *str, ff_parse_key_val_cb callback_get_buf,
3944 const char *ptr = str;
3946 /* Parse key=value pairs. */
3949 char *dest = NULL, *dest_end;
3950 int key_len, dest_len = 0;
3952 /* Skip whitespace and potential commas. */
3953 while (*ptr && (av_isspace(*ptr) || *ptr == ','))
3960 if (!(ptr = strchr(key, '=')))
3963 key_len = ptr - key;
/* the callback maps the key to a destination buffer (dest/dest_len) */
3965 callback_get_buf(context, key, key_len, &dest, &dest_len);
/* reserve the last byte for the terminating NUL */
3966 dest_end = dest + dest_len - 1;
/* quoted value: copy until the closing quote */
3970 while (*ptr && *ptr != '\"') {
3974 if (dest && dest < dest_end)
3978 if (dest && dest < dest_end)
/* unquoted value: copy until whitespace or comma */
3986 for (; *ptr && !(av_isspace(*ptr) || *ptr == ','); ptr++)
3987 if (dest && dest < dest_end)
3995 int ff_find_stream_index(AVFormatContext *s, int id)
3998 for (i = 0; i < s->nb_streams; i++) {
3999 if (s->streams[i]->id == id)
/* Convert an ISO-8601-style date string to Unix time (UTC, via av_timegm).
 * Both the space-separated and the 'T'-separated variants are tried; the
 * 'T' form is preferred when only it parses fully.
 * NOTE(review): the declarations of ret1/ret2 and the branch selecting
 * between time1 and time2 are missing from this chunk. */
4005 int64_t ff_iso8601_to_unix_time(const char *datestr)
4007 struct tm time1 = {0}, time2 = {0};
/* the format strings deliberately allow " - " separators between fields */
4009 ret1 = av_small_strptime(datestr, "%Y - %m - %d %H:%M:%S", &time1);
4010 ret2 = av_small_strptime(datestr, "%Y - %m - %dT%H:%M:%S", &time2);
4012 return av_timegm(&time2);
4014 return av_timegm(&time1);
4017 int avformat_query_codec(AVOutputFormat *ofmt, enum AVCodecID codec_id, int std_compliance)
4020 if (ofmt->query_codec)
4021 return ofmt->query_codec(codec_id, std_compliance);
4022 else if (ofmt->codec_tag)
4023 return !!av_codec_get_tag(ofmt->codec_tag, codec_id);
4024 else if (codec_id == ofmt->video_codec || codec_id == ofmt->audio_codec ||
4025 codec_id == ofmt->subtitle_codec)
4028 return AVERROR_PATCHWELCOME;
/* Initialize global networking (sets the "inited globally" flag so
 * per-protocol init is skipped, then runs ff_network_init).
 * NOTE(review): the CONFIG_NETWORK preprocessor framing, the `int ret;`
 * declaration, the error return and the final `return 0;` are missing
 * from this chunk. */
4031 int avformat_network_init(void)
/* mark networking as initialized by the application, not lazily */
4035 ff_network_inited_globally = 1;
4036 if ((ret = ff_network_init()) < 0)
/* Tear down global networking state (counterpart of avformat_network_init).
 * NOTE(review): the entire body of this function is missing from this
 * chunk; only the signature line survives. */
4043 int avformat_network_deinit(void)
/* Attach an AV_PKT_DATA_PARAM_CHANGE side-data block to pkt describing the
 * parameter changes that are non-zero among channels, channel_layout,
 * sample_rate and width/height. The payload layout is: LE32 flags followed
 * by the changed values in declaration order.
 * NOTE(review): the `size`/`flags`/`data` declarations, the per-field size
 * accumulation, some `if (...)` guards and the `return 0;` are missing
 * from this chunk. */
4052 int ff_add_param_change(AVPacket *pkt, int32_t channels,
4053 uint64_t channel_layout, int32_t sample_rate,
4054 int32_t width, int32_t height)
/* a packet is required to attach side data to */
4060 return AVERROR(EINVAL);
4063 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT;
4065 if (channel_layout) {
4067 flags |= AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT;
4071 flags |= AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE;
4073 if (width || height) {
4075 flags |= AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS;
4077 data = av_packet_new_side_data(pkt, AV_PKT_DATA_PARAM_CHANGE, size);
4079 return AVERROR(ENOMEM);
/* serialize: flags first, then each changed field, little-endian */
4080 bytestream_put_le32(&data, flags);
4082 bytestream_put_le32(&data, channels);
4084 bytestream_put_le64(&data, channel_layout);
4086 bytestream_put_le32(&data, sample_rate);
4087 if (width || height) {
4088 bytestream_put_le32(&data, width);
4089 bytestream_put_le32(&data, height);
4094 AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
4096 AVRational undef = {0, 1};
4097 AVRational stream_sample_aspect_ratio = stream ? stream->sample_aspect_ratio : undef;
4098 AVRational codec_sample_aspect_ratio = stream && stream->codec ? stream->codec->sample_aspect_ratio : undef;
4099 AVRational frame_sample_aspect_ratio = frame ? frame->sample_aspect_ratio : codec_sample_aspect_ratio;
4101 av_reduce(&stream_sample_aspect_ratio.num, &stream_sample_aspect_ratio.den,
4102 stream_sample_aspect_ratio.num, stream_sample_aspect_ratio.den, INT_MAX);
4103 if (stream_sample_aspect_ratio.num <= 0 || stream_sample_aspect_ratio.den <= 0)
4104 stream_sample_aspect_ratio = undef;
4106 av_reduce(&frame_sample_aspect_ratio.num, &frame_sample_aspect_ratio.den,
4107 frame_sample_aspect_ratio.num, frame_sample_aspect_ratio.den, INT_MAX);
4108 if (frame_sample_aspect_ratio.num <= 0 || frame_sample_aspect_ratio.den <= 0)
4109 frame_sample_aspect_ratio = undef;
4111 if (stream_sample_aspect_ratio.num)
4112 return stream_sample_aspect_ratio;
4114 return frame_sample_aspect_ratio;
4117 AVRational av_guess_frame_rate(AVFormatContext *format, AVStream *st, AVFrame *frame)
4119 AVRational fr = st->r_frame_rate;
4121 if (st->codec->ticks_per_frame > 1) {
4122 AVRational codec_fr = av_inv_q(st->codec->time_base);
4123 AVRational avg_fr = st->avg_frame_rate;
4124 codec_fr.den *= st->codec->ticks_per_frame;
4125 if ( codec_fr.num > 0 && codec_fr.den > 0 && av_q2d(codec_fr) < av_q2d(fr)*0.7
4126 && fabs(1.0 - av_q2d(av_div_q(avg_fr, fr))) > 0.1)
/* Test whether stream st matches the stream specifier `spec`:
 *   ""        -> every stream
 *   "<index>" -> stream with that index
 *   "v/a/s/d/t[:index]" -> nth stream of that media type
 *   "p:<id>[:index]"    -> stream (or nth stream) of program <id>
 *   "#<id>"   -> stream with that container-level id
 * Returns >0 on match, 0 on no match, AVERROR on a malformed specifier.
 * NOTE(review): the `const char *spec` parameter line, several `return 0;`
 * statements, the `switch` header and a number of braces are missing from
 * this chunk. */
4133 int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
4136 if (*spec <= '9' && *spec >= '0') /* opt:index */
4137 return strtol(spec, NULL, 0) == st->index;
4138 else if (*spec == 'v' || *spec == 'a' || *spec == 's' || *spec == 'd' ||
4139 *spec == 't') { /* opt:[vasdt] */
4140 enum AVMediaType type;
4143 case 'v': type = AVMEDIA_TYPE_VIDEO; break;
4144 case 'a': type = AVMEDIA_TYPE_AUDIO; break;
4145 case 's': type = AVMEDIA_TYPE_SUBTITLE; break;
4146 case 'd': type = AVMEDIA_TYPE_DATA; break;
4147 case 't': type = AVMEDIA_TYPE_ATTACHMENT; break;
4148 default: av_assert0(0);
4150 if (type != st->codec->codec_type)
4152 if (*spec++ == ':') { /* possibly followed by :index */
/* count only streams of the requested type when resolving :index */
4153 int i, index = strtol(spec, NULL, 0);
4154 for (i = 0; i < s->nb_streams; i++)
4155 if (s->streams[i]->codec->codec_type == type && index-- == 0)
4156 return i == st->index;
4160 } else if (*spec == 'p' && *(spec + 1) == ':') {
4164 prog_id = strtol(spec, &endptr, 0);
4165 for (i = 0; i < s->nb_programs; i++) {
4166 if (s->programs[i]->id != prog_id)
/* "p:<id>:<n>" selects the nth stream inside the program */
4169 if (*endptr++ == ':') {
4170 int stream_idx = strtol(endptr, NULL, 0);
4171 return stream_idx >= 0 &&
4172 stream_idx < s->programs[i]->nb_stream_indexes &&
4173 st->index == s->programs[i]->stream_index[stream_idx];
/* "p:<id>" alone matches any stream belonging to the program */
4176 for (j = 0; j < s->programs[i]->nb_stream_indexes; j++)
4177 if (st->index == s->programs[i]->stream_index[j])
4181 } else if (*spec == '#') {
4184 sid = strtol(spec + 1, &endptr, 0);
4186 return st->id == sid;
4187 } else if (!*spec) /* empty specifier, matches everything */
4190 av_log(s, AV_LOG_ERROR, "Invalid stream specifier: %s.\n", spec);
4191 return AVERROR(EINVAL);
/* Install canned H.264 SPS/PPS extradata for AVC-Intra streams, which carry
 * no in-band parameter sets. The variant is chosen by coded width (1920 =
 * AVC-Intra 100 1080-line, 1440 = AVC-Intra 50 1080i, 1280 = AVC-Intra 100
 * 720p) and, for 1920, by field order (progressive vs interlaced).
 * NOTE(review): several rows of the extradata byte tables, the `int size`
 * declaration, the "no match -> return" guard and the alloc-failure return
 * are missing from this chunk — the tables below must NOT be treated as
 * complete parameter sets. */
4194 void ff_generate_avci_extradata(AVStream *st)
4196 static const uint8_t avci100_1080p_extradata[] = {
4198 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4199 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4200 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4201 0x18, 0x21, 0x02, 0x56, 0xb9, 0x3d, 0x7d, 0x7e,
4202 0x4f, 0xe3, 0x3f, 0x11, 0xf1, 0x9e, 0x08, 0xb8,
4203 0x8c, 0x54, 0x43, 0xc0, 0x78, 0x02, 0x27, 0xe2,
4204 0x70, 0x1e, 0x30, 0x10, 0x10, 0x14, 0x00, 0x00,
4205 0x03, 0x00, 0x04, 0x00, 0x00, 0x03, 0x00, 0xca,
4206 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
4208 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4211 static const uint8_t avci100_1080i_extradata[] = {
4213 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4214 0xb6, 0xd4, 0x20, 0x22, 0x33, 0x19, 0xc6, 0x63,
4215 0x23, 0x21, 0x01, 0x11, 0x98, 0xce, 0x33, 0x19,
4216 0x18, 0x21, 0x03, 0x3a, 0x46, 0x65, 0x6a, 0x65,
4217 0x24, 0xad, 0xe9, 0x12, 0x32, 0x14, 0x1a, 0x26,
4218 0x34, 0xad, 0xa4, 0x41, 0x82, 0x23, 0x01, 0x50,
4219 0x2b, 0x1a, 0x24, 0x69, 0x48, 0x30, 0x40, 0x2e,
4220 0x11, 0x12, 0x08, 0xc6, 0x8c, 0x04, 0x41, 0x28,
4221 0x4c, 0x34, 0xf0, 0x1e, 0x01, 0x13, 0xf2, 0xe0,
4222 0x3c, 0x60, 0x20, 0x20, 0x28, 0x00, 0x00, 0x03,
4223 0x00, 0x08, 0x00, 0x00, 0x03, 0x01, 0x94, 0x00,
4225 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x33, 0x48,
4228 static const uint8_t avci50_1080i_extradata[] = {
4230 0x00, 0x00, 0x00, 0x01, 0x67, 0x6e, 0x10, 0x28,
4231 0xa6, 0xd4, 0x20, 0x32, 0x33, 0x0c, 0x71, 0x18,
4232 0x88, 0x62, 0x10, 0x19, 0x19, 0x86, 0x38, 0x8c,
4233 0x44, 0x30, 0x21, 0x02, 0x56, 0x4e, 0x6e, 0x61,
4234 0x87, 0x3e, 0x73, 0x4d, 0x98, 0x0c, 0x03, 0x06,
4235 0x9c, 0x0b, 0x73, 0xe6, 0xc0, 0xb5, 0x18, 0x63,
4236 0x0d, 0x39, 0xe0, 0x5b, 0x02, 0xd4, 0xc6, 0x19,
4237 0x1a, 0x79, 0x8c, 0x32, 0x34, 0x24, 0xf0, 0x16,
4238 0x81, 0x13, 0xf7, 0xff, 0x80, 0x02, 0x00, 0x01,
4239 0xf1, 0x80, 0x80, 0x80, 0xa0, 0x00, 0x00, 0x03,
4240 0x00, 0x20, 0x00, 0x00, 0x06, 0x50, 0x80, 0x00,
4242 0x00, 0x00, 0x00, 0x01, 0x68, 0xee, 0x31, 0x12,
4245 static const uint8_t avci100_720p_extradata[] = {
4247 0x00, 0x00, 0x00, 0x01, 0x67, 0x7a, 0x10, 0x29,
4248 0xb6, 0xd4, 0x20, 0x2a, 0x33, 0x1d, 0xc7, 0x62,
4249 0xa1, 0x08, 0x40, 0x54, 0x66, 0x3b, 0x8e, 0xc5,
4250 0x42, 0x02, 0x10, 0x25, 0x64, 0x2c, 0x89, 0xe8,
4251 0x85, 0xe4, 0x21, 0x4b, 0x90, 0x83, 0x06, 0x95,
4252 0xd1, 0x06, 0x46, 0x97, 0x20, 0xc8, 0xd7, 0x43,
4253 0x08, 0x11, 0xc2, 0x1e, 0x4c, 0x91, 0x0f, 0x01,
4254 0x40, 0x16, 0xec, 0x07, 0x8c, 0x04, 0x04, 0x05,
4255 0x00, 0x00, 0x03, 0x00, 0x01, 0x00, 0x00, 0x03,
4256 0x00, 0x64, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00,
4258 0x00, 0x00, 0x00, 0x01, 0x68, 0xce, 0x31, 0x12,
/* select the table by coded width (and field order for 1080-line) */
4262 const uint8_t *data = 0;
4263 if (st->codec->width == 1920) {
4264 if (st->codec->field_order == AV_FIELD_PROGRESSIVE) {
4265 data = avci100_1080p_extradata;
4266 size = sizeof(avci100_1080p_extradata);
4268 data = avci100_1080i_extradata;
4269 size = sizeof(avci100_1080i_extradata);
4271 } else if (st->codec->width == 1440) {
4272 data = avci50_1080i_extradata;
4273 size = sizeof(avci50_1080i_extradata);
4274 } else if (st->codec->width == 1280) {
4275 data = avci100_720p_extradata;
4276 size = sizeof(avci100_720p_extradata);
/* replace any previous extradata with a copy of the selected table */
4280 av_freep(&st->codec->extradata);
4281 if (ff_alloc_extradata(st->codec, size))
4283 memcpy(st->codec->extradata, data, size);