
Merge remote-tracking branch 'x86/pie-x86' into q-x86
[android-x86/external-ffmpeg.git] / ffmpeg.c
index bf04a6c..cd19594 100644
--- a/ffmpeg.c
+++ b/ffmpeg.c
@@ -536,7 +536,6 @@ static void ffmpeg_cleanup(int ret)
         for (j = 0; j < ost->nb_bitstream_filters; j++)
             av_bsf_free(&ost->bsf_ctx[j]);
         av_freep(&ost->bsf_ctx);
-        av_freep(&ost->bsf_extradata_updated);
 
         av_frame_free(&ost->filtered_frame);
         av_frame_free(&ost->last_frame);
@@ -669,12 +668,28 @@ static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
     }
 }
 
-static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
+static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
 {
     AVFormatContext *s = of->ctx;
     AVStream *st = ost->st;
     int ret;
 
+    /*
+     * Audio encoders may split the packets --  #frames in != #packets out.
+     * But there is no reordering, so we can limit the number of output packets
+     * by simply dropping them here.
+     * Counting encoded video frames needs to be done separately because of
+     * reordering, see do_video_out().
+     * Do not count the packet when unqueued because it has been counted when queued.
+     */
+    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
+        if (ost->frame_number >= ost->max_frames) {
+            av_packet_unref(pkt);
+            return;
+        }
+        ost->frame_number++;
+    }
+
     if (!of->header_written) {
         AVPacket tmp_pkt = {0};
         /* the muxer is not initialized yet, buffer the packet */
@@ -703,20 +718,6 @@ static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
         (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && audio_sync_method < 0))
         pkt->pts = pkt->dts = AV_NOPTS_VALUE;
 
-    /*
-     * Audio encoders may split the packets --  #frames in != #packets out.
-     * But there is no reordering, so we can limit the number of output packets
-     * by simply dropping them here.
-     * Counting encoded video frames needs to be done separately because of
-     * reordering, see do_video_out()
-     */
-    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed)) {
-        if (ost->frame_number >= ost->max_frames) {
-            av_packet_unref(pkt);
-            return;
-        }
-        ost->frame_number++;
-    }
     if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
         int i;
         uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_QUALITY_STATS,
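[Annotation, not part of the patch] A minimal standalone sketch of the counting rule the two hunks above move to the top of write_packet(): non-video packets are counted against the -frames limit when first written, while a packet drained from the muxing queue (unqueue set) was already counted when it was queued and passes straight through. The struct and field names below are simplified stand-ins for the real OutputStream.

/* Sketch only: FakeStream stands in for OutputStream. */
#include <stdio.h>

enum { TYPE_AUDIO, TYPE_VIDEO };

typedef struct {
    int  codec_type;
    int  encoding_needed;
    long frame_number;
    long max_frames;
} FakeStream;

/* Returns 1 if the packet should be dropped, 0 if it should be written. */
static int count_or_drop(FakeStream *ost, int unqueue)
{
    if (!(ost->codec_type == TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames)
            return 1;               /* over the -frames limit: drop */
        ost->frame_number++;        /* count this packet now */
    }
    return 0;                       /* encoded video is counted in do_video_out(),
                                       unqueued packets were counted when queued */
}

int main(void)
{
    FakeStream a = { TYPE_AUDIO, 1, 0, 2 };
    for (int i = 0; i < 4; i++)
        printf("audio pkt %d: %s\n", i, count_or_drop(&a, 0) ? "dropped" : "written");
    /* a packet drained from the muxing queue is never re-counted */
    printf("unqueued pkt: %s\n", count_or_drop(&a, 1) ? "dropped" : "written");
    return 0;
}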
@@ -835,36 +836,18 @@ static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost)
                 continue;
             } else if (ret < 0)
                 goto finish;
-            /* HACK! - aac_adtstoasc updates extradata after filtering the first frame when
-             * the api states this shouldn't happen after init(). Propagate it here to the
-             * muxer and to the next filters in the chain to workaround this.
-             * TODO/FIXME - Make aac_adtstoasc use new packet side data instead of changing
-             * par_out->extradata and adapt muxers accordingly to get rid of this. */
-            if (!(ost->bsf_extradata_updated[idx - 1] & 1)) {
-                ret = avcodec_parameters_copy(ost->st->codecpar, ost->bsf_ctx[idx - 1]->par_out);
-                if (ret < 0)
-                    goto finish;
-                ost->bsf_extradata_updated[idx - 1] |= 1;
-            }
 
             /* send it to the next filter down the chain or to the muxer */
             if (idx < ost->nb_bitstream_filters) {
-                /* HACK/FIXME! - See above */
-                if (!(ost->bsf_extradata_updated[idx] & 2)) {
-                    ret = avcodec_parameters_copy(ost->bsf_ctx[idx]->par_out, ost->bsf_ctx[idx - 1]->par_out);
-                    if (ret < 0)
-                        goto finish;
-                    ost->bsf_extradata_updated[idx] |= 2;
-                }
                 ret = av_bsf_send_packet(ost->bsf_ctx[idx], pkt);
                 if (ret < 0)
                     goto finish;
                 idx++;
             } else
-                write_packet(of, pkt, ost);
+                write_packet(of, pkt, ost, 0);
         }
     } else
-        write_packet(of, pkt, ost);
+        write_packet(of, pkt, ost, 0);
 
 finish:
     if (ret < 0 && ret != AVERROR_EOF) {
@@ -2972,7 +2955,7 @@ static int check_init_output_file(OutputFile *of, int file_index)
         while (av_fifo_size(ost->muxing_queue)) {
             AVPacket pkt;
             av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
-            write_packet(of, &pkt, ost);
+            write_packet(of, &pkt, ost, 1);
         }
     }
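[Annotation, not part of the patch] The hunk above and the write_packet() changes earlier fit together like this: packets that arrive before the muxer header is written are buffered in ost->muxing_queue (and counted once at that point), and check_init_output_file() later drains the queue through write_packet() with unqueue = 1 so they are not counted against -frames a second time. The toy program below illustrates that call pattern; plain arrays and ints stand in for AVFifoBuffer and AVPacket.

#include <stdio.h>

#define QUEUE_MAX 16

static int queued_pkts[QUEUE_MAX];
static int nb_queued;
static int header_written;

static void write_packet_sketch(int pkt, int unqueue)
{
    if (!header_written) {
        /* muxer not initialized yet: buffer the packet (counted once here) */
        queued_pkts[nb_queued++] = pkt;
        return;
    }
    printf("muxed packet %d (unqueue=%d)\n", pkt, unqueue);
}

static void init_output_file_sketch(void)
{
    header_written = 1;
    /* drain the muxing queue; unqueue = 1 marks already-counted packets */
    for (int i = 0; i < nb_queued; i++)
        write_packet_sketch(queued_pkts[i], 1);
    nb_queued = 0;
}

int main(void)
{
    write_packet_sketch(1, 0);   /* buffered */
    write_packet_sketch(2, 0);   /* buffered */
    init_output_file_sketch();   /* header written, queue drained */
    write_packet_sketch(3, 0);   /* muxed directly */
    return 0;
}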
 
@@ -3232,6 +3215,30 @@ static void parse_forced_key_frames(char *kf, OutputStream *ost,
     ost->forced_kf_pts   = pts;
 }
 
+static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
+{
+    InputStream *ist = get_input_stream(ost);
+    AVCodecContext *enc_ctx = ost->enc_ctx;
+    AVFormatContext *oc;
+
+    if (ost->enc_timebase.num > 0) {
+        enc_ctx->time_base = ost->enc_timebase;
+        return;
+    }
+
+    if (ost->enc_timebase.num < 0) {
+        if (ist) {
+            enc_ctx->time_base = ist->st->time_base;
+            return;
+        }
+
+        oc = output_files[ost->file_index]->ctx;
+        av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
+    }
+
+    enc_ctx->time_base = default_time_base;
+}
+
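[Annotation, not part of the patch] The new init_encoder_time_base() helper above selects the encoder time base in three steps: a positive ost->enc_timebase is used as-is, a negative numerator means "copy the input stream's time base" (with a warning and fallback to the default when no input stream is available), and otherwise the caller-supplied default applies. The sketch below shows the same selection rule using libavutil's AVRational; pick_time_base and the way the requested value is obtained are illustrative assumptions, not ffmpeg API.

#include <stdio.h>
#include <libavutil/rational.h>

static AVRational pick_time_base(AVRational requested,
                                 const AVRational *input_tb, /* NULL if no input stream */
                                 AVRational default_tb)
{
    if (requested.num > 0)
        return requested;                 /* explicit per-stream time base */
    if (requested.num < 0 && input_tb)
        return *input_tb;                 /* negative numerator: mirror the input stream */
    return default_tb;                    /* default, e.g. 1/sample_rate or 1/frame_rate */
}

int main(void)
{
    AVRational in_tb = av_make_q(1, 90000);
    AVRational tb;

    tb = pick_time_base(av_make_q(1, 1000), &in_tb, av_make_q(1, 25));
    printf("explicit:   %d/%d\n", tb.num, tb.den);

    tb = pick_time_base(av_make_q(-1, 1), &in_tb, av_make_q(1, 25));
    printf("from input: %d/%d\n", tb.num, tb.den);

    tb = pick_time_base(av_make_q(0, 1), NULL, av_make_q(1, 25));
    printf("default:    %d/%d\n", tb.num, tb.den);
    return 0;
}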
 static int init_output_stream_encode(OutputStream *ost)
 {
     InputStream *ist = get_input_stream(ost);
@@ -3302,10 +3309,13 @@ static int init_output_stream_encode(OutputStream *ost)
         enc_ctx->sample_rate    = av_buffersink_get_sample_rate(ost->filter->filter);
         enc_ctx->channel_layout = av_buffersink_get_channel_layout(ost->filter->filter);
         enc_ctx->channels       = av_buffersink_get_channels(ost->filter->filter);
-        enc_ctx->time_base      = (AVRational){ 1, enc_ctx->sample_rate };
+
+        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
         break;
+
     case AVMEDIA_TYPE_VIDEO:
-        enc_ctx->time_base = av_inv_q(ost->frame_rate);
+        init_encoder_time_base(ost, av_inv_q(ost->frame_rate));
+
         if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
             enc_ctx->time_base = av_buffersink_get_time_base(ost->filter->filter);
         if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH